From 8b918807eefceb12790ea9de55283fa59f3002f1 Mon Sep 17 00:00:00 2001 From: arturoBeccar Date: Fri, 1 Nov 2024 18:26:53 -0300 Subject: [PATCH 1/4] Correct findings-1-Parallel.json --- .../1-Parallel/findings-1-Parallel.json | 27 ++++++++++--------- 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/audited-projects/1-Parallel/findings-1-Parallel.json b/audited-projects/1-Parallel/findings-1-Parallel.json index 42b772b..e0e663f 100644 --- a/audited-projects/1-Parallel/findings-1-Parallel.json +++ b/audited-projects/1-Parallel/findings-1-Parallel.json @@ -83,28 +83,28 @@ "repository": "https://github.com/parallel-finance/parallel", "audited_commit": "5ca8e13b7b4312855ae2ef1d39f14b38088dfdbd", "reported_remediated_commit": null, - "file_path": "pallets/liquid-staking/src/lib.rs", + "location": [ + { + "file_path": "pallets/liquid-staking/src/lib.rs", + "lines": [{ "from": 1071, "to": 1159 }] + } + ], "reported_impact": "Low", "reported_likelihood": "Difficulty-High", "cwe_classification": null, "vulnerability_class_audit": "Data Validation", - "vulnerable_components": [ - "notification_received", - "do_notification_received" - ], - "lines": [{ "from": 1071, "to": 1159 }], "description": "5. Failed XCM requests left in storage\nSeverity: Low\nDifficulty: High\nType: Data Validation\nFinding ID: TOB-PLF-5\nTarget: pallets/liquid-staking/src/lib.rs\nDescription\nWhen the liquid-staking pallet generates an XCM request for the parent chain, the corresponding XCM response triggers a call to Pallet::notification_received. If the response is of the Response::ExecutionResult type, this method calls Pallet::do_notification_received to handle the result.\nThe Pallet::do_notification_received method checks whether the request was successful and then updates the local state according to the corresponding XCM request, which is obtained from the XcmRequests storage map.\nfn do_notification_received(\nquery_id: QueryId,\nrequest: XcmRequest<T>,\nres: Option<(u32, XcmError)>,\n) -> DispatchResult {\nuse ArithmeticKind::*;\nuse XcmRequest::*;\nlet executed = res.is_none();\nif !executed {\nreturn Ok(());\n}\nmatch request {\nBond {\nindex: derivative_index,\namount,\n} => {\nensure!(\n!StakingLedgers::<T>::contains_key(&derivative_index),\nError::<T>::AlreadyBonded\n);\nlet staking_ledger =\n<StakingLedger<T::AccountId, BalanceOf<T>>>::new(\nSelf::derivative_sovereign_account_id(derivative_index),\namount,\n);\nStakingLedgers::<T>::insert(derivative_index, staking_ledger);\nMatchingPool::<T>::try_mutate(|p| -> DispatchResult {\np.update_total_stake_amount(amount, Subtraction)\n})?;\nT::Assets::burn_from(\nSelf::staking_currency()?,\n&Self::account_id(),\namount\n)?;\n}\n// ... \n}\nXcmRequests::<T>::remove(&query_id);\nOk(())\n}\nFigure 5.1: pallets/liquid-staking/src/lib.rs:1071-1159\nIf the method completes without errors, the XCM request is removed from storage via a call to XcmRequests::remove(query_id). However, if any of the following conditions are true, the corresponding XCM request is left in storage indefinitely:\n1. The request fails and Pallet::do_notification_received exits early.\n2. Pallet::do_notification_received fails.\n3. The response type is not Response::ExecutionResult.\nThese three cases are currently unhandled by the codebase. 
The same issue is present in the crowdloans pallet implementation of Pallet::do_notification_received.\nRecommendations\nShort term, ensure that failed XCM requests are handled correctly by the crowdloans and liquid-staking pallets.", "description_summary": "Failed XCM requests are left in storage indefinitely if not handled properly by the notification_received method." }, { - "title": "Failed XCM requests left in storage", + "title": "Risk of using stale oracle prices in loans pallet", "repository": "https://github.com/parallel-finance/parallel", "audited_commit": "5ca8e13b7b4312855ae2ef1d39f14b38088dfdbd", "reported_remediated_commit": null, "location": [ { - "file_path": "pallets/liquid-staking/src/lib.rs", - "lines": [{ "from": 1071, "to": 1159 }] + "file_path": "pallets/loans/src/lib.rs", + "lines": [{ "from": 1430, "to": 1441 }] } ], "reported_impact": "Low", @@ -158,13 +158,16 @@ "repository": "https://github.com/parallel-finance/parallel", "audited_commit": "5ca8e13b7b4312855ae2ef1d39f14b38088dfdbd", "reported_remediated_commit": null, - "file_path": "pallets/crowdloans/src/lib.rs", + "location": [ + { + "file_path": "pallets/crowdloans/src/lib.rs", + "lines": [{ "from": 502, "to": 594 }] + } + ], "reported_impact": "Informational", "reported_likelihood": "Difficulty-High", "cwe_classification": null, "vulnerability_class_audit": "Data Validation", - "vulnerable_components": ["contribute"], - "lines": [{ "from": 502, "to": 594 }], "description": "9. The referral code is a sequence of arbitrary bytes\nSeverity: Informational\nDifficulty: High\nType: Data Validation\nFinding ID: TOB-PLF-9\nTarget: pallets/crowdloans/src/lib.rs\nDescription\nThe referral code is used in a number of extrinsic calls in the crowdloans pallet. Because the referral code is never validated, it can be a sequence of arbitrary bytes. The referral code is logged by a number of extrinsics. However, it is currently impossible to perform log injection because the referral code is printed as a hexadecimal string rather than raw bytes (using the debug representation).\npub fn contribute(\norigin: OriginFor<T>,\ncrowdloan: ParaId,\n#[pallet::compact] amount: BalanceOf<T>,\nreferral_code: Vec<u8>,\n) -> DispatchResultWithPostInfo {\n// ... \nlog::trace!(\ntarget: \"crowdloans::contribute\",\n\"who: {:?}, para_id: {:?}, amount: {:?}, referral_code: {:?}\",\n&who,\n&crowdloan,\n&amount,\n&referral_code\n);\nOk(().into())\n}\nFigure 9.1: pallets/crowdloans/src/lib.rs:502-594\nExploit Scenario\nThe referral code is rendered as raw bytes in a vulnerable environment, introducing an opportunity to perform a log injection attack.\nRecommendations\nShort term, choose and implement a data type that models the referral code semantics as closely as possible.", "description_summary": "The referral code in the crowdloans pallet is arbitrary and unvalidated, potentially allowing log injection attacks."
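To make TOB-PLF-9's recommendation concrete, below is a minimal sketch of a referral-code type that models the intended semantics instead of accepting arbitrary bytes. The name ReferralCode, the 32-byte cap, and the ASCII-alphanumeric rule are illustrative assumptions, not part of the audited pallet (a real Substrate implementation would more likely wrap a BoundedVec).

```rust
// Illustrative only: a constrained referral-code type. The validation rules
// (length cap, ASCII alphanumeric) are assumptions made for this sketch.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ReferralCode(Vec<u8>);

impl ReferralCode {
    pub const MAX_LEN: usize = 32; // assumed cap

    /// Parse raw bytes into a validated referral code.
    pub fn parse(raw: Vec<u8>) -> Result<Self, &'static str> {
        if raw.len() > Self::MAX_LEN {
            return Err("referral code too long");
        }
        if !raw.iter().all(|b| b.is_ascii_alphanumeric()) {
            return Err("referral code must be ASCII alphanumeric");
        }
        Ok(Self(raw))
    }
}

fn main() {
    assert!(ReferralCode::parse(b"ABC123".to_vec()).is_ok());
    assert!(ReferralCode::parse(vec![0xff, 0x0a]).is_err()); // raw bytes rejected
}
```

A type along these lines also addresses the log-injection concern directly, since only printable characters can ever reach a log line.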
}, From 9742f946981902a65bf8b30e8c7ebf7610ea0420 Mon Sep 17 00:00:00 2001 From: arturoBeccar Date: Fri, 1 Nov 2024 18:45:26 -0300 Subject: [PATCH 2/4] Update findings.json with corrected 1-Parallel --- dataset/findings.json | 3422 ++++++++++++++++++++--------------------- 1 file changed, 1664 insertions(+), 1758 deletions(-) diff --git a/dataset/findings.json b/dataset/findings.json index 31077d5..d0d3555 100644 --- a/dataset/findings.json +++ b/dataset/findings.json @@ -1,1759 +1,1665 @@ [ - { - "audited_project_id": 1, - "project_name": "Parallel", - "auditor": "Trail of Bits", - "audit_link": "https://github.com/parallel-finance/auditing-report/blob/main/Trail%20of%20Bits_Parallel%20Finance_Final%20Report.pdf", - "findings": [ - { - "title": "Vulnerable dependencies in the Substrate parachain", - "repository": "https://github.com/parallel-finance/parallel", - "audited_commit": "5ca8e13b7b4312855ae2ef1d39f14b38088dfdbd", - "reported_remediated_commit": null, - "location": null, - "reported_impact": "Medium", - "reported_likelihood": "Difficulty-High", - "cwe_classification": null, - "vulnerability_class_audit": "Patching", - "description": "1. Vulnerable dependencies in the Substrate parachain\nSeverity: Medium\nDifficulty: High\nType: Patching\nFinding ID: TOB-PLF-1\nTarget: parallel repository\nDescription\nThe Parallel Finance parachain node uses the following dependencies with known vulnerabilities. (All of the dependencies listed are inherited from the Substrate framework.)\nDependencyVersionIDDescription\nchrono 0.4.19 RUSTSEC-2020-0159 Potential segfault in localtime_r invocations\nlru 0.6.6 RUSTSEC-2021-0130 Use after free in lru crate\ntime 0.1.44 RUSTSEC-2020-0071 Potential segfault in the time crate\nnet2 0.2.37 RUSTSEC-2020-0016 net2 crate has been deprecated; use socket2 instead\nOther than chrono, all the dependencies can simply be updated to their newest versions to fix the vulnerabilities. The chrono crate issue has not been mitigated and remains problematic. A specific sequence of calls must occur to trigger the vulnerability, which is discussed in this GitHub thread in the chrono repository.\nExploit Scenario\nAn attacker exploits a known vulnerability in the Parallel Finance node and performs a denial-of-service attack on the network by taking down all nodes in the network.\nRecommendations\nShort term, update all dependencies to their newest versions. Monitor the referenced GitHub thread regarding the chrono crate segfault issue.\nLong term, run cargo-audit as part of the CI/CD pipeline and ensure that the team is alerted to any vulnerable dependencies that are detected.", - "description_summary": "The Substrate parachain uses dependencies with known vulnerabilities, including chrono, lru, time, and net2, which may lead to potential segfaults or memory issues." - }, - { - "title": "Users can avoid accruing interest by repaying a zero amount", - "repository": "https://github.com/parallel-finance/parallel", - "audited_commit": "5ca8e13b7b4312855ae2ef1d39f14b38088dfdbd", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/loans/src/lib.rs", - "lines": [ - { - "from": 1057, - "to": 1087 - }, - { - "from": 1106, - "to": 1121 - } - ] - } - ], - "reported_impact": "Medium", - "reported_likelihood": "Difficulty-Low", - "cwe_classification": null, - "vulnerability_class_audit": "Data Validation", - "description": "2. 
Users can avoid accruing interest by repaying a zero amount\nSeverity: Medium\nDifficulty: Low\nType: Data Validation\nFinding ID: TOB-PLF-2\nTarget: pallets/loans/src/lib.rs\nDescription\nTo repay borrowed funds, users call the repay_borrow extrinsic. The extrinsic implementation calls the Pallet::repay_borrow_internal method to recompute the loan balance. Pallet::repay_borrow_internal updates the loan balance for the account and resets the borrow index as part of the calculation.\nfn repay_borrow_internal(\nborrower: &T::AccountId,\nasset_id: AssetIdOf,\naccount_borrows: BalanceOf,\nrepay_amount: BalanceOf,\n) -> DispatchResult {\n// ... \nAccountBorrows::::insert(\nasset_id,\nborrower,\nBorrowSnapshot {\nprincipal: account_borrows_new,\nborrow_index: Self::borrow_index(asset_id),\n},\n);\nTotalBorrows::::insert(asset_id, total_borrows_new);\nOk(())\n}\nFigure 2.1: pallets/loans/src/lib.rs:1057-1087\nThe borrow index is used in the calculation of the accumulated interest for the loan in Pallet::current_balance_from_snapshot. Specifically, the outstanding balance, snapshot.principal, is multiplied by the quotient of borrow_index divided by snapshot.borrow_index.\npub fn current_balance_from_snapshot(\nasset_id: AssetIdOf,\nsnapshot: BorrowSnapshot>,\n) -> Result, DispatchError> {\nif snapshot.principal.is_zero() || snapshot.borrow_index.is_zero() {\nreturn Ok(Zero::zero());\n}\n// Calculate new borrow balance using the interest index:\n// recent_borrow_balance = snapshot.principal * borrow_index /\n// snapshot.borrow_index\nlet recent_borrow_balance = Self::borrow_index(asset_id)\n.checked_div(&snapshot.borrow_index)\n.and_then(|r| r.checked_mul_int(snapshot.principal))\n.ok_or(ArithmeticError::Overflow)?;\nOk(recent_borrow_balance)\n}\nFigure 2.2: pallets/loans/src/lib.rs:1106-1121\nTherefore, if the snapshot borrow index is updated to Self::borrow_index(asset_id), the resulting recent_borrow_balance in Pallet::current_balance_from_snapshot will always be equal to snapshot.principal. That is, no interest will be applied to the loan. It follows that the accrued interest is lost whenever part of the loan is repaid. In an extreme case, if the repaid amount passed to repay_borrow is 0, users could reset the borrow index without repaying anything.\nThe same issue is present in the implementations of the liquidated_transfer and borrow extrinsics as well.\nExploit Scenario\nA malicious user borrows assets from Parallel Finance and calls repay_borrow with a repay_amount of zero. This allows her to avoid paying interest on the loan.\nRecommendations\nShort term, modify the code so that the accrued interest is added to the snapshot principal when the snapshot is updated.\nLong term, add unit tests for edge cases (like repaying a zero amount) to increase the chances of discovering unexpected system behavior.", - "description_summary": "Users can avoid paying interest on loans by repaying a zero amount, resetting the borrow index without repaying anything." - }, - { - "title": "Missing validation in Pallet::force_update_market", - "repository": "https://github.com/parallel-finance/parallel", - "audited_commit": "5ca8e13b7b4312855ae2ef1d39f14b38088dfdbd", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/loans/src/lib.rs", - "lines": [ - { - "from": 539, - "to": 556 - } - ] - } - ], - "reported_impact": "Informational", - "reported_likelihood": "Difficulty-High", - "cwe_classification": null, - "vulnerability_class_audit": "Data Validation", - "description": "3. 
Missing validation in Pallet::force_update_market\nSeverity: Informational\nDifficulty: High\nType: Data Validation\nFinding ID: TOB-PLF-3\nTarget: pallets/loans/src/lib.rs\nDescription\nThe Pallet::force_update_market method can be used to replace the stored market instance for a given asset. Other methods used to update market parameters perform extensive validation of the market parameters, but force_update_market checks only the rate model.\npub fn force_update_market(\norigin: OriginFor,\nasset_id: AssetIdOf,\nmarket: Market>,\n) -> DispatchResultWithPostInfo {\nT::UpdateOrigin::ensure_origin(origin)?;\nensure!(\nmarket.rate_model.check_model(),\nError::::InvalidRateModelParam\n);\nlet updated_market = Self::mutate_market(asset_id, |stored_market| {\n*stored_market = market;\nstored_market.clone()\n})?;\nSelf::deposit_event(Event::::UpdatedMarket(updated_market));\nOk(().into())\n}\nFigure 3.1: pallets/loans/src/lib.rs:539-556\nThis means that the caller (who is either the root account or half of the general council) could inadvertently change immutable market parameters like ptoken_id by mistake.\nExploit Scenario\nThe root account calls force_update_market to update a set of market parameters. By mistake, the ptoken_id market parameter is updated, which means that Pallet::ptoken_id and Pallet::underlying_id are no longer inverses.\nRecommendations\nShort term, consider adding more input validation to the force_update_market extrinsic. In particular, it may make sense to ensure that the ptoken_id market parameter has not changed. Alternatively, add validation to check whether the ptoken_id market parameter is updated and to update the UnderlyingAssetId map to ensure that the value matches the Markets storage map.", - "description_summary": "The force_update_market method lacks sufficient validation, potentially allowing unintended changes to immutable market parameters." - }, - { - "title": "Missing validation in multiple StakingLedger methods", - "repository": "https://github.com/parallel-finance/parallel", - "audited_commit": "5ca8e13b7b4312855ae2ef1d39f14b38088dfdbd", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/liquid-staking/src/types.rs", - "lines": [ - { - "from": 199, - "to": 219 - }, - { - "from": 223, - "to": 226 - }, - { - "from": 230, - "to": 253 - } - ] - } - ], - "reported_impact": "Undetermined", - "reported_likelihood": "Difficulty-High", - "cwe_classification": null, - "vulnerability_class_audit": "Data Validation", - "description": "4. Missing validation in multiple StakingLedger methods\nSeverity: Undetermined\nDifficulty: High\nType: Data Validation\nFinding ID: TOB-PLF-4\nTarget: pallets/liquid-staking/src/types.rs\nDescription\nThe staking ledger is used to keep track of the total amount of staked funds in the system. It is updated in response to cross-consensus messaging (XCM) requests to the parent chain (either Polkadot or Kusama). A number of the StakingLedger methods lack sufficient input validation before they update the staking ledger\u2019s internal state. Even though the input is validated as part of the original XCM call, there could still be issues due to implementation errors or overlooked corner cases.\nFirst, the StakingLedger::rebond method does not use checked arithmetic to update the active balance. 
The method should also check that the computed unlocking_balance is equal to the input value at the end of the loop to ensure that the system remains consistent.\npub fn rebond(&mut self, value: Balance) {\nlet mut unlocking_balance: Balance = Zero::zero();\nwhile let Some(last) = self.unlocking.last_mut() {\nif unlocking_balance + last.value <= value {\nunlocking_balance += last.value;\nself.active += last.value;\nself.unlocking.pop();\n} else {\nlet diff = value - unlocking_balance;\nunlocking_balance += diff;\nself.active += diff;\nlast.value -= diff;\n}\nif unlocking_balance >= value {\nbreak;\n}\n}\n}\nFigure 4.1: pallets/liquid-staking/src/types.rs:199-219\nSecond, the StakingLedger::bond_extra method does not use checked arithmetic to update the total and active balances.\npub fn bond_extra(&mut self, value: Balance) {\nself.total += value;\nself.active += value;\n}\nFigure 4.2: pallets/liquid-staking/src/types.rs:223-226\nFinally, the StakingLedger::unbond method does not use checked arithmetic when updating the active balance.\npub fn unbond(&mut self, value: Balance, target_era: EraIndex) {\nif let Some(mut chunk) = self\n.unlocking\n.last_mut()\n.filter(|chunk| chunk.era == target_era)\n{\nchunk.value = chunk.value.saturating_add(value);\n} else {\nself.unlocking.push(UnlockChunk {\nvalue,\nera: target_era,\n});\n};\nself.active -= value;\n}\nFigure 4.3: pallets/liquid-staking/src/types.rs:230-253\nSince the staking ledger is updated by a number of the XCM response handlers, and XCM responses may return out of order, it is important to ensure that input to the staking ledger methods is validated to prevent issues due to race conditions and corner cases.\nWe could not find a way to exploit this issue, but we cannot rule out the risk that it could be used to cause a denial-of-service condition in the system.\nExploit Scenario\nThe staking ledger's state is updated as part of a WithdrawUnbonded request, leaving the unlocking vector in the staking ledger empty. Later, when the response to a previous call to rebond is handled, the ledger is updated again, which leaves it in an inconsistent state.\nRecommendations\nShort term, ensure that the balance represented by the staking ledger\u2019s unlocking vector is enough to cover the input balance passed to StakingLedger::rebond. Use checked arithmetic in all staking ledger methods that update the ledger\u2019s internal state to ensure that issues due to data races are detected and handled correctly.", - "description_summary": "StakingLedger methods lack sufficient input validation and unchecked arithmetic, potentially leading to inconsistent states or race conditions." - }, - { - "title": "Failed XCM requests left in storage", - "repository": "https://github.com/parallel-finance/parallel", - "audited_commit": "5ca8e13b7b4312855ae2ef1d39f14b38088dfdbd", - "reported_remediated_commit": null, - "file_path": "pallets/liquid-staking/src/lib.rs", - "reported_impact": "Low", - "reported_likelihood": "Difficulty-High", - "cwe_classification": null, - "vulnerability_class_audit": "Data Validation", - "vulnerable_components": [ - "notification_received", - "do_notification_received" - ], - "lines": [ - { - "from": 1071, - "to": 1159 - } - ], - "description": "5. 
Failed XCM requests left in storage\nSeverity: Low\nDifficulty: High\nType: Data Validation\nFinding ID: TOB-PLF-5\nTarget: pallets/liquid-staking/src/lib.rs\nDescription\nWhen the liquid-staking pallet generates an XCM request for the parent chain, the corresponding XCM response triggers a call to Pallet::notification_received. If the response is of the Response::ExecutionResult type, this method calls Pallet::do_notification_received to handle the result.\nThe Pallet::do_notification_received method checks whether the request was successful and then updates the local state according to the corresponding XCM request, which is obtained from the XcmRequests storage map.\nfn do_notification_received(\nquery_id: QueryId,\nrequest: XcmRequest<T>,\nres: Option<(u32, XcmError)>,\n) -> DispatchResult {\nuse ArithmeticKind::*;\nuse XcmRequest::*;\nlet executed = res.is_none();\nif !executed {\nreturn Ok(());\n}\nmatch request {\nBond {\nindex: derivative_index,\namount,\n} => {\nensure!(\n!StakingLedgers::<T>::contains_key(&derivative_index),\nError::<T>::AlreadyBonded\n);\nlet staking_ledger =\n<StakingLedger<T::AccountId, BalanceOf<T>>>::new(\nSelf::derivative_sovereign_account_id(derivative_index),\namount,\n);\nStakingLedgers::<T>::insert(derivative_index, staking_ledger);\nMatchingPool::<T>::try_mutate(|p| -> DispatchResult {\np.update_total_stake_amount(amount, Subtraction)\n})?;\nT::Assets::burn_from(\nSelf::staking_currency()?,\n&Self::account_id(),\namount\n)?;\n}\n// ... \n}\nXcmRequests::<T>::remove(&query_id);\nOk(())\n}\nFigure 5.1: pallets/liquid-staking/src/lib.rs:1071-1159\nIf the method completes without errors, the XCM request is removed from storage via a call to XcmRequests::remove(query_id). However, if any of the following conditions are true, the corresponding XCM request is left in storage indefinitely:\n1. The request fails and Pallet::do_notification_received exits early.\n2. Pallet::do_notification_received fails.\n3. The response type is not Response::ExecutionResult.\nThese three cases are currently unhandled by the codebase. The same issue is present in the crowdloans pallet implementation of Pallet::do_notification_received.\nRecommendations\nShort term, ensure that failed XCM requests are handled correctly by the crowdloans and liquid-staking pallets.", - "description_summary": "Failed XCM requests are left in storage indefinitely if not handled properly by the notification_received method." - }, - { - "title": "Failed XCM requests left in storage", - "repository": "https://github.com/parallel-finance/parallel", - "audited_commit": "5ca8e13b7b4312855ae2ef1d39f14b38088dfdbd", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/liquid-staking/src/lib.rs", - "lines": [ - { - "from": 1071, - "to": 1159 - } - ] - } - ], - "reported_impact": "Low", - "reported_likelihood": "Difficulty-High", - "cwe_classification": null, - "vulnerability_class_audit": "Data Validation", - "description": "6. Risk of using stale oracle prices in loans pallet\nSeverity: Low\nDifficulty: High\nType: Data Validation\nFinding ID: TOB-PLF-6\nTarget: pallets/loans/src/lib.rs\nDescription\nThe loans pallet uses oracle prices to find a USD value of assets using the get_price function. The get_price function internally uses the T::PriceFeeder::get_price function, which returns a timestamp and the price. 
However, the returned timestamp is ignored.\npub fn get_price(asset_id: AssetIdOf) -> Result {\nlet (price, _) = T::PriceFeeder::get_price(&asset_id).ok_or(Error::::PriceOracleNotReady)?;\nif price.is_zero() {\nreturn Err(Error::::PriceIsZero.into());\n}\nlog::trace!(target: \"loans::get_price\", \"price: {:?}\", price.into_inner());\nOk(price)\n}\nFigure 6.1: pallets/loans/src/lib.rs:1430-1441\nExploit Scenario\nThe price feeding oracles fail to deliver prices for an extended period of time. The get_price function returns stale prices, causing the get_asset_value function to return a non-market asset value.\nRecommendations\nShort term, modify the code so that it compares the returned timestamp from the T::PriceFeeder::get_price function with the current timestamp, returns an error if the price is too old, and handles the emergency price, which currently has a timestamp of zero. This will stop the market if stale prices are returned and allow the governance process to intervene with an emergency price.", - "description_summary": "The loans pallet may use stale oracle prices, leading to incorrect asset valuations." - }, - { - "title": "Missing calculations in crowdloans extrinsics", - "repository": "https://github.com/parallel-finance/parallel", - "audited_commit": "5ca8e13b7b4312855ae2ef1d39f14b38088dfdbd", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/crowdloans/src/lib.rs", - "lines": [ - { - "from": 718, - "to": 765 - } - ] - } - ], - "reported_impact": "Undetermined", - "reported_likelihood": "Difficulty-High", - "cwe_classification": null, - "vulnerability_class_audit": "Undefined Behavior", - "description": "7. Missing calculations in crowdloans extrinsics\nSeverity: Undetermined\nDifficulty: High\nType: Undefined Behavior\nFinding ID: TOB-PLF-7\nTarget: pallets/crowdloans/src/lib.rs\nDescription\nThe claim extrinsic in the crowdloans pallet is missing code to subtract the claimed amount from vault.contributed to update the total contribution amount. A similar bug exists in the refund extrinsic: there is no subtraction from vault.contributed after the Self::contribution_kill call.\npub fn claim(\norigin: OriginFor,\ncrowdloan: ParaId,\nlease_start: LeasePeriod,\nlease_end: LeasePeriod,\n) -> DispatchResult {\n// ... \nSelf::contribution_kill(\nvault.trie_index,\n&who,\nChildStorageKind::Contributed\n);\nSelf::deposit_event(Event::::VaultClaimed(\ncrowdloan,\n(lease_start, lease_end),\nctoken,\nwho,\namount,\nVaultPhase::Succeeded,\n));\nOk(())\n}\nFigure 7.1: pallets/crowdloans/src/lib.rs:718-765\nExploit Scenario\nThe claim extrinsic is called, but the total amount in vault.contributed is not updated, leading to incorrect calculations in other places.\nRecommendations\nShort term, update the claim and refund extrinsics so that they subtract the amount from vault.contributed.\nLong term, add a test suite to ensure that the vault state stays consistent after the claim and refund extrinsics are called.", - "description_summary": "The claim and refund extrinsics do not update the vault's total contribution, leading to incorrect calculations." 
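A minimal sketch of the short-term fix recommended in TOB-PLF-7, using a simplified stand-in for the pallet's vault type (the real code operates on the pallet's Balance type): the claimed amount is subtracted from `contributed` with checked arithmetic before the event is emitted.

```rust
struct Vault {
    contributed: u128, // running total of contributions
}

fn claim(vault: &mut Vault, amount: u128) -> Result<(), &'static str> {
    // Subtract the claimed amount with checked arithmetic so the vault's
    // running total can never silently drift or underflow.
    vault.contributed = vault
        .contributed
        .checked_sub(amount)
        .ok_or("claim exceeds recorded contributions")?;
    // ... kill the contribution record and emit VaultClaimed here ...
    Ok(())
}

fn main() {
    let mut vault = Vault { contributed: 100 };
    assert!(claim(&mut vault, 40).is_ok());
    assert_eq!(vault.contributed, 60);
    assert!(claim(&mut vault, 1_000).is_err()); // over-claim is rejected
}
```

The refund path would mirror this: subtract after Self::contribution_kill succeeds, so the stored total always matches the child-trie contents.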
- }, - { - "title": "Event emitted when update_vault and set_vrf calls do not make updates", - "repository": "https://github.com/parallel-finance/parallel", - "audited_commit": "5ca8e13b7b4312855ae2ef1d39f14b38088dfdbd", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/crowdloans/src/lib.rs", - "lines": [ - { - "from": 424, - "to": 472 - }, - { - "from": 599, - "to": 616 - } - ] - } - ], - "reported_impact": "Informational", - "reported_likelihood": "Difficulty-High", - "cwe_classification": null, - "vulnerability_class_audit": "Auditing and Logging", - "description": "8. Event emitted when update_vault and set_vrf calls do not make updates\nSeverity: Informational\nDifficulty: High\nType: Auditing and Logging\nFinding ID: TOB-PLF-8\nTarget: pallets/crowdloans/src/lib.rs\nDescription\nThe update_vault extrinsic in the crowdloans pallet is responsible for updating the three values shown. It is possible to call update_vault in such a way that no update is performed, but the function emits an event regardless of whether an update occurred. The same situation occurs in the set_vrfs extrinsic.\npub fn update_vault(\norigin: OriginFor,\ncrowdloan: ParaId,\ncap: Option>,\nend_block: Option>,\ncontribution_strategy: Option,\n) -> DispatchResult {\nT::UpdateVaultOrigin::ensure_origin(origin)?;\nlet mut vault = Self::current_vault(crowdloan).ok_or(Error::::VaultDoesNotExist)?;\nif let Some(cap) = cap {\n// ... \n}\nif let Some(end_block) = end_block {\n// ... \n}\nif let Some(contribution_strategy) = contribution_strategy {\n// ... \n}\nSelf::deposit_event(Event::::VaultUpdated(crowdloan,(lease_start, lease_end),contribution_strategy,cap,end_block));\nOk(())\n}\nFigure 8.1: pallets/crowdloans/src/lib.rs:424-472\npub fn set_vrfs(origin: OriginFor, vrfs: Vec) -> DispatchResult {\nT::VrfOrigin::ensure_origin(origin)?;\nlog::trace!(target: \"crowdloans::set_vrfs\", \"pre-toggle. vrfs: {:?}\",vrfs);\nVrfs::::try_mutate(|b| -> Result<(), DispatchError> {\n*b = vrfs.try_into().map_err(|_| Error::::MaxVrfsExceeded)?;\nOk(())\n})?;\nSelf::deposit_event(Event::::VrfsUpdated(Self::vrfs()));\nOk(())\n}\nFigure 8.2: pallets/crowdloans/src/lib.rs:599-616\nExploit Scenario\nA system observes that the VaultUpdate event was emitted even though the vault state did not actually change. Based on this observation, it performs logic that should be executed only when the state has been updated.\nRecommendations\nShort term, modify the VaultUpdate event so that it is emitted only when the update_vault extrinsic makes an actual update. Optionally, have the update_vault extrinsic return an error to the caller when calling it results in no updates.", - "description_summary": "The update_vault and set_vrfs extrinsics emit events even when no actual updates are made." - }, - { - "title": "The referral code is a sequence of arbitrary bytes", - "repository": "https://github.com/parallel-finance/parallel", - "audited_commit": "5ca8e13b7b4312855ae2ef1d39f14b38088dfdbd", - "reported_remediated_commit": null, - "file_path": "pallets/crowdloans/src/lib.rs", - "reported_impact": "Informational", - "reported_likelihood": "Difficulty-High", - "cwe_classification": null, - "vulnerability_class_audit": "Data Validation", - "vulnerable_components": [ - "contribute" - ], - "lines": [ - { - "from": 502, - "to": 594 - } - ], - "description": "9. 
The referral code is a sequence of arbitrary bytes\nSeverity: Informational\nDifficulty: High\nType: Data Validation\nFinding ID: TOB-PLF-9\nTarget: pallets/crowdloans/src/lib.rs\nDescription\nThe referral code is used in a number of extrinsic calls in the crowdloans pallet. Because the referral code is never validated, it can be a sequence of arbitrary bytes. The referral code is logged by a number of extrinsics. However, it is currently impossible to perform log injection because the referral code is printed as a hexadecimal string rather than raw bytes (using the debug representation).\npub fn contribute(\norigin: OriginFor,\ncrowdloan: ParaId,\n#[pallet::compact] amount: BalanceOf,\nreferral_code: Vec,\n) -> DispatchResultWithPostInfo {\n// ... \nlog::trace!(\ntarget: \"crowdloans::contribute\",\n\"who: {:?}, para_id: {:?}, amount: {:?}, referral_code: {:?}\",\n&who,\n&crowdloan,\n&amount,\n&referral_code\n);\nOk(().into())\n}\nFigure 9.1: pallets/crowdloans/src/lib.rs:502-594\nExploit Scenario\nThe referral code is rendered as raw bytes in a vulnerable environment, introducing an opportunity to perform a log injection attack.\nRecommendations\nShort term, choose and implement a data type that models the referral code semantics as closely as possible.", - "description_summary": "The referral code in the crowdloans pallet is arbitrary and unvalidated, potentially allowing log injection attacks." - }, - { - "title": "Missing validation of referral code size", - "repository": "https://github.com/parallel-finance/parallel", - "audited_commit": "5ca8e13b7b4312855ae2ef1d39f14b38088dfdbd", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/crowdloans/src/lib.rs", - "lines": [ - { - "from": 1429, - "to": 1464 - } - ] - } - ], - "reported_impact": "Low", - "reported_likelihood": "Difficulty-Low", - "cwe_classification": null, - "vulnerability_class_audit": "Data Validation", - "description": "10. Missing validation of referral code size\nSeverity: Low\nDifficulty: Low\nType: Data Validation\nFinding ID: TOB-PLF-10\nTarget: pallets/crowdloans/src/lib.rs\nDescription\nThe length of the referral code is not validated by the contribute extrinsic defined by the crowdloans pallet. Since the referral code is stored by the node, a malicious user could call contribute multiple times with a very large referral code. This would increase the memory pressure on the node, potentially leading to memory exhaustion.\nfn do_contribute(\nwho: &AccountIdOf,\ncrowdloan: ParaId,\nvault_id: VaultId,\namount: BalanceOf,\nreferral_code: Vec,\n) -> Result<(), DispatchError> {\nXcmRequests::::insert(\nquery_id,\nXcmRequest::Contribute {\ncrowdloan,\nvault_id,\nwho: who.clone(),\namount,\nreferral_code: referral_code.clone(),\n},\n);\nOk(())\n}\nFigure 10.1: pallets/crowdloans/src/lib.rs:1429-1464\nExploit Scenario\nA malicious user calls the contribute extrinsic multiple times with a very large referral code. This increases the memory pressure on the validator nodes and eventually causes all parachain nodes to run out of memory and crash.\nRecommendations\nShort term, add validation that limits the size of the referral code argument to the contribute extrinsic.", - "description_summary": "The contribute extrinsic lacks validation on the referral code size, potentially leading to memory exhaustion." 
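For TOB-PLF-10, the short-term fix is a length guard at the top of the contribution path. A sketch under assumed values follows; the 64-byte cap is illustrative only, and a real pallet would expose it as a configuration constant.

```rust
const MAX_REFERRAL_CODE_LEN: usize = 64; // assumed cap, for illustration only

fn do_contribute(referral_code: &[u8]) -> Result<(), &'static str> {
    // Reject oversized codes before they are cloned into XcmRequests storage,
    // removing the memory-exhaustion vector described in the finding.
    if referral_code.len() > MAX_REFERRAL_CODE_LEN {
        return Err("referral code exceeds maximum length");
    }
    // ... build and store the XCM request as before ...
    Ok(())
}

fn main() {
    assert!(do_contribute(&[b'a'; 10]).is_ok());
    assert!(do_contribute(&vec![b'a'; 1_000_000]).is_err()); // DoS-sized input rejected
}
```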
- }, - { - "title": "Code duplication in crowdloans pallet", - "repository": "https://github.com/parallel-finance/parallel", - "audited_commit": "5ca8e13b7b4312855ae2ef1d39f14b38088dfdbd", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/crowdloans/src/lib.rs", - "lines": null - } - ], - "reported_impact": "Informational", - "reported_likelihood": "Difficulty-High", - "cwe_classification": null, - "vulnerability_class_audit": "Patching", - "description": "11. Code duplication in crowdloans pallet\nSeverity: Informational\nDifficulty: High\nType: Patching\nFinding ID: TOB-PLF-11\nTarget: pallets/crowdloans/src/lib.rs\nDescription\nA number of extrinsics in the crowdloans pallet have duplicate code. The close, reopen, and auction_succeeded extrinsics have virtually identical logic. The migrate_pending and refund extrinsics are also fairly similar.\nExploit Scenario\nA vulnerability is found in the duplicate code, but it is patched in only one place.\nRecommendations\nShort term, refactor the close, reopen, and auction_succeeded extrinsics into one function, to be called with values specific to the extrinsics. Refactor common pieces of logic in the migrate_pending and refund extrinsics.\nLong term, avoid code duplication, as it makes the system harder to review and update. Perform regular code reviews and track any logic that is duplicated.", - "description_summary": "Code duplication in the crowdloans pallet increases the risk of inconsistent patches and complicates maintenance." - } - ] - }, - { - "audited_project_id": 2, - "project_name": "Parallel", - "auditor": "Slow Mist", - "audit_link": "https://github.com/parallel-finance/auditing-report/blob/main/Slow%20Mist%20-%20Parallel%20Security%20Audit%20Report.pdf", - "findings": [ - { - "title": "Need to upgrade the module", - "repository": "https://github.com/parallel-finance/parallel", - "audited_commit": "a223cd7910af3540048b58f958fea5b784876468", - "reported_remediated_commit": "fc6d8042ba51719e0f1bca40631e4649a6100510", - "location": null, - "reported_impact": "low-risk", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": null, - "description": "5.1 Need to upgrade the module [low-risk]\nID:RUSTSEC-2021-0067\nCrate:cranelift-codegen\nVersion: 0.71.0\nDate:2021-05-21\nURL:https://rustsec.org/advisories/RUSTSEC-2021-0067\nTitle: Memory access due to code generation flaw in Cranelift module\nSolution: upgrade to >= 0.73.1 OR >= 0.74\nDependency tree: cranelift-codegen 0.71.0\nFixed in: https://github.com/parallel-finance/parallel/pull/210", - "description_summary": "Memory access vulnerability in Cranelift module due to code generation flaw, fixed by upgrading to >= 0.73.1 or >= 0.74." 
- }, - { - "title": "Numeric overflow", - "repository": "https://github.com/parallel-finance/parallel", - "audited_commit": "a223cd7910af3540048b58f958fea5b784876468", - "reported_remediated_commit": "386b0e7e1eea19c143900d4127daa8c475266dad", - "location": [ - { - "file_path": "parallel/pallets/loans/src/lib.rs", - "lines": null - } - ], - "reported_impact": "enhancement", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": null, - "description": "5.2 Numeric overflow[enhancement]\nparallel/pallets/loans/src/lib.rs\nlet total_reserves_new = total_reserves - reduce_amount;\n//...snip code...//\nlet total_reserves_new = total_reserves + add_amount;\nIt is recommended to use `checked_add/checked_sub` to prevent numerical overflow.\nFixed in: https://github.com/parallel-finance/parallel/pull/241", - "description_summary": "Numeric overflow vulnerability due to lack of checked_add/checked_sub, fixed by adding these methods." - }, - { - "title": "Lack of bounds checking", - "repository": "https://github.com/parallel-finance/parallel", - "audited_commit": "a223cd7910af3540048b58f958fea5b784876468", - "reported_remediated_commit": "2bb80a4e75416e27a03ffb9f8d3100aca548d8b2", - "location": [ - { - "file_path": "parallel/pallets/loans/src/lib.rs", - "lines": null - } - ], - "reported_impact": "weakness", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": null, - "description": "5.3 Lack of bounds checking[weakness]\n`mint_amount` has no boundary limit, it is recommended to enhance.\nFixed in: https://github.com/parallel-finance/parallel/pull/258", - "description_summary": "Lack of bounds checking for mint_amount, recommended to enhance." - }, - { - "title": "Oracle price feed risk", - "repository": "https://github.com/parallel-finance/parallel", - "audited_commit": "a223cd7910af3540048b58f958fea5b784876468", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "parallel/pallets/prices/src/lib.rs", - "lines": null - } - ], - "reported_impact": "weakness", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": null, - "description": "5.4 Oracle price feed risk[weakness]\nDue to the lack of time parameter, if the price is not feed in time, the price may be inaccurate.\nparallel/pallets/prices/src/lib.rs\npub enum Event {\n/// Set emergency price. [currency_id, price_detail]\nSetPrice(CurrencyId, PriceWithDecimal),\n/// Reset emergency price. [currency_id]\nResetPrice(CurrencyId),\n}\nFeedback: At present, the source of price feeding is controlled by authority and credible. The range of trustworthiness includes the accuracy and real-time of the price, that is, outdated prices will not be sent to the chain, but the price feeding transaction sent is through operational on the chain. Transaction level guarantees will be packaged in real time, that is, the current block.", - "description_summary": "Oracle price feed may be inaccurate due to lack of a time parameter." 
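To illustrate the checked-arithmetic remediation from finding 5.2 above, here is a minimal sketch; the concrete u128 stands in for the pallet's Balance type, and the function names are illustrative.

```rust
fn reduce_reserves(total_reserves: u128, reduce_amount: u128) -> Result<u128, &'static str> {
    // checked_sub returns None on underflow instead of wrapping or panicking.
    total_reserves
        .checked_sub(reduce_amount)
        .ok_or("arithmetic underflow")
}

fn add_reserves(total_reserves: u128, add_amount: u128) -> Result<u128, &'static str> {
    // checked_add returns None on overflow.
    total_reserves
        .checked_add(add_amount)
        .ok_or("arithmetic overflow")
}

fn main() {
    assert_eq!(reduce_reserves(10, 3), Ok(7));
    assert!(reduce_reserves(3, 10).is_err()); // underflow caught, not wrapped
    assert!(add_reserves(u128::MAX, 1).is_err()); // overflow caught
}
```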
- } - ] - }, - { - "audited_project_id": 3, - "project_name": "AvaProtocol", - "auditor": "Slow Mist", - "audit_link": "https://avaprotocol.org/docs/papers/SlowMist.Audit.Report.-.Turing.Network.-.June.2022.pdf", - "findings": [ - { - "title": "Calculate inaccurate risk", - "repository": "https://github.com/OAK-Foundation/OAK-blockchain", - "audited_commit": "643342e936bbc821d2fb91be69872e4fcecd2273", - "reported_remediated_commit": "cba1acd6961fce877cef95c6b6a198ea8b415a0f", - "location": [ - { - "file_path": "pallets/automation-time/src/lib.rs", - "lines": null - } - ], - "reported_impact": "Suggestion", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Integer Overflow", - "description": "[N1] [Suggestion] Calculate inaccurate risk\nCategory: Integer Overflow Audit\nContent\npallets/automation-time/src/lib.rs\nThere are some risks of value overflow.\nsaturating_mul, saturating_sub, saturating_add and +-*/, +=, -=\nsaturating at the numeric bounds instead of overflowing, The returned result is inaccurate.\nSolution\nUse checked_add/checked_sub/checked_mul/checked_div instead of\nsaturating_add/saturating_sub/saturating_mul/saturating_div and +-*/, +=, -= .\nStatus\nFixed", - "description_summary": "Inaccurate calculation risk due to potential integer overflow; replace saturating operations with checked operations." - }, - { - "title": "User balance is not checked before transfer", - "repository": "https://github.com/OAK-Foundation/OAK-blockchain", - "audited_commit": "643342e936bbc821d2fb91be69872e4fcecd2273", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/automation-time/src/lib.rs", - "lines": null - } - ], - "reported_impact": "Suggestion", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Others", - "description": "[N2] [Suggestion] User balance is not checked\nCategory: Others\nContent\npallets/automation-time/src/lib.rs\nThe amount transferred by the user is not compared with the user's balance here, and the user may not have enough NativeToken.\npub fn schedule_native_transfer_task(\norigin: OriginFor,\nprovided_id: Vec,\nexecution_times: Vec,\nrecipient_id: T::AccountId,\n#[pallet::compact] amount: BalanceOf,\n) -> DispatchResult {\nlet who = ensure_signed(origin)?;\n// check for greater than existential deposit\nif amount < T::NativeTokenExchange::minimum_balance() {\nErr(>::InvalidAmount)?\n}\n// check not sent to self\nif who == recipient_id {\nErr(>::TransferToSelf)?\n}\nlet action =\nAction::NativeTransfer { sender: who.clone(), recipient: recipient_id, amount };\nSelf::validate_and_schedule_task(action, who, provided_id, execution_times)?;\nOk(().into())\n}\nSolution\nCompare the user's transfer amount with the balance\nStatus\nIgnored; This is expected behaviour.", - "description_summary": "User balance is not checked before transfer, allowing potential insufficient funds." 
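A sketch of the balance check suggested in finding N2 above, with plain integers standing in for the pallet's currency interface; the parameter names and error strings are assumptions for illustration.

```rust
fn schedule_native_transfer(
    sender_balance: u128,
    amount: u128,
    minimum_balance: u128,
) -> Result<(), &'static str> {
    if amount < minimum_balance {
        return Err("InvalidAmount: below existential deposit");
    }
    // The check the finding says is missing: the sender must actually
    // hold enough native tokens to cover the scheduled transfer.
    if sender_balance < amount {
        return Err("InsufficientBalance");
    }
    // ... build the NativeTransfer action and schedule the task ...
    Ok(())
}

fn main() {
    assert!(schedule_native_transfer(100, 50, 1).is_ok());
    assert!(schedule_native_transfer(10, 50, 1).is_err());
}
```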
- }, - { - "title": "Return value not checked", - "repository": "https://github.com/OAK-Foundation/OAK-blockchain", - "audited_commit": "643342e936bbc821d2fb91be69872e4fcecd2273", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/parachain-staking/src/lib.rs", - "lines": [ - 962, - 1004, - 1390 - ] - }, - { - "file_path": "pallets/parachain-staking/src/types.rs", - "lines": [ - 463 - ] - }, - { - "file_path": "pallets/parachain-staking/src/delegation_requests.rs", - "lines": [ - 279, - { - "from": 648, - "to": 651 - } - ] - }, - { - "file_path": "pallets/parachain-staking/src/migrations.rs", - "lines": [ - 404 - ] - } - ], - "reported_impact": "Low", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Others", - "description": "[N3] [Low] Return Value Not Checked\nCategory: Others\nContent\npallets/parachain-staking/src/lib.rs\n//#L962\nT::Currency::unreserve(&bond.owner, bond.amount);\n//#L1004\nT::Currency::unreserve(&candidate, state.bond);\n//#L1390\nT::Currency::unreserve(&delegator, amount);\npallets/parachain-staking/src/types.rs\n//#L463\nT::Currency::unreserve(&who, request.amount.into());\n//#L648:651\nT::Currency::unreserve(&lowest_bottom_to_be_kicked.owner, lowest_bottom_to_be_kicked.amount);\npallets/parachain-staking/src/delegation_requests.rs\n//#L279\nT::Currency::unreserve(&delegator, amount);\npallets/parachain-staking/src/migrations.rs\n//#L404\nT::Currency::unreserve(&owner, *amount);\nThe return value of unreserve needs to be checked.\nSolution\nCheck the return value.\nStatus\nIgnored; If the account has less than that locked up. Not only is this unlikely to happen, there's nothing for parachain-staking to do if it occurs.", - "description_summary": "Return value of unreserve function is not checked, risking unhandled errors." - }, - { - "title": "Calculate inaccurate risk", - "repository": "https://github.com/OAK-Foundation/OAK-blockchain", - "audited_commit": "643342e936bbc821d2fb91be69872e4fcecd2273", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/parachain-staking/src/types.rs", - "lines": null - }, - { - "file_path": "pallets/parachain-staking/src/lib.rs", - "lines": null - }, - { - "file_path": "pallets/parachain-staking/src/delegation_requests.rs", - "lines": null - } - ], - "reported_impact": "Suggestion", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Integer Overflow", - "description": "[N4] [Suggestion] Calculate inaccurate risk\nCategory: Integer Overflow Audit\nContent\npallets/parachain-staking/src/types.rs\npallets/parachain-staking/src/lib.rs\npallets/parachain-staking/src/delegation_requests.rs\nThere are some risks of value overflow.\nsaturating_mul, saturating_sub, saturating_add and +-*/, +=, -=\nsaturating at the numeric bounds instead of overflowing, The returned result is inaccurate.\nSolution\nUse checked_add/checked_sub/checked_mul/checked_div instead of\nsaturating_add/saturating_sub/saturating_mul/saturating_div and +-*/, +=, -= .\nStatus\nIgnored; These functions are performed on the Balance type. Since the same type is used for total_issuance I don't think we need to be worried about a portion of the issuance overflowing the data type.", - "description_summary": "Potential inaccurate calculation due to integer overflow; replace saturating operations with checked operations." 
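For finding N3 above, a sketch of checking unreserve's return value: in Substrate's Currency trait, unreserve returns the portion of the requested amount that could not be unreserved, and the simplified helper below mimics that contract (all types and names here are stand-ins, not the pallet's actual code).

```rust
/// Mimics the contract of Substrate's `Currency::unreserve`: returns
/// the portion of `amount` that could NOT be unreserved.
fn unreserve(reserved: &mut u128, amount: u128) -> u128 {
    let freed = amount.min(*reserved);
    *reserved -= freed;
    amount - freed
}

fn release_bond(reserved: &mut u128, amount: u128) -> Result<(), &'static str> {
    let not_unreserved = unreserve(reserved, amount);
    // The audited call sites drop this value; checking it surfaces
    // accounts that had less locked up than the pallet expected.
    if not_unreserved > 0 {
        return Err("account had less reserved than expected");
    }
    Ok(())
}

fn main() {
    let mut reserved = 100u128;
    assert!(release_bond(&mut reserved, 60).is_ok());
    assert!(release_bond(&mut reserved, 60).is_err()); // only 40 left reserved
}
```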
- }, - { - "title": "Missing logic", - "repository": "https://github.com/OAK-Foundation/OAK-blockchain", - "audited_commit": "643342e936bbc821d2fb91be69872e4fcecd2273", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/parachain-staking/src/lib.rs", - "lines": null - } - ], - "reported_impact": "Low", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Others", - "description": "[N5] [Low] Missing logic\nCategory: Others\nContent\npallets/parachain-staking/src/lib.rs\nIt is necessary to make a judgment in the case of deposit_into_existing failure. If the transfer fails, the entire transaction needs to be rolled back.\nfn prepare_staking_payouts(now: RoundIndex) {\n// payout is now - delay rounds ago => now - delay > 0 else return early\nlet delay = T::RewardPaymentDelay::get();\nif now <= delay {\nreturn;\n}\nlet round_to_payout = now.saturating_sub(delay);\nlet total_points = >::get(round_to_payout);\nif total_points.is_zero() {\nreturn;\n}\nlet total_staked = >::take(round_to_payout);\nlet total_issuance = Self::compute_issuance(total_staked);\nlet mut left_issuance = total_issuance;\n// reserve portion of issuance for parachain bond account\nlet bond_config = >::get();\nlet parachain_bond_reserve = bond_config.percent * total_issuance;\nif let Ok(imb) = T::Currency::deposit_into_existing(&bond_config.account, parachain_bond_reserve) {\n// update round issuance iff transfer succeeds\nleft_issuance = left_issuance.saturating_sub(imb.peek());\nSelf::deposit_event(Event::ReservedForParachainBond {\naccount: bond_config.account,\nvalue: imb.peek(),\n});\n}\nlet payout = DelayedPayout {\nround_issuance: total_issuance,\ntotal_staking_reward: left_issuance,\ncollator_commission: >::get(),\n};\n>::insert(round_to_payout, payout);\n}\npub(crate) fn pay_one_collator_reward(paid_for_round: RoundIndex, payout_info: DelayedPayout>, ) -> (Option<(T::AccountId, BalanceOf)>, Weight) {\n// TODO: it would probably be optimal to roll Points into the DelayedPayouts storage\n// item so that we do fewer reads each block\nlet total_points = >::get(paid_for_round);\nif total_points.is_zero() {\n// TODO: this case is obnoxious... it's a value query, so it could mean one\n// of two different logic errors:\n// 1. we removed it before we should have\n// 2. 
we called pay_one_collator_reward when we were actually done with deferred payouts\nlog::warn!(\"pay_one_collator_reward called with no > for the round!\");\nreturn (None, 0u64.into());\n}\nlet mint = |amt: BalanceOf, to: T::AccountId| {\nif let Ok(amount_transferred) = T::Currency::deposit_into_existing(&to, amt) {\nSelf::deposit_event(Event::Rewarded {\naccount: to.clone(),\nrewards: amount_transferred.peek(),\n});\n}\n};\nlet collator_fee = payout_info.collator_commission;\nlet collator_issuance = collator_fee * payout_info.round_issuance;\nif let Some((collator, pts)) = >::iter_prefix(paid_for_round).drain().next() {\nlet mut extra_weight = 0;\nlet pct_due = Perbill::from_rational(pts, total_points);\nlet total_paid = pct_due * payout_info.total_staking_reward;\nlet mut amt_due = total_paid;\n// Take the snapshot of block author and delegations\nlet state = >::take(paid_for_round, &collator);\nlet num_delegators = state.delegations.len();\nif state.delegations.is_empty() {\n// solo collator with no delegators\nmint(amt_due, collator.clone());\nextra_weight += T::OnCollatorPayout::on_collator_payout(paid_for_round, collator.clone(), amt_due, );\n} else {\n// pay collator first; commission + due_portion\nlet collator_pct = Perbill::from_rational(state.bond, state.total);\nlet commission = pct_due * collator_issuance;\namt_due = amt_due.saturating_sub(commission);\nlet collator_reward = (collator_pct * amt_due).saturating_add(commission);\nmint(collator_reward, collator.clone());\nextra_weight += T::OnCollatorPayout::on_collator_payout(paid_for_round, collator.clone(), collator_reward, );\n// pay delegators due portion\nfor Bond { owner, amount } in state.delegations {\nlet percent = Perbill::from_rational(amount, state.total);\nlet due = percent * amt_due;\nif !due.is_zero() {\nmint(due, owner.clone());\n}\n}\n}\n(Some((collator, total_paid)), T::WeightInfo::pay_one_collator_reward(num_delegators as u32) + extra_weight, )\n} else {\n// Note that we don't clean up storage here; it is cleaned up in handle_delayed_payouts()\n(None, 0u64.into())\n}\n}\nSolution\nIf the transfer fails, the transaction should be rolled back.\nStatus\nIgnored; The project party considers that it will be updated in subsequent versions.", - "description_summary": "Missing rollback logic for failed transfers in staking payouts." 
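A sketch of the rollback behaviour requested in finding N5 above: propagate a failed deposit with `?` (inside a transactional dispatch this unwinds the whole call) instead of the `if let Ok(..)` pattern that silently skips the payout. The failure mode and types below are simplified stand-ins for the pallet's Currency trait.

```rust
fn deposit_into_existing(balance: &mut u128, amount: u128) -> Result<u128, &'static str> {
    if amount == 0 {
        return Err("deposit failed"); // simplified stand-in for e.g. DeadAccount
    }
    *balance += amount;
    Ok(amount)
}

fn pay_reward(balance: &mut u128, amount: u128) -> Result<(), &'static str> {
    // Fail loudly rather than ignoring the error, so the surrounding
    // transactional extrinsic rolls back instead of losing the payout.
    let minted = deposit_into_existing(balance, amount)?;
    debug_assert_eq!(minted, amount);
    // ... emit Event::Rewarded { rewards: minted } ...
    Ok(())
}

fn main() {
    let mut collator_balance = 0u128;
    assert!(pay_reward(&mut collator_balance, 25).is_ok());
    assert!(pay_reward(&mut collator_balance, 0).is_err()); // whole call unwinds
}
```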
- }, - { - "title": "Missing error message", - "repository": "https://github.com/OAK-Foundation/OAK-blockchain", - "audited_commit": "643342e936bbc821d2fb91be69872e4fcecd2273", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/parachain-staking/src/types.rs", - "lines": null - } - ], - "reported_impact": "Low", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Malicious Event Log", - "description": "[N6] [Suggestion] Missing error message\nCategory: Malicious Event Log Audit\nContent\npallets/parachain-staking/src/types.rs\nlet new_bottom_delegation = top_delegations.delegations.pop().expect(\"\"); missing error message.\npub fn add_top_delegation(&mut self, candidate: &T::AccountId, delegation: Bond>) -> Option where BalanceOf: Into + From,\n{\nlet mut less_total_staked = None;\nlet mut top_delegations = >::get(candidate).expect(\"CandidateInfo existence => TopDelegations existence\");\nlet max_top_delegations_per_candidate = T::MaxTopDelegationsPerCandidate::get();\nif top_delegations.delegations.len() as u32 == max_top_delegations_per_candidate {\n// pop lowest top delegation\nlet new_bottom_delegation = top_delegations.delegations.pop().expect(\"\");\ntop_delegations.total = top_delegations.total.saturating_sub(new_bottom_delegation.amount);\nif matches!(self.bottom_capacity, CapacityStatus::Full) {\nless_total_staked = Some(self.lowest_bottom_delegation_amount);\n}\nself.add_bottom_delegation::(true, candidate, new_bottom_delegation);\n}\n// insert into top\ntop_delegations.insert_sorted_greatest_to_least(delegation);\n// update candidate info\nself.reset_top_data::(candidate.clone(), &top_delegations);\nif less_total_staked.is_none() {\n// only increment delegation count if we are not kicking a bottom delegation\nself.delegation_count = self.delegation_count.saturating_add(1u32);\n}\n>::insert(&candidate, top_delegations);\nless_total_staked\n}\nSolution\nRecord the corresponding error message.\nStatus\nIgnored; The project party considers that it will be updated in subsequent versions.", - "description_summary": "Missing error message in delegation handling, leaving potential issues unlogged." - } - ] - }, - { - "audited_project_id": 4, - "project_name": "Pendulum", - "auditor": "Hacken", - "audit_link": "https://audits.hacken.io/pendulum/l1-pendulum-pendulum-chain-jun2023/", - "findings": [ - { - "title": "ChainExtension Implementation Lacks Weight Charging", - "repository": "https://github.com/pendulum-chain/pendulum", - "audited_commit": "d01528d17b96bf3de72c36deb3800c2ed0cf2afb", - "reported_remediated_commit": "5b922a210b2a6705d3ea6fefbf67b317698f7b80", - "location": [ - { - "file_path": "runtime/foucoco/src/lib.rs", - "lines": null - } - ], - "reported_impact": "Medium", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Denial-of-Service (DoS)", - "description": "ChainExtension Implementation Lacks Weight Charging\nThe current implementation of the ChainExtension trait fails to charge weight when allowing smart contracts to call into the runtime.\nIDPDM-006\nScope: Chain Extension\nSeverity: MEDIUM\nVulnerability Type: Denial-of-Service (DoS)\nStatus: Fixed (5b922a210b2a6705d3ea6fefbf67b317698f7b80)\nDescription:\nThe call method in the ChainExtension trait defines how smart contracts can interact with the runtime. When a contract makes state changes by calling into the runtime, the corresponding weight should be charged. 
However, the current implementation of ChainExtension lacks any weight charging mechanism. It allows smart contracts to make calls such as transfer, approve_transfer, and transfer_approved, which are not queries and result in state changes.\nBy not charging weight for these operations, the system becomes vulnerable to potential security issues, particularly Denial-of-Service (DoS) attacks. Malicious contracts can exploit this lack of weight charging by flooding the system with a high volume of calls, overloading the network and disrupting its normal operation.\nRecommendation:\nTo mitigate this vulnerability, it is crucial to incorporate weight calculations and invoke the charge_weight function before accessing contract memory. Here is an example of how to integrate this in the code:\nlet charged_weight = env.charge_weight(sufficient_weight)?;\ntrace!(target: \"runtime\", \"[ChainExtension]|call|func_id / charge_weight:{:?}\", charged_weight);\nlet input = env.read(256)?;\nBy implementing weight charging for contract calls, the system", - "description_summary": "The ChainExtension implementation does not charge weight for runtime calls, making the system vulnerable to Denial-of-Service (DoS) attacks." - }, - { - "title": "Vector of unlimited size in the pallet", - "repository": "https://github.com/pendulum-chain/pendulum", - "audited_commit": "d01528d17b96bf3de72c36deb3800c2ed0cf2afb", - "reported_remediated_commit": "05607a1a9cd2ad3cebeff1294b2e4c34fa3e4721", - "location": [ - { - "file_path": "pallets/orml-currencies-allowance-extension/src/lib.rs", - "lines": [ - 134 - ] - } - ], - "reported_impact": "Low", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Memory exhaustion / DoS", - "description": "Vector of unlimited size in the pallet\nThe orml-currencies-allowance-extension pallet employs the usage of the Vec data structure without incorporating any size checks.\nID: PDM-007\nScope: orml-currencies-allowance-extension pallet\nSeverity: LOW\nVulnerability Type: Memory exhaustion / DoS\nStatus: Fixed (c1a20acd965cc024ac756effbff8a12522dac87a and 05607a1a9cd2ad3cebeff1294b2e4c34fa3e4721)\n\nDescription:\nWithin the orml-currencies-allowance-extension pallet, the Vec structure is utilized in the enum Event and struct GenesisConfig. Additionally, the extrinsic functions add_allowed_currencies and remove_allowed_currencies accept a vector as a parameter. Here's an example:\npallets/orml-currencies-allowance-extension/src/lib.rs:134:\n\n#[pallet::call_index(0)]\n#[pallet::weight(::WeightInfo::add_allowed_currencies())]\n#[transactional]\npub fn add_allowed_currencies(\n origin: OriginFor,\n currencies: Vec>,\n) -> DispatchResult {\n ensure_root(origin)?;\n for i in currencies.clone() {\n AllowedCurrencies::::insert(i, ());\n }\n Self::deposit_event(Event::AllowedCurrenciesAdded { currencies });\n Ok(())\n}\n\nIf an excessively large vector is provided as input to these functions, it can result in node overload due to the necessity of iterating through the entire vector. This poses a significant risk, leading to potential memory exhaustion and creating a vulnerability for denial-of-service (DoS) attacks. Although the function is restricted to root access, it's important to note that if the root account is compromised or controlled by a malicious actor, they can exploit this vulnerability by deliberately supplying large vectors. 
This would cause the node to consume excessive resources, potentially disrupting the normal operation of the system.\n\nRecommendation:\nTo mitigate the risks associated with the use of unlimited-sized vectors, it is strongly recommended to revise the implementation in the orml-currencies-allowance-extension pallet. We advise against the use of the Vec data structure within any part of the pallet. Instead, consider utilizing frame_support::BoundedVec, which offers a bounded-size variant of a vector. Alternatively, it's important to impose a cap on the length of the vector during each of its instances. By ensuring a maximum limit, the vector's length stays within manageable bounds, preventing excessively long processing times or resource consumption during iterations over this data structure.\n\nBy proactively addressing this issue, you can enhance the stability and security of the orml-currencies-allowance-extension pallet, ensuring the robustness and reliability of the overall system.", - "description_summary": "Unrestricted vector size can lead to memory exhaustion and DoS attacks." - }, - { - "title": "Employment of Sudo Pallet", - "repository": "https://github.com/pendulum-chain/pendulum", - "audited_commit": "d01528d17b96bf3de72c36deb3800c2ed0cf2afb", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "runtime/foucoco/src/lib.rs", - "lines": [ - 1509 - ] - }, - { - "file_path": "node/src/chain_spec.rs", - "lines": [ - 229 - ] - } - ], - "reported_impact": null, - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Centralization", - "description": "Employment of Sudo Pallet\nThe current implementation of the sudo FRAME pallet in the runtime employs it as an alternative to the governance mechanism.\nID: PDM-010\nScope: Decentralization\nStatus: Acknowledged\n\nDescription:\nThe sudo pallet is integrated into the runtime configuration:\nruntime/foucoco/src/lib.rs:1509:\nconstruct_runtime!(\n pub enum Runtime where\n Block = Block,\n NodeBlock = opaque::Block,\n UncheckedExtrinsic = UncheckedExtrinsic,\n {\n /* ... */\n // Governance\n Sudo: pallet_sudo::{Pallet, Call, Storage, Config, Event} = 12,\n /* ... */\n }\n);\n\nThe root account, initially set at genesis, is defined as follows:\nnode/src/chain_spec.rs:229:\nlet sudo_account =\n pallet_multisig::Pallet::::multi_account_id(&signatories[..], 3);\n\nThis configuration allows the use of the ensure_root method for operations such as add_allowed_currencies and remove_allowed_currencies. Although there is no immediate security risk associated with this setup, it raises concerns regarding potential centralization.\n\nRecommendation:\nUtilizing a root account for governance purposes is considered a less favorable design choice due to its potential for centralization and the security risks associated with compromised private keys.\n\nTo address this issue, we recommend the following actions:\n1. Comprehensive Documentation: It is crucial to thoroughly document the intended use of the sudo pallet and the root account within the project. This documentation should provide clear explanations of the potential risks and limitations associated with their usage. Both the development team and end-users should be adequately informed to ensure transparency and informed decision-making. If there are plans to deactivate the sudo functionality after the network launch, a detailed process similar to the sudo removal outlined by Polkadot should be documented.\n\n2. 
Auditing and Monitoring: Prior to the removal of the sudo pallet, it is essential to establish regular auditing and monitoring processes. These measures will help ensure that the sudo pallet and the root account are not misused and do not compromise the system's security. By conducting thorough audits and continuous monitoring, any potential vulnerabilities or misuse can be identified and addressed promptly.\n\nBy following these recommendations, you will mitigate potential centralization concerns and enhance the overall governance and security aspects of the project.", - "description_summary": "The use of the sudo pallet risks centralization and security vulnerabilities due to potential misuse of root access." - }, - { - "title": "Error Handling in Chain Extension", - "repository": "https://github.com/pendulum-chain/pendulum", - "audited_commit": "d01528d17b96bf3de72c36deb3800c2ed0cf2afb", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "runtime/foucoco/src/lib.rs", - "lines": [ - null - ] - } - ], - "reported_impact": null, - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Code Quality", - "description": "Error Handling in Chain Extension\nThe implementation of ChainExtension frequently returns the generic DispatchError::Other(\"Explanatory string\") error, which makes error handling difficult.\nID: PDM-009\nScope: Code Quality\nStatus: Acknowledged\n\nDescription:\nThe current implementation of ChainExtension frequently relies on the generic DispatchError::Other() error. This approach makes error handling challenging, particularly for developers working with smart contracts. The indiscriminate use of this error type makes it difficult to monitor and diagnose specific errors, impeding efficient troubleshooting and code improvement.\n\nRecommendation:\nTo improve error handling in the ChainExtension implementation, we recommend implementing a custom enum that covers all the required error types. This custom enum will provide a more structured approach to error handling by encapsulating specific errors and enabling precise identification and resolution of issues.\n\nBy implementing the custom enum and incorporating it into the RetVal result, developers will have access to more detailed error information, allowing them to effectively identify and handle different error scenarios. This approach enhances the overall robustness and maintainability of the codebase, as well as facilitates troubleshooting and future improvements.", - "description_summary": "Use of generic DispatchError::Other hampers error handling and troubleshooting." - }, - { - "title": "Hardcoded Constants in match", - "repository": "https://github.com/pendulum-chain/pendulum", - "audited_commit": "d01528d17b96bf3de72c36deb3800c2ed0cf2afb", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "runtime/foucoco/src/lib.rs", - "lines": [ - 964 - ] - } - ], - "reported_impact": null, - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Code Quality", - "description": "Hardcoded Constants in match\nThe presence of hardcoded constants in the match statement within the implementation of ChainExtension hampers code readability.\nID: PDM-008\nScope: Code Quality\nStatus: Acknowledged\n\nDescription:\nThe following code segment has drawn our attention:\nruntime/foucoco/src/lib.rs:964:\n\nmatch func_id {\n // transfer\n 1105 => { /* ... */ },\n // balance\n 1106 => { /* ... */ },\n // total_supply\n 1107 => { /* ...
*/ },\n // approve_transfer\n 1108 => { /* ... */ },\n // transfer_approved\n 1109 => { /* ... */ },\n // allowance\n 1110 => { /* ... */ },\n // dia price feed\n 7777 => { /* ... */ },\n _ => { /* ... */ },\n}\n\nWhile the code comments provide some understanding of the functionality associated with each func_id, it is recommended to enhance the code logic in this section to improve comprehensibility, rather than solely relying on comments.\n\nRecommendation:\nWe recommend creating a separate enum to handle all the supported func_id options, using descriptive names that correspond to each function. This approach enhances code readability and maintainability:\n\nenum FuncId {\n    Transfer,\n    Balance,\n    TotalSupply,\n    ApproveTransfer,\n    TransferApproved,\n    Allowance,\n    DiaPriceFeed,\n}\n\nimpl TryFrom<u16> for FuncId {\n    type Error = DispatchError;\n    fn try_from(func_id: u16) -> Result<Self, Self::Error> {\n        let id = match func_id {\n            1105 => Self::Transfer,\n            1106 => Self::Balance,\n            1107 => Self::TotalSupply,\n            1108 => Self::ApproveTransfer,\n            1109 => Self::TransferApproved,\n            1110 => Self::Allowance,\n            7777 => Self::DiaPriceFeed,\n            _ => {\n                error!(\"Called an unregistered `func_id`: {:?}\", func_id);\n                return Err(DispatchError::Other(\"Unimplemented func_id\"));\n            }\n        };\n        Ok(id)\n    }\n}\n\nThis enum allows for easier handling of the logic within the match statement in the call implementation.\n\nFurthermore, it is good practice to implement the logic for each function in a separate method instead of encapsulating all the code within the match arms. This promotes code modularity and readability.\n\nHere's an example of how the code would look with the recommended changes:\n\nlet func_id = FuncId::try_from(env.func_id())?;\nmatch func_id {\n    FuncId::Transfer => /* Call method that performs transfer */,\n    FuncId::Balance => /* Call method that returns balance */,\n    FuncId::TotalSupply => /* ... */,\n    FuncId::ApproveTransfer => /* ... */,\n    FuncId::TransferApproved => /* ... */,\n    FuncId::Allowance => /* ... */,\n    FuncId::DiaPriceFeed => /* ... */,\n}\n\nWith these improvements, the code becomes more comprehensible, maintainable, and follows best practices.", - "description_summary": "Hardcoded constants in match statements reduce code readability and maintainability." - }, - { - "title": "Linter Warnings", - "repository": "https://github.com/pendulum-chain/pendulum", - "audited_commit": "d01528d17b96bf3de72c36deb3800c2ed0cf2afb", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/orml-currencies-allowance-extension/src/lib.rs", - "lines": [ - 245, - 246 - ] - } - ], - "reported_impact": null, - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Linters" - }, - { - "title": "Logging in Runtime", - "repository": "https://github.com/pendulum-chain/pendulum", - "audited_commit": "d01528d17b96bf3de72c36deb3800c2ed0cf2afb", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "runtime/foucoco/src/lib.rs", - "lines": null - } - ], - "reported_impact": null, - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Code Quality", - "description": "Logging in Runtime\nThe implementation of ChainExtension contains a lot of unnecessary warn!() macros.\nID: PDM-011\nScope: Logging\nStatus: Acknowledged\n\nDescription:\nUpon reviewing the runtime code, it was observed that numerous instances of the warn!() macro are present.
While the warn macro can be useful during testing or debugging phases, it is unnecessary in the final version of the code. These superfluous warn macros clutter the codebase and can impede code readability and maintenance.\n\nUnneeded logging statements add noise to the code and distract developers from critical information. In the absence of proper justification, excessive logging can obscure important log messages and make it more challenging to identify and address genuine issues.\n\nRecommendation:\nTo enhance the code quality and maintainability of the runtime, we recommend refining the use of warn macros. By limiting the use of these macros to critical and important situations, the codebase will become more concise and easier to comprehend. This targeted approach will enable developers to focus on essential log messages that offer meaningful insights into the system's behavior, rather than being overwhelmed by extraneous warnings. Therefore, unnecessary warn macros should be reviewed and removed, with a priority on retaining only those that provide critical and important warnings.", - "description_summary": "Excessive warn!() macros in the implementation of ChainExtension clutter code and reduce maintainability." - }, - { - "title": "Pendulum build", - "repository": "https://github.com/pendulum-chain/pendulum", - "audited_commit": "d01528d17b96bf3de72c36deb3800c2ed0cf2afb", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "Cargo.toml", - "lines": null - } - ], - "reported_impact": null, - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Build Process", - "description": "Pendulum build\nThe Pendulum chain demonstrates a smooth and error-free build process.\nID: PDM-001\nScope: Build Process\nStatus: Fixed\n\nDescription:\nThe build process for the Pendulum chain is efficient and error-free. When executing the cargo build --release command, the output confirms a successful build with only one minor compiler warning related to an unused import. The build process adheres to sound Rust coding practices and follows idiomatic conventions.\n\nDuring the assessment, it was observed that the orml-currencies-allowance-extension pallet is not included as a member of the workspace, although it should be.\n\nRecommendation:\nTo address this issue, we recommend adding pallets/orml-currencies-allowance-extension to the members list in the main Cargo.toml file of the workspace.\n\nPlease note that this issue does not encompass the results of linting tools, such as Clippy, which may provide additional warnings and recommendations for improving code quality. These will be addressed separately in PDM-002.", - "description_summary": "Missing a member in the workspace configuration." 
- }, - { - "title": "Superfluous Implementation of Hooks Trait", - "repository": "https://github.com/pendulum-chain/pendulum", - "audited_commit": "d01528d17b96bf3de72c36deb3800c2ed0cf2afb", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/orml-currencies-allowance-extension/src/lib.rs", - "lines": [ - 99 - ] - } - ], - "reported_impact": null, - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Code Quality", - "description": "Superfluous Implementation of Hooks Trait\nThe Hooks trait has been declared in the orml-currencies-allowance-extension pallet, but no custom implementations have been provided.\nID: PDM-005\nScope: Code Quality\nStatus: Acknowledged\n\nDescription:\nThe following code snippet raises concerns:\npallets/orml-currencies-allowance-extension/src/lib.rs:99:\n\n#[pallet::hooks]\nimpl<T: Config> Hooks<T::BlockNumber> for Pallet<T> {}\n\nThe Hooks trait is typically used to perform logic on every block initialization, finalization, and other specific actions. However, in the case of the orml-currencies-allowance-extension pallet, no methods from the Hooks trait are implemented. While this does not cause any immediate detrimental effects, it can reduce code readability and increase complexity.\n\nRecommendation:\nTo improve code clarity and readability, it is recommended to eliminate the use of the Hooks trait in the orml-currencies-allowance-extension pallet. Since no custom implementations are provided, removing the Hooks trait declaration will simplify the code and remove unnecessary complexity.", - "description_summary": "The Hooks trait has been declared in the orml-currencies-allowance-extension pallet, but no custom implementations have been provided." - }, - { - "title": "Test Coverage", - "repository": "https://github.com/pendulum-chain/pendulum", - "audited_commit": "d01528d17b96bf3de72c36deb3800c2ed0cf2afb", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/orml-currencies-allowance-extension/src/lib.rs", - "lines": null - }, - { - "file_path": "runtime/foucoco/src/lib.rs", - "lines": null - } - ], - "reported_impact": null, - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Code Quality / Testing" - }, - { - "title": "Vulnerable and Unmaintained Dependencies", - "repository": "https://github.com/pendulum-chain/pendulum", - "audited_commit": "d01528d17b96bf3de72c36deb3800c2ed0cf2afb", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/orml-currencies-allowance-extension/Cargo.toml", - "lines": null - } - ], - "reported_impact": null, - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Dependency Management", - "description": "Vulnerable and Unmaintained Dependencies\nThe orml-currencies-allowance-extension pallet has dependencies that include one vulnerable crate and three unmaintained crates.\nID: PDM-004\nScope: Dependencies\nStatus: Acknowledged\n\nDescription:\nThe orml-currencies-allowance-extension pallet has several dependencies that raise concerns regarding their security and maintenance.
The following table provides details about these dependencies:\n\nDependency | Version | Id | Type | Remediation\n------------|---------|---------------|-----------------|--------------------------------------------------\ntime | 0.1.45 | RUSTSEC-2020-0071 | Vulnerability | Upgrade to >=0.2.23\nansi_term | 0.12.1 | RUSTSEC-2021-0139 | Unmaintained | Use alternative crates: anstyle, console, nu-ansi-term, owo-colors, stylish, yansi\nmach | 0.3.2 | RUSTSEC-2020-0168 | Unmaintained | Switch to mach2\nparity-wasm | 0.45.0 | RUSTSEC-2022-0061 | Unmaintained | Switch to wasm-tools\n\nAlthough these dependencies may not have an immediate impact on the security aspect and are part of the Substrate project, it is essential to regularly review dependencies and monitor for updates to ensure the overall security and maintenance of the project.\n\nRecommendation:\nTo address these concerns and maintain a secure codebase, it is recommended to take the following actions:\n\n1. Use cargo audit to check for any new vulnerabilities or outdated packages in your project's dependencies. Regularly perform these checks to stay updated on potential security issues.\n2. Monitor for new releases of Substrate and update your project accordingly. Keeping your project up to date with the latest Substrate releases ensures that you benefit from bug fixes, security patches, and improvements.\n\nAdditionally, it is worth noting that the latest available Substrate release at the time of writing is v0.9.43. Stay informed about new releases and evaluate the feasibility of updating your project to benefit from the latest features and improvements.", - "description_summary": "Dependencies include one vulnerable and three unmaintained crates requiring updates or replacements." - } - ] - }, - { - "audited_project_id": 5, - "project_name": "Nodle", - "auditor": "Halborn", - "audit_link": "https://github.com/HalbornSecurity/PublicReports/blob/master/Substrate%20Audits/Nodle_Nodl_Substrate_Pallet_Security_Audit_Report_Halborn_Final.pdf", - "findings": [ - { - "title": "TCR voting design should be improved", - "repository": "https://github.com/NodleCode/chain/", - "audited_commit": "de356170bfe2eb9f537e3c4861d6752dd099f43e", - "reported_remediated_commit": null, - "location": null, - "reported_impact": "3", - "reported_likelihood": "3", - "cwe_classification": null, - "vulnerability_class_audit": null, - "description": "3.1 (HAL-01) HAL-01 TCR VOTING DESIGN SHOULD BE IMPROVED - MEDIUM \n Description:\nIt was observed that it is possible to:\n\u2022 Vote with 0 amount\n\u2022 Challenge yourself\n\u2022 Counter yourself\n\u2022 Vote for yourself\nBy combining these properties, some scenarios might be possible:\n\u2022 A whale can influence any challenge/counter decision by voting for itself.\n\u2022 A whale can also farm additional tokens upon success by countering any application and then voting for itself.\n\u2022 By countering your own application and voting with 0 amount, it is possible to fill up the storage, since the values are pushed into a vector.\n\u2022 An account can remove itself from the members in the root of trust.\n\nRisk Level:\nLikelihood - 3\nImpact - 4\n\nRecommendation:\nConsider improving the design by not letting the same account:\n\u2022 Vote for itself\n\u2022 Counter itself\n\u2022 Challenge itself\n\u2022 Vote with 0 deposit\n\nRemediation Plan:\nNOT APPLICABLE: The issue is marked as not applicable by the Nodle team as the TCR and root of trust pallets will be removed.", - "description_summary": "TCR voting design allows self-voting, zero-amount
votes, and potential manipulation by large token holders." - }, - { - "title": "Denomination logic should be improved", - "repository": "https://github.com/NodleCode/chain/", - "audited_commit": "de356170bfe2eb9f537e3c4861d6752dd099f43e", - "reported_remediated_commit": "2db8b4707606bf88b8fdefbe616d67a720e434ea", - "location": [ - { - "file_path": "pallets/staking/src/lib.rs", - "lines": null - } - ], - "reported_impact": "2", - "reported_likelihood": "4", - "cwe_classification": null, - "vulnerability_class_audit": null, - "description": "3.2 (HAL-02) HAL-02 DENOMINATION LOGIC SHOULD BE IMPROVED - MEDIUM\n Description:\nIt was observed that if a nominator has a single validator, it is not possible to remove that validator through nominator_denominate, since the minimum-bond check shown below rejects the call. In that case, nominator_denominate_all has to be used, which bypasses that check; this bypass is not intentional.\n\nCode Location:\nListing 1: pallets/staking/src/lib.rs\nif !do_force {\n    ensure!(\n        remaining >= <MinNominatorTotalBond<T>>::get(),\n        <Error<T>>::NominatorBondBelowMin\n    );\n}\n\nRisk Level:\nLikelihood - 4\nImpact - 2\n\nRecommendation:\nConsider having a conditional statement in nominator_denominate that allows the forced removal of a validator if the nominator has only one validator.\n\nRemediation Plan:\nSOLVED: The issue was solved by the Nodle team.\n\u2022 Fix Commit", - "description_summary": "Denomination logic bypasses minimum bond check when only one validator is nominated." - }, - { - "title": "Emergency shutdown not used in critical functions", - "repository": "https://github.com/NodleCode/chain/", - "audited_commit": "de356170bfe2eb9f537e3c4861d6752dd099f43e", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets", - "lines": null - } - ], - "reported_impact": "4", - "reported_likelihood": "3", - "cwe_classification": null, - "vulnerability_class_audit": null, - "description": "3.3 (HAL-03) HAL-03 EMERGENCY SHUTDOWN NOT USED IN MANY CRITICAL FUNCTIONS - MEDIUM\n Description:\nIt was observed that the emergency shutdown pallet is used only in the allocate function in the allocations pallet. However, there are more public functions across different pallets that might be problematic if, at any point in time, there is a bug (security/non-security) discovered within them. There should be functionality to shut them down before new fixes are pushed.\n\nCode Location:\nThese functions should have a shutdown functionality:\n\nGrants pallet\n\u2022 add_vesting_schedule\n\nStaking pallet\n\u2022 validator_join_pool\n\u2022 validator_exit_pool\n\u2022 validator_bond_more\n\u2022 validator_bond_less\n\u2022 nominator_nominate\n\u2022 nominator_denominate\n\u2022 nominator_bond_more\n\u2022 nominator_bond_less\n\u2022 nominator_move_nomination\n\u2022 unbond_frozen\n\u2022 withdraw_unbonded\n\u2022 withdraw_staking_rewards\n\nRisk Level:\nLikelihood - 3\nImpact - 4\n\nRecommendation:\nConsider enabling shutdown functionality in critical public functions.\n\nExample Code:\nListing 2\nensure!(\n    !pallet_emergency_shutdown::Pallet::<T>::shutdown(),\n    Error::<T>::UnderShutdown\n);\n\nRemediation Plan:\nPENDING: In a future release, the Nodle team will modify the emergency shutdown pallet to better generalize.", - "description_summary": "Emergency shutdown functionality is missing in multiple critical public functions across different pallets."
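For illustration only, a minimal standalone sketch of the guard pattern recommended in HAL-03 follows; the static flag merely stands in for the emergency-shutdown pallet's on-chain storage flag, and every name in it is hypothetical:

use std::sync::atomic::{AtomicBool, Ordering};

// Stand-in for pallet_emergency_shutdown's storage flag (hypothetical).
static SHUTDOWN: AtomicBool = AtomicBool::new(false);

// Mirrors placing ensure!(!Pallet::<T>::shutdown(), Error::<T>::UnderShutdown)
// at the top of a critical extrinsic.
fn add_vesting_schedule() -> Result<(), &'static str> {
    if SHUTDOWN.load(Ordering::Relaxed) {
        return Err("UnderShutdown");
    }
    // ... the extrinsic's real logic would follow here ...
    Ok(())
}

fn main() {
    assert!(add_vesting_schedule().is_ok());
    SHUTDOWN.store(true, Ordering::Relaxed);
    assert_eq!(add_vesting_schedule(), Err("UnderShutdown"));
}

The point of the pattern is that the guard runs before any state is touched, so a single root/governance action can freeze every guarded entry point at once.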
- }, - { - "title": "Missing sanity checks", - "repository": "https://github.com/NodleCode/chain/", - "audited_commit": "de356170bfe2eb9f537e3c4861d6752dd099f43e", - "reported_remediated_commit": "2db8b4707606bf88b8fdefbe616d67a720e434ea", - "location": [ - { - "file_path": "pallets/staking/src/lib.rs", - "lines": [ - { - "from": 201, - "to": 208 - } - ] - } - ], - "reported_impact": "2", - "reported_likelihood": "3", - "cwe_classification": null, - "vulnerability_class_audit": null, - "description": "3.4 (HAL-04) HAL-04 MISSING SANITY CHECKS - LOW\n Description:\nIt was observed that the set_staking_limits privileged function is missing sanity checks on provided values. Even though it is a protected function, it is still advised to have some sanity checks to avoid any human error.\n\nCode Location:\nListing 3: pallets/staking/src/lib.rs\n201 pub fn set_staking_limits(\n    origin: OriginFor<T>,\n    max_stake_validators: u32,\n    min_stake_session_selection: BalanceOf<T>,\n    min_validator_bond: BalanceOf<T>,\n    min_nominator_total_bond: BalanceOf<T>,\n    min_nominator_chill_threshold: BalanceOf<T>\n) -> DispatchResultWithPostInfo {\n\nRisk Level:\nLikelihood - 3\nImpact - 2\n\nRecommendation:\nIt is recommended to add sanity checks to ensure:\n\u2022 max_stake_validators != 0\n\u2022 min_stake_session_selection != 0\n\u2022 min_validator_bond != 0\n\u2022 min_nominator_total_bond != 0\n\nRemediation Plan:\nSOLVED: The issue was solved by the Nodle team.\n\u2022 Fix Commit", - "description_summary": "Missing sanity checks in set_staking_limits function may allow invalid parameter values." - }, - { - "title": "Vesting to yourself is allowed", - "repository": "https://github.com/NodleCode/chain/", - "audited_commit": "de356170bfe2eb9f537e3c4861d6752dd099f43e", - "reported_remediated_commit": "91cfe0cb3300811bb02a47c4cf70f68c7e48f94d", - "location": [ - { - "file_path": "pallets/grants/src/lib.rs", - "lines": [ - { - "from": 157, - "to": 168 - } - ] - } - ], - "reported_impact": "2", - "reported_likelihood": "3", - "cwe_classification": null, - "vulnerability_class_audit": null, - "description": "3.5 (HAL-05) HAL-05 VESTING TO YOURSELF IS ALLOWED - LOW\n Description:\nIt was observed that you can create a vesting schedule for yourself.\n\nCode Location:\nListing 4: pallets/grants/src/lib.rs\n157 pub fn add_vesting_schedule(\n    origin: OriginFor<T>,\n    dest: <T::Lookup as StaticLookup>::Source,\n    schedule: VestingScheduleOf<T>\n) -> DispatchResultWithPostInfo {\n    let from = ensure_signed(origin)?;\n    let to = T::Lookup::lookup(dest)?;\n    Self::do_add_vesting_schedule(&from, &to, schedule.clone())?;\n    Self::deposit_event(Event::VestingScheduleAdded(from, to, schedule));\n    Ok(().into())\n}\n\nRisk Level:\nLikelihood - 3\nImpact - 2\n\nRecommendation:\nPlease add a check that ensures that from != to in fn add_vesting_schedule.\n\nRemediation Plan:\nSOLVED: The issue was solved by the Nodle team.\n\u2022 Fix Commit", - "description_summary": "Vesting schedule allows setting up a vesting to oneself without restrictions."
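For illustration, a minimal standalone sketch of the from != to check recommended in HAL-05; the account type and error name are simplified placeholders rather than the pallet's actual types:

#[derive(Debug, PartialEq)]
enum VestingError {
    SelfVesting,
}

// The fix recommended by HAL-05: reject schedules where the grantor
// and the beneficiary are the same account.
fn add_vesting_schedule(from: u64, to: u64) -> Result<(), VestingError> {
    if from == to {
        return Err(VestingError::SelfVesting);
    }
    // ... create the schedule ...
    Ok(())
}

fn main() {
    assert_eq!(add_vesting_schedule(1, 1), Err(VestingError::SelfVesting));
    assert!(add_vesting_schedule(1, 2).is_ok());
}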
- }, - { - "title": "Missing zero value check", - "repository": "https://github.com/NodleCode/chain/", - "audited_commit": "de356170bfe2eb9f537e3c4861d6752dd099f43e", - "reported_remediated_commit": "f31c19a06ab0047f0c533c83ba67654eedfa6147", - "location": [ - { - "file_path": "pallets/grants/src/lib.rs", - "lines": [ - 88 - ] - } - ], - "reported_impact": "2", - "reported_likelihood": "2", - "cwe_classification": null, - "vulnerability_class_audit": null, - "description": "3.6 (HAL-06) HAL-06 MISSING ZERO VALUE CHECK - LOW\n Description:\nIt was observed that the allocate function should have a zero value check on the amount argument.\n\nCode Location:\nListing 5: pallets/grants/src/lib.rs (Line 88)\n85 pub fn allocate(\n    origin: OriginFor<T>,\n    to: T::AccountId,\n    amount: BalanceOf<T>,\n    proof: Vec<u8>\n) -> DispatchResultWithPostInfo {\n    Self::ensure_oracle(origin)?;\n    ...\n\nRisk Level:\nLikelihood - 2\nImpact - 2\n\nRecommendation:\nConsider adding zero value checks to these functions to avoid performing redundant operations if a zero value is received.\n\nRemediation Plan:\nSOLVED: The issue was solved by the Nodle team.\n\u2022 Fix Commit", - "description_summary": "Allocate function lacks a zero-value check for the amount argument." - }, - { - "title": "Vesting schedules less than a current block can be created", - "repository": "https://github.com/NodleCode/chain/", - "audited_commit": "de356170bfe2eb9f537e3c4861d6752dd099f43e", - "reported_remediated_commit": null, - "location": null, - "reported_impact": "1", - "reported_likelihood": "1", - "cwe_classification": null, - "vulnerability_class_audit": null, - "description": "3.7 (HAL-07) HAL-07 VESTING SCHEDULES LESS THAN A CURRENT BLOCK CAN BE CREATED - INFORMATIONAL\n Description:\nIt was observed that the pallet allows the creation of vesting schedules that end before the current block number. Such vesting schedules are nothing more than regular transfers with extra steps and are therefore redundant.\n\nExample:\nListing 6\nCurrent Block: 100\nVesting Schedule Start: 1st Block\nPeriod: 10 Blocks\nPeriod_count: 2\nPer Period: 1 knodl\nVesting Duration: 10 * 2 + 1 = 21 Blocks\nInitial Transfer sent: 2 knodl\nNext Claims: 0 since Vesting Duration < Current Block\n\nRisk Level:\nLikelihood - 1\nImpact - 1\n\nRecommendation:\nConsider adding a check that ensures that:\n(period * period_count) + start > current_block_number\n\nRemediation Plan:\nNOT APPLICABLE: The issue was marked as not applicable by the Nodle team, stating:\nThis can be useful to keep as it is. In fact, we may have to create retroactive awards that may have been partially vested.", - "description_summary": "Vesting schedules can be created with start blocks earlier than the current block, making them redundant." - }, - { - "title": "Redundant check", - "repository": "https://github.com/NodleCode/chain/", - "audited_commit": "de356170bfe2eb9f537e3c4861d6752dd099f43e", - "reported_remediated_commit": "91cfe0cb3300811bb02a47c4cf70f68c7e48f94d", - "location": [ - { - "file_path": "pallets/grants/src/lib.rs", - "lines": [ - 253 - ] - } - ], - "reported_impact": "1", - "reported_likelihood": "1", - "cwe_classification": null, - "vulnerability_class_audit": null, - "description": "3.8 (HAL-08) HAL-08 REDUNDANT CHECK - INFORMATIONAL\n Description:\nIt was observed that the grants pallet contains a redundant check.\n\nCode Location:\nThere is no need for a second new_lock.is_zero() since it was already checked prior.
Removal of the VestingSchedule can be performed within the first check.\n\nListing 7: pallets/grants/src/lib.rs (Line 253)\n247 if new_lock.is_zero() {\n248     T::Currency::remove_lock(VESTING_LOCK_ID, &target);\n249 }\n250 else {\n251     T::Currency::set_lock(VESTING_LOCK_ID, &target, new_lock, WithdrawReasons::all());\n252 }\n253 if new_lock.is_zero() {\n254     // No more claimable, clear\n255     VestingSchedules::<T>::remove(target.clone());\n256 }\n257 else {\n258     T::Currency::set_lock(VESTING_LOCK_ID, &target, new_lock, WithdrawReasons::all());\n}\n\nRisk Level:\nLikelihood - 1\nImpact - 1\n\nRecommendation:\nPlease remove the second new_lock.is_zero() check and remove the VestingSchedule within the first check.\n\nListing 8: pallets/grants/src/lib.rs\n247 if new_lock.is_zero() {\n248     T::Currency::remove_lock(VESTING_LOCK_ID, &target);\n249     VestingSchedules::<T>::remove(target.clone());\n250 }\n251 else {\n252     T::Currency::set_lock(VESTING_LOCK_ID, &target, new_lock, WithdrawReasons::all());\n\nRemediation Plan:\nSOLVED: The issue was solved by the Nodle team.\n\u2022 Fix Commit", - "description_summary": "Redundant check of new_lock.is_zero() found in grants pallet." - }, - { - "title": "Redundant variable", - "repository": "https://github.com/NodleCode/chain/", - "audited_commit": "de356170bfe2eb9f537e3c4861d6752dd099f43e", - "reported_remediated_commit": "91cfe0cb3300811bb02a47c4cf70f68c7e48f94d", - "location": [ - { - "file_path": "pallets/tcr/src/lib.rs", - "lines": [ - { - "from": 138, - "to": 152 - }, - 478 - ] - } - ], - "reported_impact": "1", - "reported_likelihood": "1", - "cwe_classification": null, - "vulnerability_class_audit": null, - "description": "3.9 (HAL-09) HAL-09 REDUNDANT VARIABLE - INFORMATIONAL\n Description:\nIt was observed that the old_1 variable in the on_finalize function of the tcr pallet is redundant. The tuple returned from commit_applications is Ok((new_members, Vec::new())). Therefore, old_1 is always going to be an empty vector, and extending it with old_2 does not make any difference. In this scenario, we only care about new_1.\n\nCode Location:\nListing 9: pallets/tcr/src/lib.rs\n138 fn on_finalize(block: T::BlockNumber) {\n139     let (mut new_1, mut old_1) =\n140         Self::commit_applications(block).unwrap_or((Vec::new(), Vec::new()));\n141     let (new_2, old_2) =\n142         Self::resolve_challenges(block).unwrap_or((Vec::new(), Vec::new()));\n143     // Should never be the same, so should not need some uniq checks\n145     new_1.extend(new_2);\n146     old_1.extend(old_2);\n148     new_1.sort();\n149     old_1.sort();\n151     Self::notify_members_change(new_1, old_1);\n\nListing 10: pallets/tcr/src/lib.rs (Line 478)\n460 fn commit_applications(block: T::BlockNumber) -> FinalizeHelperResultFrom<T> {\n461     let new_members = <Applications<T>>::iter()\n462         .filter(|(_account_id, application)| {\n463             block\n464                 .checked_sub(&application.clone().created_block)\n465                 .expect(\"created_block should always be smaller than block; qed\")\n466                 >= T::FinalizeApplicationPeriod::get()\n})\n468         .map(|(account_id, application)| {\n469             <Applications<T>>::remove(account_id.clone());\n470             <Members<T>>::insert(account_id.clone(), application.clone());\n471             Self::unreserve_for(account_id.clone(), application.candidate_deposit);\n472             Self::deposit_event(Event::ApplicationPassed(account_id.
clone()));\n474             account_id\n475         })\n476         .collect::<Vec<_>>();\n478     Ok((new_members, Vec::new())) // === HERE ===\n}\n\nRisk Level:\nLikelihood - 1\nImpact - 1\n\nRecommendation:\nConsider omitting old_1 and removing all actions performed on it.\n\nListing 11: pallets/tcr/src/lib.rs\nfn on_finalize(block: T::BlockNumber) {\n    let (mut new_1, _) =\n        Self::commit_applications(block).unwrap_or((Vec::new(), Vec::new()));\n    let (new_2, mut old) =\n        Self::resolve_challenges(block).unwrap_or((Vec::new(), Vec::new()));\n    // Should never be the same, so should not need some uniq checks\n    new_1.extend(new_2);\n    new_1.sort();\n    old.sort();\n    Self::notify_members_change(new_1, old);\n\nRemediation Plan:\nSOLVED: The issue was solved by the Nodle team.\n\u2022 Fix Commit", - "description_summary": "Redundant variable old_1 in on_finalize function." - }, - { - "title": "Usage of vulnerable crates", - "repository": "https://github.com/NodleCode/chain/", - "audited_commit": "de356170bfe2eb9f537e3c4861d6752dd099f43e", - "reported_remediated_commit": null, - "location": null, - "reported_impact": "1", - "reported_likelihood": "2", - "cwe_classification": null, - "vulnerability_class_audit": null, - "description": "3.10 (HAL-10) HAL-10 USAGE OF VULNERABLE CRATES - INFORMATIONAL\n Description:\nIt was observed that the project uses crates with known vulnerabilities.\n\nCode Location:\nID | Package | Short Description\nRUSTSEC-2020-0159 | chrono | Potential segfault in 'localtime_r' invocations\nRUSTSEC-2020-0071 | time | Potential segfault in the time crate\nRUSTSEC-2021-0130 | lru | Use after free in lru crate\nRUSTSEC-2021-0067 | cranelift-codegen | Memory access due to code generation flaw in Cranelift module\nRUSTSEC-2021-009 | crossbeam-deque | Data race in crossbeam-deque\nRUSTSEC-2021-0079 | hyper | Integer overflow in hyper's parsing of the Transfer-Encoding header leads to data loss\nRUSTSEC-2021-0078 | hyper | Lenient hyper header parsing of Content-Length could allow request smuggling\nRUSTSEC-2021-0076 | libsecp256k1 | libsecp256k1 allows overflowing signatures\nRUSTSEC-2021-0070 | nalgebra | VecStorage Deserialize Allows Violation of Length Invariant\nRUSTSEC-2021-0073 | prost-types | Conversion from prost_types::Timestamp to SystemTime can cause an overflow and panic\nRUSTSEC-2021-0013 | raw-cpuid | Soundness issues in raw-cpuid\nRUSTSEC-2021-0089 | raw-cpuid | Optional Deserialize implementations lacking validation\nRUSTSEC-2021-0124 | tokio | Data race when sending and receiving after closing a oneshot channel\nRUSTSEC-2021-0110 | wasmtime | Multiple Vulnerabilities in Wasmtime\nRUSTSEC-2021-0115 | zeroize-derive | #[zeroize(drop)] doesn\u2019t implement Drop for enums\n\nRisk Level:\nLikelihood - 2\nImpact - 1\n\nRecommendation:\nEven if those vulnerable crates cannot impact the underlying application, it is advised to be aware of them and attempt to update them to non-vulnerable versions.
Furthermore, it is necessary to set up dependency monitoring to always be alerted when a new vulnerability is disclosed in one of the project\u2019s crates.\n\nRemediation Plan:\nACKNOWLEDGED: The issue was acknowledged by the Nodle team and will be fixed later.", - "description_summary": "Usage of crates with known vulnerabilities identified." - }, - { - "title": "Outdated Rust edition", - "repository": "https://github.com/NodleCode/chain/", - "audited_commit": "de356170bfe2eb9f537e3c4861d6752dd099f43e", - "reported_remediated_commit": "41ce7de56ff6dd701de1f5d247c91aaff6803954", - "location": [ - { - "file_path": "Cargo.toml", - "lines": null - } - ], - "reported_impact": "1", - "reported_likelihood": "1", - "cwe_classification": null, - "vulnerability_class_audit": null, - "description": "3.11 (HAL-11) HAL-11 OUTDATED RUST EDITION - INFORMATIONAL\n Description:\nIt was observed that the project is using an outdated Rust edition (2018). Recently, the 2021 Rust edition came out, which includes a lot of stability improvements and new features that might make the code more readable.\n\nCode Location:\n\u2022 Cargo.toml\n\nRisk Level:\nLikelihood - 1\nImpact - 1\n\nRecommendation:\nConsider updating Rust to the latest edition to use the latest features and stability improvements.\n\nReference:\nRust 2021 Edition Guide\n\nRemediation Plan:\nSOLVED: The issue was solved by the Nodle team.\n\u2022 Fix Commit", - "description_summary": "Project is using an outdated Rust edition (2018)." - } - ] - }, - { - "audited_project_id": 6, - "project_name": "ReefChain", - "auditor": "Halborn", - "audit_link": "https://github.com/HalbornSecurity/PublicReports/blob/master/Substrate%20Audits/Reef_Chain_Substrate_Security_Audit_Report_Halborn_v1_1.pdf", - "findings": [ - { - "title": "Integer overflow", - "repository": "https://github.com/reef-defi/reef-chain", - "audited_commit": "393d0c0821cc25ea5c6912d9cac8f61a9232c9a3", - "reported_remediated_commit": "6e4153498a28d03b8600739709cb200065c88781", - "location": [ - { - "file_path": "modules/evm-accounts/src/lib.rs", - "lines": [ - 182, - 313, - 314 - ] - }, - { - "file_path": "modules/evm-bridge/src/lib.rs", - "lines": [ - 183, - 186, - 191 - ] - } - ], - "reported_impact": "3", - "reported_likelihood": "3", - "cwe_classification": null, - "vulnerability_class_audit": null, - "description": "3.1 (HAL-01) INTEGER OVERFLOW - MEDIUM\nDescription:\nAn overflow happens when an arithmetic operation reaches the maximum size of a type. For instance, in the ethereum_signable_message() method, a single statement sums up several length values, which may end up overflowing the integer.
In computer programming, an integer overflow occurs when an arithmetic operation attempts to create a numeric value that is outside of the range that can be represented with a given number of bits\u2014either larger than the maximum or lower than the minimum representable value.\n\nCode Location:\nListing 1: modules/evm-accounts/src/lib.rs (Line 182)\n180 pub fn ethereum_signable_message(what: &[u8], extra: &[u8]) -> Vec<u8> {\n181 let prefix = b\"reef evm:\";\n182 let mut l = prefix.len() + what.len() + extra.len();\n183 let mut rev = Vec::new();\n\nListing 2: modules/evm-accounts/src/lib.rs (Lines 313,314)\n312 pub fn to_ascii_hex(data: &[u8]) -> Vec<u8> {\n313 let mut r = Vec::with_capacity(data.len() * 2);\n314 let mut push_nibble = |n| r.push(if n < 10 { b'0' + n } else { b'a' - 10 + n });\n\nListing 3: modules/evm-bridge/src/lib.rs (Lines 183,186,191)\n182 let offset = U256::from_big_endian(&output[0..32]);\n183 let length = U256::from_big_endian(&output[offset.as_usize()..offset.as_usize() + 32]);\n184 ensure!(\n// output is 32-byte aligned. ensure total_length >= offset + string length + string data length.\n186 output.len() >= offset.as_usize() + 32 + length.as_usize(),\n187 Error::<T>::InvalidReturnValue\n188 );\n189 let mut data = Vec::new();\n191 data.extend_from_slice(&output[offset.as_usize() + 32..offset.as_usize() + 32 + length.as_usize()]);\n\nRisk Level:\nLikelihood - 3\nImpact - 3\n\nRecommendations:\nIt is recommended to use vetted safe math libraries for arithmetic operations consistently throughout the smart contract system. Consider replacing the addition and multiplication operators with Rust\u2019s checked_add and checked_mul methods.\n\nRemediation:\nSOLVED: Reef fixed the issue in commit 6e4153498a28d03b8600739709cb200065c88781.", - "description_summary": "Integer overflow vulnerability due to unchecked arithmetic operations in ethereum_signable_message and to_ascii_hex functions." - }, - { - "title": "Total issuance not updated on mint", - "repository": "https://github.com/reef-defi/reef-chain", - "audited_commit": "393d0c0821cc25ea5c6912d9cac8f61a9232c9a3", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "modules/currencies/src/lib.rs", - "lines": [ - 168 - ] - } - ], - "reported_impact": "3", - "reported_likelihood": "3", - "cwe_classification": null, - "vulnerability_class_audit": null, - "description": "3.2 (HAL-02) TOTAL ISSUANCE NOT UPDATED ON MINT - MEDIUM\nDescription:\nThe update_balance dispatchable defined in modules/currencies/src/lib.rs does not update the total issuance of the currency (identified by a user-supplied ID) which is minted to the target address. This may lead to discrepancies in token data.\n\nCode Location:\nListing 4: modules/currencies/src/lib.rs (Line 168)\n159 #[pallet::weight(T::WeightInfo::update_balance_non_native_currency())]\n160 pub fn update_balance(\n161 origin: OriginFor<T>,\n162 who: <T::Lookup as StaticLookup>::Source,\n163 currency_id: CurrencyIdOf<T>,\n164 amount: AmountOf<T>,\n165 ) -> DispatchResultWithPostInfo {\n166 ensure_root(origin)?;\n167 let dest = T::Lookup::lookup(who)?;\n168 <Self as MultiCurrencyExtended<T::AccountId>>::update_balance(currency_id, &dest, amount)?;\n169 Ok(().into())\n170 }\n\nRisk Level:\nLikelihood - 3\nImpact - 3\n\nRecommendations:\nTotal issuance should be updated every time tokens are minted or burned.\n\nRemediation Plan:\nACKNOWLEDGED: Reef states that the affected function is sudo only and will be deprecated in a future release.", - "description_summary": "Total issuance is not updated on mint, causing potential discrepancies in token data."
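For illustration, a minimal standalone sketch of keeping total issuance in sync with minting, as HAL-02 recommends; the Ledger struct and its fields are hypothetical stand-ins for the pallet's storage items:

struct Ledger {
    balance: u128,
    total_issuance: u128,
}

impl Ledger {
    // Update both the account balance and the global issuance together,
    // failing on overflow, so the two values can never diverge.
    fn mint(&mut self, amount: u128) -> Result<(), &'static str> {
        let new_issuance = self.total_issuance.checked_add(amount).ok_or("issuance overflow")?;
        let new_balance = self.balance.checked_add(amount).ok_or("balance overflow")?;
        self.total_issuance = new_issuance;
        self.balance = new_balance;
        Ok(())
    }
}

fn main() {
    let mut ledger = Ledger { balance: 0, total_issuance: 0 };
    ledger.mint(1_000).unwrap();
    assert_eq!(ledger.total_issuance, 1_000);
}

Checking both additions before writing either field is what keeps the invariant atomic even on the overflow path.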
- }, - { - "title": "Casting overflow", - "repository": "https://github.com/reef-defi/reef-chain", - "audited_commit": "26ed9e88e773f5d628c01d558945cd38cd5a7d5a", - "reported_remediated_commit": "313439bb7940afa0f0d5060fbcbbe26d5a3e5298", - "location": [ - { - "file_path": "modules/evm-bridge/src/lib.rs", - "lines": [ - 183, - 186, - 191 - ] - } - ], - "reported_impact": "3", - "reported_likelihood": "2", - "cwe_classification": null, - "vulnerability_class_audit": null, - "description": "3.3 (HAL-03) CASTING OVERFLOW - LOW\nDescription:\nWhen converting or casting between types, an \u201coverflow\u201d/wrapping may occur and result in logic bugs leading to thread panic. The decode_string utility method defined in modules/evm-bridge/src/lib.rs does not validate if the values of the offset and length variables can be cast to the usize type. Although the method is not exported and available externally, the method is vulnerable still and the risk could increase in the future if the method is used before it\u2019s patched.\n\nCode Location:\nListing 5: modules/evm-bridge/src/lib.rs (Lines 183,186,191)\n182 let offset = U256::from_big_endian(&output[0..32]);\n183 let length = U256::from_big_endian(&output[offset.as_usize()..offset.as_usize() + 32]);\n184 ensure!(\n// output is 32-byte aligned. ensure total_length >= offset + string length + string data length.\n186 output.len() >= offset.as_usize() + 32 + length.as_usize(),\n187 Error::<T>::InvalidReturnValue\n188 );\n189 let mut data = Vec::new();\n191 data.extend_from_slice(&output[offset.as_usize() + 32..offset.as_usize() + 32 + length.as_usize()]);\n\nRisk Level:\nLikelihood - 2\nImpact - 3\n\nRecommendations:\nCheck the value against the maximum type value before casting.\n\nListing 6:\nif x <= U256::from(usize::MAX as u128) {\n    // logic ...\n}\n\nRemediation:\nSOLVED: Reef fixed the issue in commit 313439bb7940afa0f0d5060fbcbbe26d5a3e5298.", - "description_summary": "Casting overflow vulnerability due to unchecked type conversion in decode_string method."
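For illustration, a minimal standalone sketch of the checked-cast pattern HAL-03 recommends; u128 stands in for U256 so the example stays dependency-free:

// Validate that a wide value fits in usize before using it as an index,
// instead of silently wrapping via an unchecked cast.
fn checked_offset(raw: u128) -> Option<usize> {
    usize::try_from(raw).ok()
}

fn main() {
    assert_eq!(checked_offset(32), Some(32));
    // A value that cannot fit in usize is rejected rather than truncated.
    assert_eq!(checked_offset(u128::MAX), None);
}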
- }, - { - "title": "Slash amount validation missing", - "repository": "https://github.com/reef-defi/reef-chain", - "audited_commit": "393d0c0821cc25ea5c6912d9cac8f61a9232c9a3", - "reported_remediated_commit": "bd43bec58890be763b32bfdfd18ba85a8c0ef9e5", - "location": [ - { - "file_path": "modules/currencies/src/lib.rs", - "lines": [ - 396 - ] - } - ], - "reported_impact": "2", - "reported_likelihood": "2", - "cwe_classification": null, - "vulnerability_class_audit": null, - "description": "3.4 (HAL-04) SLASH AMOUNT VALIDATION MISSING - LOW\nDescription:\nThe slash_reserved method defined in modules/currencies/src/lib.rs does not validate if the value of the user-supplied value parameter exceeds the actual balance of the account owned by the address that is to have its ERC20 tokens slashed.\n\nCode Location:\nListing 7: modules/currencies/src/lib.rs (Line 396)\n394 fn slash_reserved(currency_id: Self::CurrencyId, who: &T::AccountId, value: Self::Balance) -> Self::Balance {\n395 match currency_id {\n396 CurrencyId::ERC20(_) => value,\n397 CurrencyId::Token(TokenSymbol::REEF) => T::NativeCurrency::slash_reserved(who, value),\n398 _ => T::MultiCurrency::slash_reserved(currency_id, who, value),\n399 }\n400 }\n\nRisk Level:\nLikelihood - 2\nImpact - 2\n\nRecommendations:\nThe slashed amount should always be lesser or equal to the account balance that is to be slashed.\n\nRemediation:\nSOLVED: Reef fixed the issue in commit bd43bec58890be763b32bfdfd18ba85a8c0ef9e5.", - "description_summary": "Missing validation in slash_reserved method allows slashing beyond account balance." - }, - { - "title": "Currency ID validation missing", - "repository": "https://github.com/reef-defi/reef-chain", - "audited_commit": "393d0c0821cc25ea5c6912d9cac8f61a9232c9a3", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "modules/currencies/src/lib.rs", - "lines": [ - 125, - 178, - 186, - 199, - 217, - 235, - 290, - 303, - 316, - 324, - 336, - 376, - 386, - 394, - 402, - 423, - 445 - ] - } - ], - "reported_impact": "2", - "reported_likelihood": "2", - "cwe_classification": null, - "vulnerability_class_audit": null, - "description": "3.5 (HAL-05) CURRENCY ID VALIDATION MISSING - LOW\nDescription:\nMany dispatchables and helper methods defined in modules/currencies/src/lib.rs do not check if the user-supplied currency ID matches any of the existing ones before calling the possibly resource-intensive underlying utility functions.\n\nCode Location:\nListing 8: modules/currencies/src/lib.rs (Line 125)\n121 #[pallet::weight(T::WeightInfo::transfer_non_native_currency())]\n122 pub fn transfer(\n123 origin: OriginFor<T>,\n124 dest: <T::Lookup as StaticLookup>::Source,\n125 currency_id: CurrencyIdOf<T>,\n126 #[pallet::compact] amount: BalanceOf<T>,\n127 ) -> DispatchResultWithPostInfo {\n128 let from = ensure_signed(origin)?;\n129 let to = T::Lookup::lookup(dest)?;\n130 <Self as MultiCurrency<T::AccountId>>::transfer(currency_id, &from, &to, amount)?;\n131 Ok(().into())\n132 }\n\nList of all the functions that fail to validate the currency ID:\nListing 9: (Lines 2,3)\n1 auditor@halborn:~/projects/reef/reef-chain/modules/currencies$ \\\n2 > grep -ne 'fn.*CurrencyId' src/lib.rs \\\n3 > | cut -d '-' -f 1\n4 178: fn minimum_balance(currency_id: Self::CurrencyId)\n5 186: fn total_issuance(currency_id: Self::CurrencyId)\n6 199: fn total_balance(currency_id: Self::CurrencyId, who: &T::AccountId)\n7 217: fn free_balance(currency_id: Self::CurrencyId, who: &T::AccountId)\n8 235: fn ensure_can_withdraw(currency_id: Self::CurrencyId, who: &T::AccountId, amount: Self::Balance)\n9
290: fn deposit(currency_id: Self::CurrencyId, who: &T::AccountId, amount: Self::Balance)\n10 303: fn withdraw(currency_id: Self::CurrencyId, who: &T::AccountId, amount: Self::Balance)\n11 316: fn can_slash(currency_id: Self::CurrencyId, who: &T::AccountId, amount: Self::Balance)\n12 324: fn slash(currency_id: Self::CurrencyId, who: &T::AccountId, amount: Self::Balance)\n13 336: fn update_balance(currency_id: Self::CurrencyId, who: &T::AccountId, by_amount: Self::Amount)\n14 376: fn remove_lock(lock_id: LockIdentifier, currency_id: Self::CurrencyId, who: &T::AccountId)\n15 386: fn can_reserve(currency_id: Self::CurrencyId, who: &T::AccountId, value: Self::Balance)\n16 394: fn slash_reserved(currency_id: Self::CurrencyId, who: &T::AccountId, value: Self::Balance)\n17 402: fn reserved_balance(currency_id: Self::CurrencyId, who: &T::AccountId)\n18 423: fn reserve(currency_id: Self::CurrencyId, who: &T::AccountId, value: Self::Balance)\n19 445: fn unreserve(currency_id: Self::CurrencyId, who: &T::AccountId, value: Self::Balance)\n\nRisk Level:\nLikelihood - 2\nImpact - 2\n\nRecommendations:\nIt is recommended to validate all user-supplied input in order to avoid executing unnecessary operations and mitigate the risk of resource exhaustion.\n\nRemediation Plan:\nACKNOWLEDGED: Reef states that there is only 1 currency id in use, and there likely won\u2019t be more going forward.", - "description_summary": "Missing validation for currency ID in multiple methods could lead to resource exhaustion." - }, - { - "title": "Vector capacity validation missing", - "repository": "https://github.com/reef-defi/reef-chain", - "audited_commit": "393d0c0821cc25ea5c6912d9cac8f61a9232c9a3", - "reported_remediated_commit": "6b826f7ca16d1a30f3fa55f0606d0b94b69b2b3a", - "location": [ - { - "file_path": "modules/evm-accounts/src/lib.rs", - "lines": [ - 313 - ] - } - ], - "reported_impact": "2", - "reported_likelihood": "1", - "cwe_classification": null, - "vulnerability_class_audit": null, - "description": "3.6 (HAL-06) VECTOR CAPACITY VALIDATION MISSING - INFORMATIONAL\nDescription:\nThe to_ascii_hex utility function defined in modules/evm-accounts/src/lib.rs, when creating a new Vec from the user-supplied data slice with the Vec::with_capacity method, does not validate if the capacity of the new vector exceeds the maximum allowed capacity.\n\nCode Location:\nListing 10: modules/evm-accounts/src/lib.rs (Line 313)\n312 pub fn to_ascii_hex(data: &[u8]) -> Vec<u8> {\n313 let mut r = Vec::with_capacity(data.len() * 2);\n314 let mut push_nibble = |n| r.push(if n < 10 { b'0' + n } else { b'a' - 10 + n });\n315 for &b in data.iter() {\n316 push_nibble(b / 16);\n317 push_nibble(b % 16);\n318 }\n319 r\n320 }\n\nRisk Level:\nLikelihood - 1\nImpact - 2\n\nRecommendations:\nValidate if the new capacity (data.len() * 2) does not exceed isize::MAX bytes.\n\nRemediation:\nSOLVED: Reef fixed the issue in commit 6b826f7ca16d1a30f3fa55f0606d0b94b69b2b3a.", - "description_summary": "Missing validation in to_ascii_hex function allows vector capacity to exceed maximum limit."
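For illustration, a minimal standalone sketch of the capacity validation HAL-06 recommends; the function name echoes to_ascii_hex, but the body is a simplified stand-in rather than Reef's implementation:

// Reject inputs whose doubled length would overflow or exceed the
// maximum addressable capacity before allocating.
fn to_ascii_hex_checked(data: &[u8]) -> Option<Vec<u8>> {
    let cap = data.len().checked_mul(2)?;
    if cap > isize::MAX as usize {
        return None;
    }
    let mut r = Vec::with_capacity(cap);
    for &b in data {
        r.push(char::from_digit((b >> 4) as u32, 16)? as u8);
        r.push(char::from_digit((b & 0x0f) as u32, 16)? as u8);
    }
    Some(r)
}

fn main() {
    assert_eq!(to_ascii_hex_checked(&[0xab]), Some(b"ab".to_vec()));
}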
- } - ] - }, - { - "audited_project_id": 7, - "project_name": "MantaNetwork", - "auditor": "Veridise", - "audit_link": "https://github.com/Manta-Network/Atlantic-Audits/blob/main/Atlantic-Veridise-Chain.pdf", - "findings": [ - { - "title": "Static fee charged despite dynamic storage accesses", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "parachain-staking/lib.rs", - "lines": null - } - ], - "reported_impact": "Medium", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Bad Extrinsic Weight", - "description": "4.1.1 V-MANC-VUL-001: Static fee charged despite dynamic storage accesses\n\nSeverity: Medium\nType: Bad Extrinsic Weight\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nFile(s): parachain-staking/lib.rs\nLocation(s): go_online, go_offline, candidate_bond_more\n\nBlockchain computations must have appropriate fees to prevent network congestion. For substrate extrinsics, these fees are set by computing an associated weight for the operation, where the weight is intended to capture the maximum computational cost. As reads from and writes to storage are expensive, these weights should consider the number of these operations that are performed. The following extrinsics, however, have a fixed weight despite requiring a dynamic number of reads or writes due to insert or remove operations being performed on CandidatePool.\n\n- go_online\n- go_offline\n- candidate_bond_more\n- execute_candidate_bond_less\n- delegate\n- execute_leave_delegators\n- delegator_bond_more\n- execute_delegation_request\n- schedule_leave_delegators\n- schedule_delegator_bond_less\n- cancel_leave_delegators\n\nAlso note that similar functions in the same pallet, such as schedule_leave_candidates, charge the users dynamic fees. An example can be seen in Snippet 4.1.\n\nImpact: As the size of the CandidatePool grows, the cost of insert and remove will increase linearly, since vector inserts in Rust are linear in the size of the vector. This allows malicious actors to add many candidates to the pool for a fixed monetary cost despite an increasing computational cost. If the size of the pool becomes too large, this could effectively create a DoS.\n\nRecommendation: Similar to schedule_leave_candidates, calculate the weights dynamically rather than charging a fixed cost.\n\nDeveloper Response: The developers have acknowledged the issue and are determining how to address it.\n\nSnippet 4.1: go_offline calls remove on the CandidatePool but charges users a fixed weight in WeightInfo::go_offline\n\n#[pallet::call_index(12)]\n#[pallet::weight(<T as Config>::WeightInfo::go_offline())]\n/// Temporarily leave the set of collator candidates without unbonding\npub fn go_offline(origin: OriginFor<T>) -> DispatchResultWithPostInfo {\n    let collator = ensure_signed(origin)?;\n    let mut state = <CandidateInfo<T>>::get(&collator).ok_or(Error::<T>::CandidateDNE)?;\n    ensure!(state.is_active(), Error::<T>::AlreadyOffline);\n    state.go_offline();\n    let mut candidates = <CandidatePool<T>>::get();\n    if candidates.remove(&Bond::from_owner(collator.clone())) {\n        <CandidatePool<T>>::put(candidates);\n    }\n    <CandidateInfo<T>>::insert(&collator, state);\n    Self::deposit_event(Event::CandidateWentOffline { candidate: collator });\n    Ok(().into())\n}", - "description_summary": "Fixed fees are charged for dynamic storage operations, potentially leading to DoS as CandidatePool size grows."
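For illustration, a minimal standalone sketch of charging a weight that grows with the candidate pool, as V-MANC-VUL-001 recommends; the constants are illustrative placeholders, not benchmarked values:

const BASE_WEIGHT: u64 = 50_000_000;
const WEIGHT_PER_CANDIDATE: u64 = 1_000_000;

// The linear term accounts for the O(n) remove on the sorted candidate vector,
// so the fee keeps pace with the actual computational cost.
fn go_offline_weight(candidate_pool_len: u64) -> u64 {
    BASE_WEIGHT.saturating_add(WEIGHT_PER_CANDIDATE.saturating_mul(candidate_pool_len))
}

fn main() {
    // A call against a large pool is charged more than one against a small pool.
    assert!(go_offline_weight(1_000) > go_offline_weight(10));
}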
- }, - { - "title": "Users can use any previously seen Merkle root", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/manta-pay/src/lib.rs", - "lines": null - } - ], - "reported_impact": "Medium", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Hash Collision", - "description": "4.1.2 V-MANC-VUL-002: Users can use any previously seen Merkle root\n\nSeverity: Medium\nType: Hash Collision\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nFile(s): pallets/manta-pay/src/lib.rs\nLocation(s): has_matching_utxo_accumulator_output\n\nThe MantaPay protocol maintains a Merkle tree on the ledger where the leaves of the ledger are the hashes of the UTXOs generated during the protocol\u2019s lifetime. In order to spend a UTXO, users must supply a ZK proof that the UTXO belongs to the Merkle tree on the ledger. The membership proof takes as input the root of the Merkle tree (public input), the inner node hashes (private inputs), and proves that the root can be derived from the inner node hashes and leaf.\n\nIdeally, the ledger would check that the root provided is equal to the latest root on-chain. However, this isn\u2019t done in practice, as the transaction could easily be front-run since every transaction changes the root. Instead, the ledger maintains a set of all previously generated roots and just checks that the root provided belongs to that set.\n\nHowever, by allowing the root provided by the user to be any previously generated root, an attacker simply needs to find a hash collision with any previously generated root to steal assets. The likelihood of finding a collision grows quadratically with the number of previously seen hashes. In particular, given an output size of b bits and n previously generated hashes, the likelihood of finding a collision with any of the n hashes is approximately n^2/2^(b+1).\n\nThe current version of the Protocol uses the Poseidon hash function, which produces 255-bit hashes and, in theory, should be safe even with billions of previously seen roots. However, this is contingent on the safety of the Poseidon hash. While there has been a significant amount of research and analysis conducted on the function, including various attacks and optimizations, there is no formal proof of its security and correctness, let alone any proofs about concrete implementations.\n\nImpact: Storing all previously seen roots significantly increases the likelihood of collisions. If any attack or weakness is found in the Poseidon hash, this can be an additional means of attacking the protocol.\n\nRecommendation: There are a few ways to mitigate this. Protocols like Semaphore maintain a timeout period TIMEOUT and associate each root with a timestamp indicating when it was created. Any root created before now() - TIMEOUT is rejected. Another option is to only store the N previously generated roots and only allow a root if it belongs to the set of N previously generated roots. The latter option would have the additional benefit of not needing to store every root on-chain.\n\nDeveloper Response: The developers acknowledged the issue and will either use a timestamp or only maintain the N previously generated roots.", - "description_summary": "Allowing any previously seen Merkle root increases the risk of hash collisions, potentially allowing asset theft."
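For illustration, a minimal standalone sketch of the "keep only the N most recent roots" mitigation mentioned in the developer response; the container and all names are hypothetical:

use std::collections::VecDeque;

// A bounded history of accepted Merkle roots; old roots age out.
struct RootHistory {
    roots: VecDeque<[u8; 32]>,
    cap: usize,
}

impl RootHistory {
    fn new(cap: usize) -> Self {
        Self { roots: VecDeque::with_capacity(cap), cap }
    }

    fn push(&mut self, root: [u8; 32]) {
        if self.roots.len() == self.cap {
            // Evict the oldest root, so stale roots stop being spendable
            // and the collision surface stays bounded at N.
            self.roots.pop_front();
        }
        self.roots.push_back(root);
    }

    fn is_known(&self, root: &[u8; 32]) -> bool {
        self.roots.contains(root)
    }
}

fn main() {
    let mut history = RootHistory::new(2);
    history.push([1; 32]);
    history.push([2; 32]);
    history.push([3; 32]); // [1; 32] is evicted
    assert!(!history.is_known(&[1; 32]));
    assert!(history.is_known(&[3; 32]));
}

Bounding the set at N both caps the n^2/2^(b+1) collision probability and avoids storing every root on-chain, which is the extra benefit the finding points out.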
- }, - { - "title": "MantaPay weights calculated with a small database", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/manta-pay/src/lib.rs", - "lines": null - } - ], - "reported_impact": "Medium", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Bad Extrinsic Weight", - "description": "4.1.3 V-MANC-VUL-003: MantaPay weights calculated with a small database\n\nSeverity: Medium\nType: Bad Extrinsic Weight\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nFile(s): pallets/manta-pay/src/lib.rs\nLocation(s): to_private, to_public, private_transfer\n\nTransactions to_public, to_private, and private_transfer take as input nullifiers and membership proofs and generate UTXOs. These UTXOs are then added to a Merkle tree on the ledger.\n\nMantaPay shards this Merkle tree into 256 buckets where each bucket has its own Merkle tree. Instead of storing the entire tree at each bucket, the Ledger just stores the last path added to the tree. When adding a UTXO, the Ledger first computes its corresponding bucket, then computes the new path pointing to that UTXO, and finally adds that path to the bucket.\n\nComputing the new path should take time proportional to log(n) where n is the size of the Merkle Tree. The current benchmarking scheme only covers cases where the previous path is small, i.e., at most size 1. However, if the number of transactions gets large, on the order of hundreds of millions or billions, then the size of the path can get to 24-28 (taking shards into account). If the tree grows to this size, this means each execution of the extrinsic will perform 24-28 hashes, multiplied by the number of UTXOs to be added.\n\nThe benchmarking scheme should take into account the size of the tree to ensure that the existing weights are sufficient to offset the computation of the new Merkle tree path.\n\nImpact: Setting the weight too low can allow users to perform a large number of transactions with little cost, potentially allowing malicious users to launch a DoS attack.\n\nRecommendation: There are several ways to address this. One strategy is to take an additional parameter corresponding to the logarithm of the Merkle Tree's size on the ledger. The weight charged can be proportional to this value. In the implementation, this value (technically 2^value) can be compared against the actual size, and the transaction will only proceed if it is larger than or equal to the actual size.\n\nAnother strategy is to benchmark the pallets by taking into account the tree's size. If MantaPay is expected not to exceed more than a billion transactions, then the pallet could be benchmarked assuming the current path length is around 24-28.\n\nDeveloper Response: The developers acknowledged that the weights should be recalculated with a more saturated database.", - "description_summary": "Inadequate benchmarking of weights for MantaPay's Merkle tree could lead to DoS attacks as the tree grows." 
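For illustration, a minimal standalone sketch of the first strategy V-MANC-VUL-003 recommends: scaling the insertion weight with the logarithm of the shard's tree size; the constant is illustrative only, and the real weight would come from benchmarking:

const WEIGHT_PER_HASH: u64 = 2_000_000;

// The Merkle path length grows with log2(n), and each level of the path
// costs one Poseidon hash per UTXO inserted.
fn insertion_weight(tree_size: u64, utxo_count: u64) -> u64 {
    let path_len = (64 - tree_size.leading_zeros() as u64).max(1); // ~ceil(log2)
    WEIGHT_PER_HASH
        .saturating_mul(path_len)
        .saturating_mul(utxo_count)
}

fn main() {
    // A near-saturated shard (~2^28 leaves) costs far more than a fresh one.
    assert!(insertion_weight(1 << 28, 2) > insertion_weight(1, 2));
}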
- }, - { - "title": "Total supply of native assets can exceed the set limit", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/asset-manager/src/lib.rs", - "lines": null - } - ], - "reported_impact": "Medium", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Logic Error", - "description": "4.1.4 V-MANC-VUL-004: Total supply of native assets can exceed the set limit\n\nSeverity: Medium\nType: Logic Error\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nFile(s): pallets/asset-manager/src/lib.rs\nLocation(s): mint_asset\n\nOne invariant underlying the correctness of MantaPay is that the total supply of an asset cannot exceed the maximum amount that can be held in a particular account. This is because MantaPay uses a dedicated account A to store the value of all the private assets. As such, A should, in principle, be able to hold all the supply in the case where all of that asset is privatized.\n\nIn more detail, when privatizing a user\u2019s public assets (via to_private), MantaPay constructs opaque UTXOs to encode the amount privatized, and then transfers those public assets into A. This transfer is expected not to fail because of the invariant described above. However, we found a case where the transfer can fail.\n\nManta enforces this invariant for NonNative assets because every time an asset is minted into an account, the total supply is increased. If the total supply would exceed the maximum that can be held in an account, the mint fails with the error Overflow. However, there is no such check for Native assets. Thus, if the total supply of Native assets exceeds the maximum that can be held in an account, u128::MAX, then to_private calls that should succeed can fail if the amount held in A is close to the maximum allowed. 
This is demonstrated in Snippet 4.2.\n\nImpact: By not constraining the amount of Native assets to be less than the maximum amount that can be held in an account, to_private transactions that should succeed will fail.\n\nRecommendation: We recommend a similar check be done for Native assets as is done for NonNative assets to enforce that the total supply cannot exceed the maximum that can be held in a given account.\n\nDeveloper Response: The developers have acknowledged the issue and are determining how to address it.\n\nSnippet 4.2: Failed to_private due to count of native assets exceeding u128::MAX\n\n#[test]\nfn public_account_issue() {\n    let mut rng = OsRng;\n    new_test_ext().execute_with(|| {\n        let asset_id = NATIVE_ASSET_ID;\n        let value = 1000u128;\n        let id = NATIVE_ASSET_ID;\n        let metadata = AssetRegistryMetadata {\n            metadata: AssetStorageMetadata {\n                name: b\"Calamari\".to_vec(),\n                symbol: b\"KMA\".to_vec(),\n                decimals: 12,\n                is_frozen: false,\n            },\n            min_balance: TEST_DEFAULT_ASSET_ED2,\n            is_sufficient: true,\n        };\n        assert_ok!(MantaAssetRegistry::create_asset(\n            id, metadata.into(), TEST_DEFAULT_ASSET_ED2,\n            true\n        ));\n        assert_ok!(FungibleLedger::<Test>::deposit_minting(id, &ALICE, 2*value));\n        assert_ok!(FungibleLedger::<Test>::deposit_minting(id, &MantaPay::account_id(), u128::MAX));\n        let mut utxo_accumulator = UtxoAccumulator::new(UTXO_ACCUMULATOR_MODEL.clone());\n        let spending_key = rng.gen();\n        let address = PARAMETERS.address_from_spending_key(&spending_key);\n        let mut authorization =\n            Authorization::from_spending_key(&PARAMETERS, &spending_key, &mut rng);\n        let asset_0 = Asset::new(Fp::from(asset_id), value);\n        let (to_private_0, pre_sender_0) = ToPrivate::internal_pair(\n            &PARAMETERS, &mut authorization.context,\n            address, asset_0,\n            Default::default(), &mut rng,\n        );\n        let to_private_0 = to_private_0\n            .into_post(\n                FullParametersRef::new(&PARAMETERS, utxo_accumulator.model()),\n                &PROVING_CONTEXT.to_private,\n                None, Vec::new(), &mut rng,\n            )\n            .expect(\"Unable to build TO_PRIVATE proof.\")\n            .expect(\"Did not match transfer shape.\");\n        assert_ok!(MantaPay::to_private(\n            MockOrigin::signed(ALICE),\n            PalletTransferPost::try_from(to_private_0).unwrap()\n        ));\n    });\n}\n", - "description_summary": "Native asset supply can exceed the maximum limit for an account, causing to_private transactions to fail." - }, - { - "title": "Missing updates in update_asset_metadata", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/asset-manager/src/lib.rs", - "lines": null - } - ], - "reported_impact": "Medium", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Logic Error", - "description": "4.1.5 V-MANC-VUL-005: Missing updates in update_asset_metadata\n\nSeverity: Medium\nType: Logic Error\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nFile(s): pallets/asset-manager/src/lib.rs\nLocation(s): update_asset_metadata\n\nManta Chain has an asset-manager pallet responsible for registering and minting assets. Each asset has a unique id and is associated with various metadata like a name, symbol, decimal places, etc. One important metadata field is min_balance. To store an account with some quantity of an asset on the ledger, the account must hold more than the min_balance quantity.
This metadata is also used when validating transfers.\n\nIn particular, many asset transfers take an \u201cexistential parameter\u201d as input, called KeepAlive, which decides what to do if the transfer would take the account\u2019s balance (with respect to the asset) below min_balance. If KeepAlive is set, the transfer will fail if the amount goes below the min_balance. If it is not set, other configurations come into play, and the account may be removed and the remaining balance burned.\n\nThe asset-manager pallet exposes an extrinsic called update_asset_metadata, which takes as input the new metadata for that asset and updates the ledger to associate the asset with that metadata. While the implementation took a new min_balance as input, it did not update the ledger to associate the asset with this metadata.\n\nAdditionally, this API also took as input a new value for the metadata is_sufficient, but similarly did not update the ledger to associate the asset with this metadata.\n\nImpact: While it is rare for the min_balance to be changed, it is sometimes necessary if it was originally set too high, for example. The current API made it appear that min_balance could be changed, so users might think the min_balance was changed when, in fact, it wasn\u2019t.\n\nRecommendation: The main issue with this extrinsic is that its interface makes it appear as though the metadata min_balance and is_sufficient can be changed when they actually are not. Either the API should be changed to only take the metadata that should be changed, or it should appropriately update min_balance and is_sufficient.\n\nDeveloper Response: The developers acknowledged this issue and are discussing two possible fixes. The first is to update both parameters in the asset pallet, and the second is to change the interface to prevent updating the min_balance or is_sufficient parameters.", - "description_summary": "update_asset_metadata does not apply updates to min_balance and is_sufficient metadata as expected." - }, - { - "title": "No slashing mechanism for collators", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "parachain-staking", - "lines": null - } - ], - "reported_impact": "Medium", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Consensus", - "description": "4.1.6 V-MANC-VUL-006: No slashing mechanism for collators\n\nSeverity: Medium\nType: Consensus\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nFile(s): parachain-staking\nLocation(s): N/A\n\nProof of Stake blockchains often have a slashing mechanism to detect poorly performing stakers and punish them. Typically, a significant portion of the staker\u2019s stake is taken by the chain as punishment for poor performance.\n\nCurrently, Manta Chain does not have any slashing mechanism. Instead, it uses a combination of social pressure and manual slashing to incentivize good behavior. Specifically, when the owners detect a poorly performing collator, they contact the collator over Discord and warn them of the poor performance. If their performance does not improve, the owners manually slash the collator\u2019s funds.\n\nWhile this approach may work when the blockchain is small, it will be challenging to enforce as the chain grows.
Therefore, we recommend that Manta Chain implement a slashing mechanism.\n\nImpact: Manta Chain\u2019s current method of using social pressure will only work with a small set of trusted collators. However, as the chain expands, this mechanism is unlikely to sufficiently incentivize collators to perform well.\n\nRecommendation: We recommend that Manta Chain establish a slashing mechanism to implement if/when the current process becomes inadequate.\n\nDeveloper Response: The developers acknowledged the lack of a slashing mechanism and plan to include one in the future if/when the current process stops working.", - "description_summary": "No automated slashing mechanism exists to penalize poorly performing collators, relying on social pressure and manual intervention." - }, - { - "title": "Collators given full rewards regardless of quality", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/parachain-staking/src/lib.rs", - "lines": null - } - ], - "reported_impact": "Medium", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Consensus", - "description": "4.1.7 V-MANC-VUL-007: Collators given full rewards regardless of quality\n\nSeverity: Medium\nType: Consensus\nCommit: 45ba60e1d\nStatus: Open\n\nFile(s): pallets/parachain-staking/src/lib.rs\nLocation(s): pay_one_collator_reward\n\nManta Chain rewards collators by first allocating a fixed number of points (20) for every block they author and then giving the collator a fixed percentage of those allocated points as rewards. However, there is no check on the quality of the blocks authored by the collator: an empty block will result in just as many rewards as a full block.\n\nCurrently, Manta relies on the owners to monitor the blocks on-chain and manually punish collators who perform poorly. However, as the chain grows, this misbehavior may not be easy to detect.\n\nOne relatively simple way to address this issue is to adjust the reward system to incentivize high quality blocks.\n\nImpact: Collators can effectively steal funds from Manta by authoring low-quality blocks (i.e., empty or partial blocks) and reaping full rewards.\n\nRecommendation: We recommend the developers adjust the rewards system to either reward the collators for high-quality blocks or punish them for authoring poor ones.\n\nDeveloper Response: TBD", - "description_summary": "Collators receive full rewards regardless of block quality, allowing low-quality blocks to earn the same as high-quality ones." - }, - { - "title": "Missing validation in pull_ledger_diff", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/manta-pay/src/lib.rs", - "lines": [ - 593 - ] - } - ], - "reported_impact": "Low", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Data Validation", - "description": "4.1.8 V-MANC-VUL-008: Missing validation in pull_ledger_diff\n\nSeverity: Low\nType: Data Validation\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nFile(s): pallets/manta-pay/src/lib.rs\nLocation(s): Line 593\n\npull_ledger_diff takes as input a Checkpoint, which is a struct with two fields: receiver_index and sender_index. It pulls sender and receiver data from the ledger, starting at sender_index (resp. 
receiver_index) up to at most sender_index + PULL_MAX_SENDER_UPDATE_SIZE (resp. receiver_index + PULL_MAX_RECEIVER_UPDATE_SIZE). However, there is no check to ensure that this sum cannot overflow for both the sender and receiver index in pull_senders, pull_receivers, pull_senders_for_shard, and pull_receivers_for_shard.\n\nImpact: If the code is compiled without the --release flag, a malicious user could crash the node by passing in bad values. If it is built with --release, the call will be reported as successful and no senders or receivers will be returned. However, if a benign end user calls the API with incorrect indexes, it might be preferable to return an Error informing them that the index is invalid.\n\nRecommendation: We recommend adding bounds checks for safety and to return an Error.\n\nDeveloper Response: The developers are aware and agree that it would be better to check and return an error.", - "description_summary": "Missing bounds checks in pull_ledger_diff could lead to overflow, potentially crashing the node." - }, - { - "title": "increase_count_of_associated_assets can overflow", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/asset-manager/src/lib.rs", - "lines": [ - 590 - ] - } - ], - "reported_impact": "Low", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Logic Error", - "description": "4.1.9 V-MANC-VUL-009: increase_count_of_associated_assets can overflow\n\nSeverity: Low\nType: Logic Error\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nFile(s): pallets/asset-manager/src/lib.rs\nLocation(s): Line 590\n\nThe asset_manager pallet maintains a mapping of paraids to a count of assets associated with that paraid. Each paraid can be associated with at most u32::MAX assets. When registering an asset or moving its location, the pallet calls increase_count_of_associated_assets, which takes as input a paraid and increments the number of assets associated with that paraid. However, this function does not check whether increasing the number of assets will result in an overflow.\n\nImpact: If the runtime is compiled using --debug, this can crash the node. However, if built under --release, the asset count will go to zero.\n\nRecommendation: Make this function check if the addition will result in an overflow, i.e., check if the current count is u32::MAX and return an error.\n\nDeveloper Response: Acknowledged", - "description_summary": "increase_count_of_associated_assets lacks overflow check, risking node crash or reset to zero in certain cases." - }, - { - "title": "Account checks are incorrect", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/manta-pay/src/lib.rs", - "lines": null - } - ], - "reported_impact": "Low", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Logic Error", - "description": "4.1.10 V-MANC-VUL-010: Account checks are incorrect.\n\nSeverity: Low\nType: Logic Error\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nFile(s): pallets/manta-pay/src/lib.rs\nLocation(s): check_sink_accounts, check_source_accounts\n\nWhen validating a transaction, the source and sink accounts are checked by check_sink_accounts and check_source_accounts. 
These functions iterate over pairs (account, value) and check that value can be safely deposited into (or withdrawn from) account. The logic is correct only if every account appears in at most one pair. While this is fine for the current APIs, if the APIs change to allow multiple sink or multiple source accounts, then this code needs to be refactored or the uniqueness needs to be enforced elsewhere.\n\nImpact: Currently there is no impact since the current APIs only allow one account for the source and sink accounts.\n\nRecommendation: To be safe, we recommend adding an additional check in the validation step to ensure the accounts are distinct for both sources and sinks.\n\nDeveloper Response: The developers acknowledged the issue and plan to add a check during validation to ensure that the accounts are distinct.", - "description_summary": "Account checks in transaction validation assume unique accounts, which may lead to issues if APIs change to allow multiple accounts." - }, - { - "title": "Unstaked user may be selected as collator", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "parachain-staking/lib.rs", - "lines": null - } - ], - "reported_impact": "Low", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Logic Error", - "description": "4.1.11 V-MANC-VUL-011: Unstaked user may be selected as collator\n\nSeverity: Low\nType: Logic Error\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nFile(s): parachain-staking/lib.rs\nLocation(s): select_top_candidates\n\nParachains use collators to combine transactions into blocks that are then checked by validators on the relay chain. Notably, this allows collators to remain relatively untrusted as validators ensure blocks are created correctly. On Manta\u2019s chain, collators are selected from a group of staked users who receive rewards for creating blocks. Requiring that collators be staked provides additional security guarantees as, if a collator misbehaves (e.g., submits no blocks for validation, submits multiple conflicting blocks), governance can step in and slash the user\u2019s staked funds. As such, unstaked collators have less incentive to maintain parachain stability and should be avoided.\n\nHowever, in the collator selection process, if no sufficiently staked collator can be found, collators from the previous round will be selected. As there is no validation on the current state of the previous collators\u2019 stake, this could select unstaked collators who lack the incentive to ensure network stability.\n\nHere is a simple test case demonstrating this:\n\n#[test]\nfn test_failed_candidate_selection() {\n    ExtBuilder::default()\n        .with_balances(vec![(10, 10)])\n        .with_candidates(vec![(10, 10)])\n        .build()\n        .execute_with(|| {\n            roll_to(2);\n            assert_ok!(ParachainStaking::schedule_leave_candidates(Origin::signed(10), 6u32));\n            roll_to(5);\n            let candidate: Vec<u64> = ParachainStaking::selected_candidates();\n            assert_ne!(candidate[0], 10u64);\n        });\n}\n\nImpact: Collators will not be incentivized to ensure network stability.
Another set of partially staked or \u201ctrusted\u201d collators could provide better stability.\n\nRecommendation: Consider maintaining a set of \u201ctrusted\u201d collators to fall back on if no staked collators can be found.\n\nDeveloper Response: Acknowledged.", - "description_summary": "Unstaked users may be selected as collators if no sufficiently staked collators are available, reducing network stability." - }, - { - "title": "XCM instructions can charge 0 weight", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "runtime/calamari/src/weights/xcm/mod.rs", - "lines": null - }, - { - "file_path": "runtime/dolphin/src/weights/xcm/mod.rs", - "lines": null - } - ], - "reported_impact": "Low", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Bad Extrinsic Weight", - "description": "4.1.12 V-MANC-VUL-012: XCM instructions can charge 0 weight\n\nSeverity: Low\nType: Bad Extrinsic Weight\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nFile(s): runtime/(calamari, dolphin)/src/weights/xcm/mod.rs\nLocation(s): Every use of weigh_multi_assets\n\nThe Polkadot ecosystem uses the XCM messaging standard to enable parachains and the relay chain to communicate with each other. For example, if a parachain P1 wants to deposit an asset onto another parachain P2, they can construct an XCM message stating they wish to deposit an asset into an account associated with P1 and send it to P2.\n\nEach XCM message consists of a sequence of low-level XCM instructions that get executed by the XCM executor on the destination parachain. To offset the cost of executing these instructions, parachains set weights for each instruction, so the sender of the XCM message is charged fees for the destination parachain executing their message.\n\nManta Chain configured the weights of multiple instructions so that senders could generate messages with a total weight of 0. For example, in the code snippet below, the function deposit_asset sets the weight for the XCM instruction deposit_asset based on a parameter called assets. When assets is an empty vector, the weigh_multi_assets function returns 0, resulting in a total weight of 0 for the instruction:\n\nfn deposit_asset(\n    assets: &MultiAssetFilter,\n    _max_assets: &u32,\n    _dest: &MultiLocation,\n) -> Weight {\n    let hardcoded_weight: u64 = 1_000_000_000;\n    let weight = assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::deposit_asset());\n    cmp::min(hardcoded_weight, weight)\n}\n\nThis setup allows malicious or incompetent senders to spam Manta with messages costing 0, even though the instruction is successfully executed by the XCM executor. While a denial of service may be unlikely due to the fast execution of 0-length vectors, spam prevention is recommended.
Adding a minimal base fee for instructions that can be executed with 0 weight would help prevent spam.\n\nImpact: Malicious users may spam Manta with XCM messages of weight 0, potentially slowing down blockchain performance and risking denial of service.\n\nRecommendation: We recommend always charging a base fee to prevent spam.\n\nDeveloper Response: The developers acknowledged the issue and plan to change weigh_multi_assets to charge the benchmarked weight of a single asset execution for any successfully executed multiasset XCM message.", - "description_summary": "XCM instructions can be executed with zero weight, allowing potential spam through 0-cost messages." - }, - { - "title": "Missing validation in set_units_per_second", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/asset-manager/src/lib.rs", - "lines": null - } - ], - "reported_impact": "Low", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Data Validation", - "description": "4.1.13 V-MANC-VUL-013: Missing validation in set_units_per_second\n\nSeverity: Low\nType: Data Validation\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nFile(s): pallets/asset-manager/src/lib.rs\nLocation(s): set_units_per_second\n\nThe asset-manager pallet manages a hashmap called UnitsPerSecond which maps assetIds to a u128 value units_per_second, used to determine the price for an XCM transfer. It provides a function, set_units_per_second, to set the units_per_second for a given asset. This value determines the cost (in the corresponding asset) to purchase a certain weight for a transaction. The following code snippet calculates the amount:\n\nlet units_per_second = M::units_per_second(&asset_id).ok_or({\n log::debug!(\n target: \"FirstAssetTrader::buy_weight\",\n \"units_per_second missing for asset with id: {:?}\",\n id,\n );\n XcmError::TooExpensive\n})?;\n\nlet amount = units_per_second * (weight as u128) / (WEIGHT_PER_SECOND as u128);\n\nif amount.is_zero() {\n return Ok(payment);\n}\n\nThis calculation of amount uses multiplication, which can overflow. Currently, both units_per_second and amount are of type u128. If units_per_second exceeds u128::MAX / u64::MAX, a large weight (u64::MAX) can be bought for a small asset amount, allowing a malicious parachain to potentially perform a DoS attack.\n\nThere is currently no validation in set_units_per_second to ensure units_per_second is small enough. Although only the root can call set_units_per_second, there is a risk if the root user mistakenly sets an excessive value or is tricked into doing so.\n\nImpact: If units_per_second is set above u128::MAX / u64::MAX, large amounts of weight can be purchased at low cost, potentially enabling a DoS attack on the chain.\n\nRecommendation: Change the type of units_per_second to map assetId to a u64 value or validate that the amount is sufficiently small.\n\nDeveloper Response: The developers acknowledged the issue and plan to fix the weight calculation to use saturating arithmetic, which should address this vulnerability.", - "description_summary": "Missing validation in set_units_per_second allows potential overflow, enabling weight to be purchased at low cost, risking DoS." 
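The overflow in the amount calculation can be ruled out with checked (or, as the developers chose, saturating) arithmetic. The following standalone sketch uses a simplified signature to make the failure mode explicit; the function name and parameters are illustrative, not the pallet's code.

/// Returns None instead of wrapping when units_per_second is excessive or the
/// divisor is zero; a saturating variant would clamp to u128::MAX instead.
fn buy_weight_amount(units_per_second: u128, weight: u64, weight_per_second: u64) -> Option<u128> {
    if weight_per_second == 0 {
        return None; // also guards the division by zero
    }
    units_per_second
        .checked_mul(weight as u128)
        .map(|product| product / weight_per_second as u128)
}

fn main() {
    // A units_per_second above u128::MAX / u64::MAX makes the product overflow:
    let excessive = u128::MAX / u64::MAX as u128 + 1;
    assert_eq!(buy_weight_amount(excessive, u64::MAX, 1_000_000_000_000), None);
}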
- }, - { - "title": "Collator is a single point of failure for a round", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", - "reported_remediated_commit": null, - "location": [ - { - "file_path": null, - "lines": null - } - ], - "reported_impact": "Low", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Consensus", - "description": "4.1.14 V-MANC-VUL-014: Collator is a single point of failure for a round\n\nSeverity: Low\nType: Consensus\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nThe Manta parachain uses the Aura consensus mechanism to select collators to author blocks. Aura selects a primary collator for a round, and only that collator is allowed to produce blocks in that round. However, if that collator goes down, then no blocks will be produced, making that collator a single point of failure.\n\nOther parachains, like Moonbeam, address this by selecting multiple collators for a given round.\n\nImpact: If a collator goes down, no blocks will be produced for that round, thereby impacting the transaction throughput of Manta.\n\nRecommendation: We recommend that Manta adopt a consensus mechanism that selects multiple collators, ideally geographically separated, so if one collator fails, the likelihood of others failing remains low.\n\nDeveloper Response: The developers are aware of this issue and have plans to move away from the Aura consensus mechanism.", - "description_summary": "Single collator selection creates a point of failure, risking block production halt if the collator goes down." - }, - { - "title": "Unchecked index calculation in spend_all", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/manta-pay/src/lib.rs", - "lines": null - } - ], - "reported_impact": "Warning", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Logic Error", - "description": "4.1.15 V-MANC-VUL-015: Unchecked index calculation in spend_all\n\nSeverity: Warning\nType: Logic Error\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nFile(s): pallets/manta-pay/src/lib.rs\nLocation(s): spend_all\n\nThe spend_all function in the Manta-Pay pallet does the following:\n1. Adds the nullifier commitments in the TransactionPost to the NullifierCommitmentSet.\n2. Inserts each (nullifier, outgoingNote) pair into the NullifierSetInsertionOrder structure.\n3. Updates a global variable, NullifierSetSize, which stores the size of the nullifier commitment set.\n\nThe index where the pair gets inserted, along with the new nullifier size, is calculated as index + i, where i is the index of the corresponding SenderPost, and index is the current size of the set. However, this arithmetic is unchecked and could result in an overflow.\n\nImpact: When the size of the commitment set reaches u64::MAX, the index calculation may overflow, causing the pair to be inserted at the beginning of the list and setting the nullifier set size to 1. However, reaching this value through normal execution is extremely unlikely.\n\nRecommendation: Add an overflow check and return an error.\n\nDeveloper Response: TBD.", - "description_summary": "Unchecked index calculation in spend_all may cause overflow, leading to incorrect data insertion." 
- }, - { - "title": "Excess fees not refunded", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "parachain-staking/lib.rs", - "lines": null - } - ], - "reported_impact": "Warning", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Bad Extrinsic Weight", - "description": "4.1.16 V-MANC-VUL-016: Excess fees not refunded\n\nSeverity: Warning\nType: Bad Extrinsic Weight\nFile(s): parachain-staking/lib.rs\nLocation(s): (cancel_leave, execute_leave, schedule_leave, join)_candidates\nCommit: 45ba60e1d\nStatus: Intended Behavior\n\nWhen a substrate extrinsic is created, its weight must be carefully considered to ensure it correctly reflects the computational cost of the operation as extrinsic weight is directly related to the fees that are charged to the user. This weight should capture the maximum number of computational resources that will be consumed by the extrinsic as excess fees can be returned. In several functions, though, the weights are computed based on the value of an argument provided by the user which might not always reflect the true cost of the computation.\n\nFor example, consider the following:\n\n1#[pallet::call_index(11)]\n2#[pallet::weight(::WeightInfo::cancel_leave_candidates(*candidate_count))]\n3/// Cancel open request to leave candidates\n4/// - only callable by collator account\n5/// - result upon successful call is the candidate is active in the candidate pool\npub fn cancel_leave_candidates(\n7origin: OriginFor,\n8#[pallet::compact] candidate_count: u32,\n) -> DispatchResultWithPostInfo {\n...\n12let mut candidates = >::get();\n13ensure!(\n14candidates.0.len() as u32 <= candidate_count,\n15Error::::TooLowCandidateCountWeightHintCancelLeaveCandidates\n);\n...\n18Ok(().into())\n}\n\nIn this function, the weight is computed using the candidate_count argument, and in order for the function to execute successfully, candidate_count must be greater than or equal to the current size of the candidate pool. A user might need to call this function with a candidate_count that is larger than the size of the pool to prevent a front-running attack where a malicious user would add candidates to prevent the transaction from executing successfully. In such a case, the weight would be larger than necessary, but no fees are returned to the user.\n\nImpact: Such functions can charge unnecessary fees to the user.\n\nRecommendation: Refund the user additional fees that are not consumed.\n\nDeveloper Response: Since these extrinsics are likely to be executed sparingly and since the additional fees are likely to be small, we feel like the additional computational cost of determining the excess does not.", - "description_summary": "Extrinsics may charge unnecessary fees without refunding excess." 
- }, - { - "title": "Assets can be registered at unsupported locations", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/asset-manager/src/lib.rs", - "lines": null - } - ], - "reported_impact": "Warning", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Data Validation", - "description": "4.1.17 V-MANC-VUL-017: Assets can be registered at unsupported locations\n\nSeverity: Warning\nType: Data Validation\nFile(s): pallets/asset-manager/src/lib.rs\nLocation(s): register_asset\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nThe asset-manager pallet allows assets to be registered, managed, and minted. In particular, register_asset takes as input an asset, location, and corresponding asset_metadata and registers the asset. Every asset must be associated with a location; however, Manta only supports assets from specific locations. The current implementation of asset-manager does not perform any validation on the locations passed into register_asset, potentially allowing assets to be registered from untested locations. The pallet also exposes a method called update_asset_location, which is supposed to update the location of an asset. It similarly does not perform any validation on the new location of the asset.\n\nImpact: The current implementation allows assets to be registered from untested locations.\n\nRecommendation: The asset-manager pallet already implements the Contains trait, which exposes a method contains that takes as input a location and returns true if and only if the location is supported. Currently, that method is unused and can be used to validate the locations passed in.\n\nDeveloper Response: The developers acknowledged the issue and plan to add a check in register_asset.", - "description_summary": "Assets can be registered without validating their location." - }, - { - "title": "Minimum delegator funds is not MinDelegatorStk", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "parachain-staking/lib.rs", - "lines": null - } - ], - "reported_impact": "Warning", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Logic Error", - "description": "4.1.18 V-MANC-VUL-018: Minimum delegator funds is not MinDelegatorStk\n\nSeverity: Warning\nType: Logic Error\nFile(s): parachain-staking/lib.rs\nLocation(s): N/A\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nIn the case where MinDelegation < MinDelegatorStk, it is possible for the delegator\u2019s staked funds to be less than MinDelegatorStk. This can occur through the following sequence of calls:\n\n1. delegate amount N from delegator D to candidate C1 where N >= MinDelegatorStk\n2. delegate amount M from delegator D to candidate C2 where M < MinDelegatorStk and M >= MinDelegation\n3. schedule_leave_candidates and execute_leave_candidates for C1\n\nThis results in D having M funds staked, where M < MinDelegatorStk.\n\nImpact: If MinDelegation is less than MinDelegatorStk, a delegator may end up with less than MinDelegatorStk funds actually staked.\n\nNote: This is not currently exploitable because MinDelegation == MinDelegatorStk in all production runtimes. 
However, if these values are adjusted in the future, this bug may become exploitable.\n\nRecommendation: There are two options:\n\n1. When starting a runtime, ensure that MinDelegation >= MinDelegatorStk\n2. Whenever a delegation is removed (such as in execute_leave_candidates), ensure that the remaining locked funds for the delegator are at least MinDelegatorStk.\n\nDeveloper Response: The developers acknowledged the issue and are considering removing MinDelegation as there is no apparent reason for it being different from MinDelegatorStk.", - "description_summary": "Delegators can have staked funds below the required MinDelegatorStk." - }, - { - "title": "Unintended test crashes", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/manta-pay/src/lib.rs", - "lines": null - } - ], - "reported_impact": "Info", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Maintainability", - "description": "4.1.19 V-MANC-VUL-019: Unintended test crashes\n\nSeverity: Info\nType: Maintainability\nFile(s): pallets/manta-pay/src/lib.rs\nLocation(s): to_private_should_work\nCommit: 45ba60e1d\nStatus: Open\n\nMany of the manta-pay tests randomly generate an asset id, total supply, and an amount to make private. To ensure the total supply of the asset is greater than the minimum balance, the minimum balance is always added to the randomly generated total supply, as seen in this test:\n\nfn to_private_should_work() {\n let mut rng = OsRng;\n for _ in 0..RANDOMIZED_TESTS_ITERATIONS {\n new_test_ext().execute_with(|| {\n let asset_id = rng.gen();\n let total_free_supply = rng.gen();\n initialize_test(asset_id, total_free_supply + TEST_DEFAULT_ASSET_ED);\n mint_private_tokens(\n asset_id,\n &value_distribution(5, total_free_supply, &mut rng),\n &mut rng,\n );\n });\n }\n}\n\nIf the random number generator generates a value for the total_free_supply which is greater than u128::MAX - TEST_DEFAULT_ASSET_ED, then the test will fail even though it is expected to succeed.\n\nImpact: May cause tests to fail when they are expected to succeed.\n\nRecommendation: Change the test to generate a value for total_free_supply between [0, u128::MAX - TEST_DEFAULT_ASSET_ED).\n\nDeveloper Response: TBD", - "description_summary": "Randomized test may fail due to unintended overflow in total supply generation." - } - ] - }, - { - "audited_project_id": 8, - "project_name": "MantaNetwork", - "auditor": "Halborn", - "audit_link": "https://github.com/Manta-Network/Atlantic-Audits/blob/main/Atlantic-Halborn-zkSBT.pdf", - "findings": [ - { - "title": "Loss of Reserved SBT IDs", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "ceb9e46cd53b77eb914ba6c17452fc238bc3a28f", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/manta-sbt/src/lib.rs", - "lines": [ - 376 - ] - } - ], - "reported_impact": "Low", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": null, - "description": "4.1 (HAL-01) LOSS OF RESERVED SBT IDS - LOW (2.5)\\nDescription:\\nCode Location:\\nThe reserve_sbt function calculates a range of IDs and stores this range in the ReservedIds storage map, using the caller\u2019s address as the key. 
It was identified that users lose their reserved SBT IDs when they call the reserve_sbt function without first minting their previously reserved SBT IDs. This occurs because the previous reserved range is overwritten.\\nBody of the reserve_sbt function:\\nListing 1: pallets/manta-sbt/src/lib.rs (Line 376)\\n368 let asset_id_range: Vec<StandardAssetId> = (0..T::MintsPerReserve::get())\\n369     .map(|_| Self::next_sbt_id_and_increment())\\n370     .collect::<Result<Vec<StandardAssetId>, _>>()?;\\n\\n372 // The range of `AssetIds` that are reserved as SBTs\\n373 let start_id: StandardAssetId = *asset_id_range.first().ok_or(Error::<T>::ZeroMints)?;\\n374 let stop_id: StandardAssetId = *asset_id_range.last().ok_or(Error::<T>::ZeroMints)?;\\n375 ReservedIds::<T>::insert(&who, (start_id, stop_id));\\nBVSS:\\nAO:A/AC:L/AX:L/C:N/I:N/A:N/D:L/Y:N/R:N/S:U (2.5)\\nProof Of Concept:\\nThis test reserves ids two times and mints one zkSBT. The first zkSBT token will not have the id 1; it will have the id 6 instead.\\nListing 2: pallets/manta-sbt/src/tests.rs\\n1 #[test]\\n2 fn hal01() {\\n3     let mut rng = OsRng;\\n4     new_test_ext().execute_with(|| {\\n5         assert_ok!(Balances::set_balance(\\n6             MockOrigin::root(),\\n7             ALICE,\\n8             1_000_000_000_000_000,\\n9             0\\n10         ));\\n11         // Reserve IDs from 1 to 5\\n12         assert_ok!(MantaSBTPallet::reserve_sbt(MockOrigin::signed(ALICE)));\\n13         // Reserve IDs from 6 to 10\\n14         assert_ok!(MantaSBTPallet::reserve_sbt(MockOrigin::signed(ALICE)));\\n15\\n16         let value = 1;\\n17         let id = field_from_id(ReservedIds::<Test>::get(ALICE).unwrap().0);\\n18         let post = sample_to_private(id, value, &mut rng);\\n19         assert_ok!(MantaSBTPallet::to_private(\\n20             MockOrigin::signed(ALICE),\\n21             Box::new(post),\\n22             bvec![0]\\n23         ));\\n\\n25         // The first zkSBT minted has the id 6.\\n26         assert_eq!(\\n27             SbtMetadata::<Test>::get(6).unwrap().extra,\\n28             Some(bvec![0])\\n29         );\\n30     });\\n31 }\\nRecommendation:\\nTo resolve this issue, it is recommended to restrict users from reserving additional SBT IDs if they have not minted their previously reserved IDs.", - "description_summary": "Users lose previously reserved SBT IDs if they reserve new ones without minting the old." - }, - { - "title": "Last SBT IDs Cannot Be Reserved", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "ceb9e46cd53b77eb914ba6c17452fc238bc3a28f", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/manta-sbt/src/lib.rs", - "lines": [ - 369, - 883 - ] - } - ], - "reported_impact": "Low", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": null, - "description": "4.2 (HAL-02) LAST SBT IDS CANNOT BE RESERVED - LOW (2.5)\\nDescription:\\nWhen users invoke the reserve_sbt function, it reserves a specific number of IDs - quantified by MintsPerReserve. The reserve_sbt function achieves this by repeatedly calling the next_sbt_id_and_increment function - as many times as the MintsPerReserve value. This next_sbt_id_and_increment function serves to return the next available ID and concurrently increment the NextSbtId storage value by 1.\\nA potential problem arises if the incrementing process results in an overflow, causing the next_sbt_id_and_increment function to throw an overflow exception, which in turn fails the ongoing transaction.
In this scenario, previously identified IDs that did not contribute to the overflow situation remain unreserved. This issue presents a concern as it could potentially lead to resource allocation inefficiencies and transaction failures.\\nCode Location:\\nBody of the reserve_sbt function, where the next zkSBT id is incremented.\\nListing 3: pallets/manta-sbt/src/lib.rs (Line 369)\\n356 pub fn reserve_sbt(origin: OriginFor<T>) -> DispatchResult {\\n    let who = ensure_signed(origin)?;\\n    // Charges fee to reserve AssetIds\\n    <T as Config>::Currency::transfer(\\n        &who,\\n        &Self::account_id(),\\n        T::ReservePrice::get(),\\n        ExistenceRequirement::KeepAlive,\\n    )?;\\n    // Reserves unique AssetIds to be used later to mint SBTs\\n368     let asset_id_range: Vec<StandardAssetId> = (0..T::MintsPerReserve::get())\\n369         .map(|_| Self::next_sbt_id_and_increment())\\n370         .collect::<Result<Vec<StandardAssetId>, _>>()?;\\nnext_sbt_id_and_increment function will overflow if the max number for u128 is surpassed\\nListing 4: pallets/manta-sbt/src/lib.rs (Line 883)\\n875 fn next_sbt_id_and_increment() -> Result<StandardAssetId, DispatchError> {\\n876     NextSbtId::<T>::try_mutate(|maybe_val| {\\n877         match maybe_val {\\n878             Some(current) => {\\n879                 let id = *current;\\n880                 *maybe_val = Some(\\n881                     current\\n882                         .checked_add(One::one())\\n883                         .ok_or(ArithmeticError::Overflow)?,\\n884                 );\\n885                 Ok(id)\\n886             }\\n887             // If storage is empty, starts at value of one (Field cannot be zero)\\n888             None => {\\n889                 *maybe_val = Some(2);\\n890                 Ok(One::one())\\n891             }\\n892         }\\n893     })\\n894 }\\nBVSS:\\nAO:A/AC:L/AX:L/C:N/I:N/A:L/D:N/Y:N/R:N/S:U (2.5)\\nProof Of Concept:\\nNote: For this Proof of Concept (PoC), the codebase was modified such that the zkSBT IDs are now u8 instead of u128. This alteration reduces the time needed to demonstrate that the function fails in this edge-case scenario.\\nListing 5: pallets/manta-sbt/src/tests.rs\\n236 #[test]\\n237 fn hal02() {\\n238     new_test_ext().execute_with(|| {\\n239         assert_ok!(Balances::set_balance(\\n240             MockOrigin::root(),\\n241             ALICE,\\n242             1_000_000_000_000_000,\\n243             0\\n244         ));\\n245         for _ in 1..51 {\\n246             assert_ok!(MantaSBTPallet::reserve_sbt_bis(MockOrigin::signed(ALICE)));\\n247             println!(\"First id: {} - Last id: {}\", ReservedIdsBis::<Test>::get(ALICE).unwrap().0, ReservedIdsBis::<Test>::get(ALICE).unwrap().1);\\n248         }\\n250         assert_noop!(MantaSBTPallet::reserve_sbt_bis(MockOrigin::signed(ALICE)), ArithmeticError::Overflow);\\n251     });\\n252 }\\nIn this test, we reserve all available IDs, excluding the last five. Attempting to reserve the last ID will cause the StandardAssetId value to overflow, resulting in a failure.\\nRecommendation:\\nTo address this issue, it is recommended to implement a check to determine whether the value of the StandardAssetId has reached the maximum value for u128, which can prevent overflow. This measure will prevent the exception from occurring.", - "description_summary": "Overflow in next_sbt_id_and_increment causes unreserved IDs and transaction failure."
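Both HAL-01 and HAL-02 can be addressed by guards at the top of reserve_sbt, sketched here in hedged pallet-style pseudocode: the AlreadyReserved error variant and the exact bound arithmetic are assumptions, not the shipped fix.

// 1. HAL-01: refuse a new reservation while an unminted one still exists.
ensure!(
    !ReservedIds::<T>::contains_key(&who),
    Error::<T>::AlreadyReserved // hypothetical error variant
);
// 2. HAL-02: confirm the whole batch [next, next + MintsPerReserve) fits in
//    the id space, so the extrinsic fails cleanly with a domain error instead
//    of overflowing partway through the allocation loop.
let next = NextSbtId::<T>::get().unwrap_or(1);
next.checked_add(u128::from(T::MintsPerReserve::get()))
    .ok_or(ArithmeticError::Overflow)?;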
- }, - { - "title": "Downcasting of 64-Bit Integer", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "ceb9e46cd53b77eb914ba6c17452fc238bc3a28f", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/manta-sbt/src/lib.rs", - "lines": [ - 783 - ] - }, - { - "file_path": "pallets/manta-support/src/manta_pay.rs", - "lines": [ - 874, - 1095, - 1096 - ] - }, - { - "file_path": "runtime/calamari/src/migrations/staking.rs", - "lines": [ - 70 - ] - } - ], - "reported_impact": "Low", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": null, - "description": "4.3 (HAL-03) DOWNCASTING OF 64-BIT INTEGER - LOW (2.5)\\nDescription:\\nIt was observed that in certain circumstances, usize values are cast to types such as u8 and u32. The usize data type in the Rust programming language represents a pointer-sized unsigned integer. The actual size of usize is dependent on the platform: it\u2019s 32 bits on a 32-bit platform and 64 bits on a 64-bit platform. Consequently, depending on the system, there could be a cast from an u64 to an u32. This implies that an attempt could be made to store a value larger than the maximum value that can be held in an u32, leading to unexpected consequences.\\nCode Location:\\nFINDINGS & TECH DETAILS\\nUsize is casted to u8:\\nListing 6: pallets/manta-sbt/src/lib.rs (Line 783)\\n768 fn pull_receivers (\\n769 receiver_indices : [ usize ; MerkleTreeConfiguration :: FOREST_WIDTH ],\\n770 max_update_request : u64 ,\\n771 ) -> ( bool , ReceiverChunk ) {\\n772 let mut more_receivers = false ;\\n773 let mut receivers = Vec :: new () ;\\n774 let mut receivers_pulled : u64 = 0;\\n775 let max_update = if max_update_request > Self :: PULL_MAX_RECEIVER_UPDATE_SIZE {\\n776 Self :: PULL_MAX_RECEIVER_UPDATE_SIZE\\n777 } else {\\n778 max_update_request\\n779 };\\n781 for ( shard_index , utxo_index ) in receiver_indices . into_iter () . enumerate () {\\n782 more_receivers |= Self :: pull_receivers_for_shard (\\n783 shard_index as u8 ,\\n784 utxo_index ,\\n785 max_update ,\\n786 & mut receivers ,\\n787 & mut receivers_pulled ,\\n788 );\\n790 if receivers_pulled == max_update && more_receivers {\\n791 break ;\\n792 }\\n794 ( more_receivers , receivers )\\n795 }\\nUsize is casted to u32:\\nListing 7: pallets/manta-support/src/manta_pay.rs (Line 860)\\n867 impl TryFrom < merkle_tree :: CurrentPath < MerkleTreeConfiguration > > for CurrentPath {\\n868 type Error = Error ;\\n871 fn try_from ( path : merkle_tree :: CurrentPath < MerkleTreeConfiguration >) -> Result < Self , Error > {\\n872 Ok ( Self {\\n873 sibling_digest : fp_encode ( path . sibling_digest )? ,\\n874 leaf_index : path . inner_path . leaf_index .0 as u32 ,\\n875 inner_path : path . inner_path . path . into_iter () . map ( fp_encode ) . collect :: < Result <_ , _ > >() ? ,\\n881 })\\n883 }\\nListing 8: pallets/manta-support/src/manta_pay.rs (Lines 1095,1096)\\n1091 impl From < RawCheckpoint > for Checkpoint {\\n1094 Self :: new ( checkpoint . receiver_index . map (| i| i as usize ) . into () , checkpoint . sender_index as usize ,)\\nListing 9: runtime/calamari/src/migrations/staking.rs (Line 70)\\n70 let n_of_candidates = manta_collator_selection :: Pallet :: :: candidates () . len () as u32 ;\\nRecommendation:\\nTo address this issue, it is recommended to check the value against the maximum value before casting.", - "description_summary": "Casting usize to smaller types may cause data loss and unexpected behavior." 
- }, - { - "title": "Unchecked Math Could Impact Weight Calculation", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "ceb9e46cd53b77eb914ba6c17452fc238bc3a28f", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "runtime/calamari/src/fee.rs", - "lines": [ - 76 - ] - }, - { - "file_path": "primitives/manta/src/xcm.rs", - "lines": [ - 183, - 251, - 252 - ] - }, - { - "file_path": "runtime/manta/src/fee.rs", - "lines": [ - 47, - 52 - ] - }, - { - "file_path": "runtime/common/src/lib.rs", - "lines": [ - 115 - ] - }, - { - "file_path": "primitives/manta/src/constants.rs", - "lines": [ - 110 - ] - } - ], - "reported_impact": "Low", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": null, - "description": "4.4 (HAL-04) UNCHECKED MATH COULD IMPACT WEIGHT CALCULATION - LOW (2.5)\\nDescription:\\nIt was identified that several areas in the buy_weight and the refund_weight functions that could potentially benefit from enhanced computational checks. Currently, despite numerous instances of proven arithmetic calculations, the function does not have a mechanism to handle situations where underflow or overflow states might occur.\\nWhile these states haven\u2019t been identified as potential risks for exploitation, implementing additional safeguards to account for them will be beneficial.\\nAnother point of consideration pertains to the WEIGHT_PER_SECOND value. This value serves as a divisor in computing the number of tokens required for payment or refund during the weight purchasing procedure. While it is predetermined as a constant during the system\u2019s compilation, it currently lacks a constraint to assure that it never equals zero. This is a significant potential risk as it could result in a system panic if the value happens to be zero, causing a division by zero error. Moreover, as the WEIGHT_PER_SECOND value is also used in calculations elsewhere in the system, this issue could potentially affect other sections of the codebase as well.\\nCode Location:\\nUnsafe multiplication in the tests multiplier_growth_simulator_and_congestion_budget_test:\\nListing 10: runtime/calamari/src/fee.rs (Line 76)\\n69 #[ test ]\\n#[ ignore ] // This test should not fail CI\\n71 fn multiplier_growth_simulator_and_congestion_budget_test () {\\n72 let target_daily_congestion_cost_usd = 100 _000 ;\\n73 let kma_price = fetch_kma_price () . unwrap () ;\\n74 println! (\" KMA / USD price as read from CoinGecko = { kma_price } \") ;\\n75 let target_daily_congestion_cost_kma =\\n76 ( target_daily_congestion_cost_usd as f32 / kma_price * KMA as f32 ) as u128 ;\\nUnsafe multiplication in buy_weight function\\nListing 11: primitives/manta/src/xcm.rs (Line 183)\\n146 fn buy_weight (& mut self , weight : Weight , payment : Assets ) -> Result < Assets > {\\n153 let first_asset = payment . fungible_assets_iter () . next () . ok_or ({\\n160 XcmError :: TooExpensive }) ?;\\n183 let amount = units_per_second * ( weight as u128 ) / ( WEIGHT_PER_SECOND as u128 ) ;\\nUnsafe subtraction in refund_weight function\\nListing 12: primitives/manta/src/xcm.rs (Line 251)\\n248 fn refund_weight (& mut self , weight : Weight ) -> Option < MultiAsset > {\\n251 self . 
weight -= weight ;\\n252 let amount = * units_per_second * ( weight as u128 ) / ( WEIGHT_PER_SECOND as u128 ) ;\\nPlaces where WEIGHT_PER_SECOND is used as a divisor:\\n\u2022 Function refund_weight\\nListing 13: primitives/manta/src/xcm.rs (Line 252)\\n\u2022 Function buy_weight:\\nListing 14: primitives/manta/src/xcm.rs (Line 183)\\nThe following snippets show how the q divisor is calculated and how it\u2019s equal to zero if WEIGHT_PER_SECOND is zero too.\\nListing 15: runtime/manta/src/fee.rs (Lines 47,52)\\nRecommendation:\\nWe recommend a review of these identified areas to ensure that adequate arithmetic checks are in place and a safety constraint is set for WEIGHT_PER_SECOND to prevent it from reaching zero. These improvements will further fortify the system, ensuring stability, reliability, and secure operation.\\n\u2022 It is recommended to add a constraint to ensure that WEIGHT_PER_SECOND is never 0.\\n\u2022 In \u201crelease\u201d mode, Rust does not panic! due to overflows and overflowed values simply \u201cwrap\u201d without any explicit feedback to the user. It is recommended to use vetted safe math libraries for arithmetic operations consistently throughout the smart contract system. Consider replacing the multiplication operator with Rust\u2019s checked_mul method, the subtraction operator with Rust\u2019s checked_subs method, and so on.", - "description_summary": "Unchecked math in weight calculations may lead to overflow, underflow, or division by zero errors." - } - ] - }, - { - "audited_project_id": 9, - "project_name": "MantaNetwork", - "auditor": "Veridise", - "audit_link": "https://github.com/Manta-Network/Atlantic-Audits/blob/main/Atlantic-Veridise-zkSBT.pdf", - "findings": [ - { - "title": "SBT reservations can be overwritten", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "ceb9e46cd53b77eb914ba6c17452fc238bc3a28f", - "reported_remediated_commit": "be7f1c7a8b7d35a84f657854032b2bf3b7e01ab8", - "location": [ - { - "file_path": "pallets/manta-sbt/src/lib.rs", - "lines": null - } - ], - "reported_impact": "Medium", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Logic Error", - "description": "4.1 Detailed Description of Issues\n\nV-MSBT-VUL-001: SBT reservations can be overwritten\n\nSeverity: Medium\nType: Logic Error\nCommit: ceb9e46\nStatus: Fixed\nFile(s): pallets/manta-sbt/src/lib.rs\nLocation(s): reserve_sbt\n\nThe manta-sbt pallet exposes a method reserve_sbt whereby callers pay MANTA tokens to reserve the right to mint N SBTs. This reservation is enforced by allocating reservation ids to the caller of the method so that when users mint an SBT, Manta Chain uses one of the reserved ids to track the number reserved. In particular, Manta Chain maintains a map called ReservedIds which maps users to an interval of reservation ids such that the length of the interval indicates the number of SBTs they can mint.\n\nHowever, reserve_sbt does not check whether a user has already reserved SBTs to mint when calling the method and simply updates the mapping to a new interval of length N. 
This can be seen in the method implementation below:\n\n/// Reserves AssetIds to be used subsequently in 'to_private' above.\n\n/// Increments AssetManager\u2019s AssetId counter.\n\n#[pallet::call_index(1)]\n#[pallet::weight(<T as Config>::WeightInfo::reserve_sbt())]\n#[transactional]\npub fn reserve_sbt(origin: OriginFor<T>) -> DispatchResult {\n    let who = ensure_signed(origin)?;\n\n    // Charges fee to reserve AssetIds\n    <T as Config>::Currency::transfer(\n        &who,\n        &Self::account_id(),\n        T::ReservePrice::get(),\n        ExistenceRequirement::KeepAlive,\n    )?;\n\n    // Reserves unique AssetIds to be used later to mint SBTs\n    let asset_id_range: Vec<StandardAssetId> = (0..T::MintsPerReserve::get())\n        .map(|_| Self::next_sbt_id_and_increment())\n        .collect::<Result<Vec<StandardAssetId>, _>>()?;\n\n    // The range of 'AssetIds' that are reserved as SBTs\n    let start_id: StandardAssetId = *asset_id_range.first().ok_or(Error::<T>::ZeroMints)?;\n    let stop_id: StandardAssetId = *asset_id_range.last().ok_or(Error::<T>::ZeroMints)?;\n\n    ReservedIds::<T>::insert(&who, (start_id, stop_id));\n    Self::deposit_event(Event::<T>::SBTReserved { who, start_id, stop_id });\n    Ok(())\n}\n\nThus, if a user calls the method M times to reserve M*N SBTs, then they will only be able to mint N.\n\nImpact: Users can lose money because they may think they have reserved M*N SBTs when they in fact can only mint N. Furthermore, if a user calls reserve_sbt M times in a row, then M*(N-1) SBTs can no longer be minted. This is due to the fact that reserved ids are always incremented.\n\nFor example, suppose someone sets up a relayer account which users could use to purchase zkSBTs for themselves without being linked to the transaction. That relayer account would only receive SBTs on its last call to reserve_sbt. Implementations in which the relayer reserves and sends the SBTs separately may be error-prone.\n\nRecommendation: If at most N SBTs can be reserved at a time for a user, then reserve_sbt should check whether a user has already reserved SBTs. If the protocol permits more than N SBTs to be reserved at a time for a user, then the mapping should be changed. One option might be to map each user to a set of intervals corresponding to ids reserved for them.\n\nDeveloper Response: The developers have acknowledged the issue and a fix has been proposed in this pull request. The fix changes the function to revert if the user has SBT ids already reserved.", - "description_summary": "SBT reservations can be overwritten, causing users to lose reserved tokens." - }, - { - "title": "Extrinsics charge static fees that do not account for Merkle tree updates", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "ceb9e46cd53b77eb914ba6c17452fc238bc3a28f", - "reported_remediated_commit": "f0ed5957c3bc87b4a4a2631f26aceefb9607f069", - "location": [ - { - "file_path": "pallets/manta-sbt/src/lib.rs", - "lines": null - } - ], - "reported_impact": "Low", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Bad Extrinsic Weight", - "description": "4.1.2 V-MSBT-VUL-002: Extrinsics charge static fees that do not account for Merkle tree updates\n\nSeverity: Low\nType: Bad Extrinsic Weight\nCommit: ceb9e46\nStatus: Fixed\nFile(s): pallets/manta-sbt/src/lib.rs\nLocation(s): to_private, mint_sbt_eth\n\nTransactions to_private and mint_sbt_eth take membership proofs and store UTXOs to a Merkle tree on the ledger. manta-sbt shards this Merkle tree into 256 buckets where each bucket has its own Merkle tree.
Instead of storing the entire tree at each bucket, the Ledger just stores the last path added to the tree. When adding a UTXO, the Ledger first computes its corresponding bucket, then computes the new path pointing to that UTXO, and finally adds that path to the bucket.\n\nComputing the new path should take time proportional to log(n) where n is the size of the Merkle Tree. The current benchmarking scheme only covers cases where the previous path is small i.e, at most size 1. However, if the number of transactions gets large i.e, is on the order of hundreds of millions or billions, then the size of the path can get to 24-28 (taking shards into account). If the tree grows to this size, this means each execution of the extrinsic will perform 24-28 hashes, multiplied by the number of UTXOs to be added.\n\nThe benchmarking scheme should take into account the size of the tree to make sure that the existing weights are enough to offset the computation of the new Merkle tree path.\n\nImpact: In general, it is important to set the weights to account for both computation and storage; setting the weight too low can allow users to perform a large number of transactions with little cost. In particular, malicious users may take advantage of the low fee to launch a DOS attack.\n\nRecommendation: We recommend that the weights be computed with a larger database that reflects the state of the chain after a year\u2019s worth of use.\n\nDeveloper Response: In progress open PR for manta-pay right now that will be extended to pallet SBT. The manta-pay PR can be found here.", - "description_summary": "Static fees for extrinsics do not account for Merkle tree updates, risking underpriced transactions." - }, - { - "title": "Missing validation in pull_ledger_diff", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "ceb9e46cd53b77eb914ba6c17452fc238bc3a28f", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/manta-sbt/src/lib.rs", - "lines": null - } - ], - "reported_impact": "Warning", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Data Validation", - "description": "4.1.3 V-MSBT-VUL-003: Missing validation in pull_ledger_diff\n\nSeverity: Warning\nType: Data Validation\nCommit: ceb9e46\nStatus: Acknowledged\nFile(s): pallets/manta-sbt/src/lib.rs\nLocation(s): pull_ledger_diff\n\npull_ledger_diff takes as input a Checkpoint which is a struct of two fields receiver_index and sender_index and pulls receiver data from the ledger starting at receiver_index up till at most receiver_index + PULL_MAX_RECEIVER_UPDATE_SIZE. However, there is no check that this sum cannot overflow for both the sender and receiver index in pull_receivers and pull_receivers_for_shard.\n\nImpact: If the code is compiled without the --release flag then a malicious user could crash the node by passing in bad values. If it is built with --release then the call will be reported as successful and no senders or receivers will be returned. However, if a benign end user is calling the API with incorrect indexes it might be better to return an error informing them that the index is invalid.\n\nRecommendation: We recommend adding bounds checks to be safe and to return an Error.\n\nDeveloper Response: The developers acknowledged the issue and will fix this prior to release.", - "description_summary": "Missing bounds check in pull_ledger_diff could allow overflow and node crash." 
- }, - { - "title": "Off-by-one error in to_private", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "ceb9e46cd53b77eb914ba6c17452fc238bc3a28f", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/manta-sbt/src/lib.rs", - "lines": null - } - ], - "reported_impact": "Warning", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Logic Error", - "description": "4.1.4 V-MSBT-VUL-004: Off-by-one error in to_private\n\nSeverity: Warning\nType: Logic Error\nCommit: ceb9e46\nStatus: Acknowledged\nFile(s): pallets/manta-sbt/src/lib.rs\nLocation(s): to_private\n\nThe manta-sbt pallet has a user-callable extrinsic, to_private, which allows users to mint reserved SBT tokens. To facilitate minting tokens, the pallet maintains a mapping from users to an interval [l, u] where l \u2264 u and l and u refer to minimum and maximum asset ids that can be minted by the user. The function does two things. First, it mints an asset with id l and then updates the interval to [l + 1, u]. If l + 1 > u, it removes the user from the map since this indicates they don\u2019t have any more user ids to reserve.\n\nThere is an edge case where l = u = 2^128 - 1 where this function\u2019s behavior is incorrect. In this case, the extrinsic will revert because l + 1 would result in an overflow as it is of type u128. However, this prevents the user from minting a token reserved for them.\n\nCurrently this should not be a problem because it is unlikely that all 2^128 ids will get reserved anytime soon.\n\nImpact: A user may not be able to mint an SBT token reserved for them.\n\nRecommendation: We recommend adding an edge case for when l = 2^128-1. In that case, the token should be minted and user removed from the map.\n\nDeveloper Response: Not to be resolved. What the auditors point out is correct that the last id will not get minted; however, the resolution is unnecessary as once we hit this edge case the entire zkSBT protocol is unable to function. Furthermore, this value is extremely large and unlikely to ever be reached.", - "description_summary": "Off-by-one error in to_private may prevent users from minting reserved tokens." - }, - { - "title": "Unnecessary Storage Variable", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "ceb9e46cd53b77eb914ba6c17452fc238bc3a28f", - "reported_remediated_commit": "fa10f39afcd8882d14252f55ce08ffcab6321911", - "location": [ - { - "file_path": "pallets/manta-sbt/src/lib.rs", - "lines": [ - 382 - ] - } - ], - "reported_impact": "Warning", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Gas Optimization", - "description": "4.1.5 V-MSBT-VUL-005: Unnecessary Storage Variable\n\nSeverity: Warning\nType: Gas Optimization\nCommit: ceb9e46\nStatus: Fixed\nFile(s): pallets/manta-sbt/src/lib.rs\nLocation(s): 382\n\nThe storage variable UtxoAccumulatorOutputs keeps track of all Merkle roots generated by manta-sbt; however, this variable is unnecessary. It seems to be taken from the manta-pay pallet which uses it to keep track of previous Merkle roots so that when users provide membership proofs for asset transfers, the ledger can check that the root provided was a legitimate one.\n\nHowever, the tokens in manta-sbt are non-transferable so there isn\u2019t any logic which should use it. In particular, the only two places which use this storage variable are in the functions has_matching_utxo_accumulator_output and register_all. 
The former will never be invoked in manta-sbt because the asset is non-transferable. The latter just adds a newly created Merkle root to UtxoAccumulatorOutputs.\n\nSince this storage variable keeps track of every root generated, it is a non-trivial amount of storage to keep on-chain. Moreover, every minting transaction, namely to_private and mint_sbt_eth, will incur an extra cost of writing to storage, which is unnecessary.\n\nImpact: Unnecessarily high transaction fees for every mint transaction and bloated storage.\n\nRecommendation: We recommend removing this storage variable.\n\nDeveloper Response: This has been fixed in the following commit.", - "description_summary": "Unnecessary storage variable increases transaction fees and storage usage." - }, - { - "title": "Missing validation when setting mint info", - "repository": "https://github.com/Manta-Network/Manta", - "audited_commit": "ceb9e46cd53b77eb914ba6c17452fc238bc3a28f", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "pallets/manta-sbt/src/lib.rs", - "lines": null - } - ], - "reported_impact": "Info", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": "Data Validation", - "description": "4.1.6 V-MSBT-VUL-006: Missing validation when setting mint info\n\nSeverity: Info\nType: Data Validation\nCommit: ceb9e46\nStatus: Acknowledged\nFile(s): pallets/manta-sbt/src/lib.rs\nLocation(s): new_mint_info, update_mint_info\n\nWhen setting metadata for a mint type, the functions new_mint_info and update_mint_info set the start_time and end_time associated with the mint type. The functions validate the start and end times by making sure start_time < end_time. However, they don\u2019t check whether those make sense with respect to the current time. In particular, it seems like you would want now < end_time.\n\nImpact: Setting end_time < now means that nobody can mint for that mint type until it gets changed again.\n\nRecommendation: We recommend adding the additional validation which shouldn\u2019t be too expensive.\n\nDeveloper Response: This is intended behavior for now. The only way to pause a mint is to set end_time < now. This however could be improved by using an Option<> and remove the need for this invariant. This is low priority, but we could fix it in the future.", - "description_summary": "Missing validation allows minting periods to be set in the past, blocking minting." 
- } - ] - }, - { - "audited_project_id": 10, - "project_name": "Astar", - "auditor": "Security Research Labs", - "audit_link": "https://github.com/polkadot-assurance-legion/pal-docs/blob/main/audits/24h1/astar-srl-2403.pdf", - "findings": [ - { - "title": "Missing benchmarking for the lockdrop precompile dispatch", - "repository": "https://github.com/AstarNetwork/Astar", - "audited_commit": "282485aa2d50f12f42463bba1d393fce4c57c2a3", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "precompiles/dispatch-lockdrop/src/lib.rs", - "lines": [ - { - "from": 88, - "to": 90 - } - ] - } - ], - "reported_impact": "Info", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": null, - "description": "3.2.1 Missing benchmarking for the lockdrop precompile dispatch\nAttack scenario\nLocation\nTracking\nAttack impact\nSeverity\nStatus\nStatic ref_time used for weight calculation leading to underestimation\nof the weights can enable an attacker to perform denial of service\nprecompiles/dispatch-lockdrop\n[2]\nAn attacker may spam and conduct denial of service attacks cheaply in\ncomparison to the actual weight_to_gas price\nInfo\nClosed [4]\nThe precompile dispatch-lockdrop has un-benchmarked weight_to_gas estimation with\nref_time configured to 1_000_000_000 [5].\n// Record a fixed amount of weight to ensure there is no free execution\nhandle.record_cost(Runtime::GasWeightMapping::weight_to_gas(\n Weight::from_parts(1_000_000_000u64, 0),\n))?;\nThis estimation doesn\u2019t reflect the actual runtime environment and can aid an attacker to spam\nthe chain.\nWe suggest to appropriately benchmark the precompile dispatch to reflect the accurate\nweight_to_gas estimation for ref_time and POV_size.\nThe issue was acknowledged by the Astar team and remediation is currently in progress\nthrough collaboration with the Frontier team [6]", - "description_summary": "Underestimated weight_to_gas estimation due to missing benchmarking allows attackers to perform denial of service attacks." - }, - { - "title": "Unbounded call length limit in lockdrop dispatch call", - "repository": "https://github.com/AstarNetwork/Astar", - "audited_commit": "282485aa2d50f12f42463bba1d393fce4c57c2a3", - "reported_remediated_commit": null, - "location": [ - { - "file_path": "precompiles/dispatch-lockdrop/src/lib.rs", - "lines": [ - 74, - 104 - ] - } - ], - "reported_impact": "Low", - "reported_likelihood": null, - "cwe_classification": null, - "vulnerability_class_audit": null, - "description": "3.2.2 Unbounded call length limit in lockdrop dispatch call\nAttack scenario\nLocation\nTracking\nAttack impact\nSeverity\nStatus\nAn attacker may create multiple nested calls bloating the call_length\nbefore call decoding\nprecompiles/dispatch-lockdrop\n[3]\nUnbounded call length can aid an attacker to cause heap overflow when\ncall data is moved to the vector\nLow\nClosed [7]\nThe precompile dispatch call uses the UnboundedBytes [8] type without any call_length being\nset for the call object parameter. During runtime-call decoding, a stack overflow is prevented\nthrough usage of DecodeLimit [9] however, a heap overflow might occur even before decoding\nif an unbounded call with large call_length is moved into the u8 vector.\nAn attacker can use this to create multiple nested calls bloating the call_length and cause heap\noverflow even before call decoding.\nWe recommend using BoundedBytes instead of UnboundedBytes. 
A best practice\nimplementation from Moonbeam may be adopted for setting CallLengthLimit similar to\nGetProposalLimit [10] and implementing additional guard condition for call_length validity\nsimilar to proposal_length [11] before decoding the call for additional safety.",
-    "description_summary": "Unbounded call length in dispatch call allows attackers to cause heap overflow and perform denial of service attacks."
-   }
-  ]
- },
- {
-  "audited_project_id": 11,
-  "project_name": "Astar",
-  "auditor": "Zellic",
-  "audit_link": "https://github.com/polkadot-assurance-legion/pal-docs/blob/main/audits/24h1/astar-zellic-2401.pdf",
-  "findings": [
-   {
-    "title": "Weight calculation",
-    "repository": "https://github.com/AstarNetwork/Astar",
-    "audited_commit": "fc14b13401e1fb5e7391715fc76a308204173802",
-    "reported_remediated_commit": null,
-    "location": null,
-    "reported_impact": "Low",
-    "reported_likelihood": "Low",
-    "cwe_classification": null,
-    "vulnerability_class_audit": "Coding Mistakes",
-    "description": "All the assets pallet functions exposed by the chain extension are weighted with a constant amount. The weight of some operations is charged using the same quantity that the assets pallet benchmarks have computed. However, other operations only charge the weight of one runtime database read operation \u2014 T::DbWeight::get().reads(1_u64).\nTwo functions, MetadataSymbol and MetadataName, operate on a variable amount of data, but they also only account for one runtime database read operation.",
-    "description_summary": "Constant weight for functions, ignores variable data in MetadataSymbol & MetadataName."
-   }
-  ]
- }
-]
\ No newline at end of file
+  {
+   "audited_project_id": 1,
+   "project_name": "Parallel",
+   "auditor": "Trail of Bits",
+   "audit_link": "https://github.com/parallel-finance/auditing-report/blob/main/Trail%20of%20Bits_Parallel%20Finance_Final%20Report.pdf",
+   "findings": [
+    {
+     "title": "Vulnerable dependencies in the Substrate parachain",
+     "repository": "https://github.com/parallel-finance/parallel",
+     "audited_commit": "5ca8e13b7b4312855ae2ef1d39f14b38088dfdbd",
+     "reported_remediated_commit": null,
+     "location": null,
+     "reported_impact": "Medium",
+     "reported_likelihood": "Difficulty-High",
+     "cwe_classification": null,
+     "vulnerability_class_audit": "Patching",
+     "description": "1. Vulnerable dependencies in the Substrate parachain\nSeverity: Medium\nDifficulty: High\nType: Patching\nFinding ID: TOB-PLF-1\nTarget: parallel repository\nDescription\nThe Parallel Finance parachain node uses the following dependencies with known vulnerabilities. (All of the dependencies listed are inherited from the Substrate framework.)\nDependency | Version | ID | Description\nchrono | 0.4.19 | RUSTSEC-2020-0159 | Potential segfault in localtime_r invocations\nlru | 0.6.6 | RUSTSEC-2021-0130 | Use after free in lru crate\ntime | 0.1.44 | RUSTSEC-2020-0071 | Potential segfault in the time crate\nnet2 | 0.2.37 | RUSTSEC-2020-0016 | net2 crate has been deprecated; use socket2 instead\nOther than chrono, all the dependencies can simply be updated to their newest versions to fix the vulnerabilities. The chrono crate issue has not been mitigated and remains problematic. 
A specific sequence of calls must occur to trigger the vulnerability, which is discussed in this GitHub thread in the chrono repository.\nExploit Scenario\nAn attacker exploits a known vulnerability in the Parallel Finance node and performs a denial-of-service attack on the network by taking down all nodes in the network.\nRecommendations\nShort term, update all dependencies to their newest versions. Monitor the referenced GitHub thread regarding the chrono crate segfault issue.\nLong term, run cargo-audit as part of the CI/CD pipeline and ensure that the team is alerted to any vulnerable dependencies that are detected.", + "description_summary": "The Substrate parachain uses dependencies with known vulnerabilities, including chrono, lru, time, and net2, which may lead to potential segfaults or memory issues." + }, + { + "title": "Users can avoid accruing interest by repaying a zero amount", + "repository": "https://github.com/parallel-finance/parallel", + "audited_commit": "5ca8e13b7b4312855ae2ef1d39f14b38088dfdbd", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/loans/src/lib.rs", + "lines": [ + { + "from": 1057, + "to": 1087 + }, + { + "from": 1106, + "to": 1121 + } + ] + } + ], + "reported_impact": "Medium", + "reported_likelihood": "Difficulty-Low", + "cwe_classification": null, + "vulnerability_class_audit": "Data Validation", + "description": "2. Users can avoid accruing interest by repaying a zero amount\nSeverity: Medium\nDifficulty: Low\nType: Data Validation\nFinding ID: TOB-PLF-2\nTarget: pallets/loans/src/lib.rs\nDescription\nTo repay borrowed funds, users call the repay_borrow extrinsic. The extrinsic implementation calls the Pallet::repay_borrow_internal method to recompute the loan balance. Pallet::repay_borrow_internal updates the loan balance for the account and resets the borrow index as part of the calculation.\nfn repay_borrow_internal(\nborrower: &T::AccountId,\nasset_id: AssetIdOf,\naccount_borrows: BalanceOf,\nrepay_amount: BalanceOf,\n) -> DispatchResult {\n// ... \nAccountBorrows::::insert(\nasset_id,\nborrower,\nBorrowSnapshot {\nprincipal: account_borrows_new,\nborrow_index: Self::borrow_index(asset_id),\n},\n);\nTotalBorrows::::insert(asset_id, total_borrows_new);\nOk(())\n}\nFigure 2.1: pallets/loans/src/lib.rs:1057-1087\nThe borrow index is used in the calculation of the accumulated interest for the loan in Pallet::current_balance_from_snapshot. Specifically, the outstanding balance, snapshot.principal, is multiplied by the quotient of borrow_index divided by snapshot.borrow_index.\npub fn current_balance_from_snapshot(\nasset_id: AssetIdOf,\nsnapshot: BorrowSnapshot>,\n) -> Result, DispatchError> {\nif snapshot.principal.is_zero() || snapshot.borrow_index.is_zero() {\nreturn Ok(Zero::zero());\n}\n// Calculate new borrow balance using the interest index:\n// recent_borrow_balance = snapshot.principal * borrow_index /\n// snapshot.borrow_index\nlet recent_borrow_balance = Self::borrow_index(asset_id)\n.checked_div(&snapshot.borrow_index)\n.and_then(|r| r.checked_mul_int(snapshot.principal))\n.ok_or(ArithmeticError::Overflow)?;\nOk(recent_borrow_balance)\n}\nFigure 2.2: pallets/loans/src/lib.rs:1106-1121\nTherefore, if the snapshot borrow index is updated to Self::borrow_index(asset_id), the resulting recent_borrow_balance in Pallet::current_balance_from_snapshot will always be equal to snapshot.principal. That is, no interest will be applied to the loan. 
It follows that the accrued interest is lost whenever part of the loan is repaid. In an extreme case, if the repaid amount passed to repay_borrow is 0, users could reset the borrow index without repaying anything.\nThe same issue is present in the implementations of the liquidated_transfer and borrow extrinsics as well.\nExploit Scenario\nA malicious user borrows assets from Parallel Finance and calls repay_borrow with a repay_amount of zero. This allows her to avoid paying interest on the loan.\nRecommendations\nShort term, modify the code so that the accrued interest is added to the snapshot principal when the snapshot is updated.\nLong term, add unit tests for edge cases (like repaying a zero amount) to increase the chances of discovering unexpected system behavior.", + "description_summary": "Users can avoid paying interest on loans by repaying a zero amount, resetting the borrow index without repaying anything." + }, + { + "title": "Missing validation in Pallet::force_update_market", + "repository": "https://github.com/parallel-finance/parallel", + "audited_commit": "5ca8e13b7b4312855ae2ef1d39f14b38088dfdbd", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/loans/src/lib.rs", + "lines": [ + { + "from": 539, + "to": 556 + } + ] + } + ], + "reported_impact": "Informational", + "reported_likelihood": "Difficulty-High", + "cwe_classification": null, + "vulnerability_class_audit": "Data Validation", + "description": "3. Missing validation in Pallet::force_update_market\nSeverity: Informational\nDifficulty: High\nType: Data Validation\nFinding ID: TOB-PLF-3\nTarget: pallets/loans/src/lib.rs\nDescription\nThe Pallet::force_update_market method can be used to replace the stored market instance for a given asset. Other methods used to update market parameters perform extensive validation of the market parameters, but force_update_market checks only the rate model.\npub fn force_update_market(\norigin: OriginFor,\nasset_id: AssetIdOf,\nmarket: Market>,\n) -> DispatchResultWithPostInfo {\nT::UpdateOrigin::ensure_origin(origin)?;\nensure!(\nmarket.rate_model.check_model(),\nError::::InvalidRateModelParam\n);\nlet updated_market = Self::mutate_market(asset_id, |stored_market| {\n*stored_market = market;\nstored_market.clone()\n})?;\nSelf::deposit_event(Event::::UpdatedMarket(updated_market));\nOk(().into())\n}\nFigure 3.1: pallets/loans/src/lib.rs:539-556\nThis means that the caller (who is either the root account or half of the general council) could inadvertently change immutable market parameters like ptoken_id by mistake.\nExploit Scenario\nThe root account calls force_update_market to update a set of market parameters. By mistake, the ptoken_id market parameter is updated, which means that Pallet::ptoken_id and Pallet::underlying_id are no longer inverses.\nRecommendations\nShort term, consider adding more input validation to the force_update_market extrinsic. In particular, it may make sense to ensure that the ptoken_id market parameter has not changed. Alternatively, add validation to check whether the ptoken_id market parameter is updated and to update the UnderlyingAssetId map to ensure that the value matches the Markets storage map.", + "description_summary": "The force_update_market method lacks sufficient validation, potentially allowing unintended changes to immutable market parameters." 
+ }, + { + "title": "Missing validation in multiple StakingLedger methods", + "repository": "https://github.com/parallel-finance/parallel", + "audited_commit": "5ca8e13b7b4312855ae2ef1d39f14b38088dfdbd", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/liquid-staking/src/types.rs", + "lines": [ + { + "from": 199, + "to": 219 + }, + { + "from": 223, + "to": 226 + }, + { + "from": 230, + "to": 253 + } + ] + } + ], + "reported_impact": "Undetermined", + "reported_likelihood": "Difficulty-High", + "cwe_classification": null, + "vulnerability_class_audit": "Data Validation", + "description": "4. Missing validation in multiple StakingLedger methods\nSeverity: Undetermined\nDifficulty: High\nType: Data Validation\nFinding ID: TOB-PLF-4\nTarget: pallets/liquid-staking/src/types.rs\nDescription\nThe staking ledger is used to keep track of the total amount of staked funds in the system. It is updated in response to cross-consensus messaging (XCM) requests to the parent chain (either Polkadot or Kusama). A number of the StakingLedger methods lack sufficient input validation before they update the staking ledger\u2019s internal state. Even though the input is validated as part of the original XCM call, there could still be issues due to implementation errors or overlooked corner cases.\nFirst, the StakingLedger::rebond method does not use checked arithmetic to update the active balance. The method should also check that the computed unlocking_balance is equal to the input value at the end of the loop to ensure that the system remains consistent.\npub fn rebond(&mut self, value: Balance) {\nlet mut unlocking_balance: Balance = Zero::zero();\nwhile let Some(last) = self.unlocking.last_mut() {\nif unlocking_balance + last.value <= value {\nunlocking_balance += last.value;\nself.active += last.value;\nself.unlocking.pop();\n} else {\nlet diff = value - unlocking_balance;\nunlocking_balance += diff;\nself.active += diff;\nlast.value -= diff;\n}\nif unlocking_balance >= value {\nbreak;\n}\n}\n}\nFigure 4.1: pallets/liquid-staking/src/types.rs:199-219\nSecond, the StakingLedger::bond_extra method does not use checked arithmetic to update the total and active balances.\npub fn bond_extra(&mut self, value: Balance) {\nself.total += value;\nself.active += value;\n}\nFigure 4.2: pallets/liquid-staking/src/types.rs:223-226\nFinally, the StakingLedger::unbond method does not use checked arithmetic when updating the active balance.\npub fn unbond(&mut self, value: Balance, target_era: EraIndex) {\nif let Some(mut chunk) = self\n.unlocking\n.last_mut()\n.filter(|chunk| chunk.era == target_era)\n{\nchunk.value = chunk.value.saturating_add(value);\n} else {\nself.unlocking.push(UnlockChunk {\nvalue,\nera: target_era,\n});\n};\nself.active -= value;\n}\nFigure 4.3: pallets/liquid-staking/src/types.rs:230-253\nSince the staking ledger is updated by a number of the XCM response handlers, and XCM responses may return out of order, it is important to ensure that input to the staking ledger methods is validated to prevent issues due to race conditions and corner cases.\nWe could not find a way to exploit this issue, but we cannot rule out the risk that it could be used to cause a denial-of-service condition in the system.\nExploit Scenario\nThe staking ledger's state is updated as part of a WithdrawUnbonded request, leaving the unlocking vector in the staking ledger empty. 
Later, when the response to a previous call to rebond is handled, the ledger is updated again, which leaves it in an inconsistent state.\nRecommendations\nShort term, ensure that the balance represented by the staking ledger\u2019s unlocking vector is enough to cover the input balance passed to StakingLedger::rebond. Use checked arithmetic in all staking ledger methods that update the ledger\u2019s internal state to ensure that issues due to data races are detected and handled correctly.", + "description_summary": "StakingLedger methods lack sufficient input validation and unchecked arithmetic, potentially leading to inconsistent states or race conditions." + }, + { + "title": "Failed XCM requests left in storage", + "repository": "https://github.com/parallel-finance/parallel", + "audited_commit": "5ca8e13b7b4312855ae2ef1d39f14b38088dfdbd", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/liquid-staking/src/lib.rs", + "lines": [ + { + "from": 1071, + "to": 1159 + } + ] + } + ], + "reported_impact": "Low", + "reported_likelihood": "Difficulty-High", + "cwe_classification": null, + "vulnerability_class_audit": "Data Validation", + "description": "5. Failed XCM requests left in storage\nSeverity: Low\nDifficulty: High\nType: Data Validation\nFinding ID: TOB-PLF-5\nTarget: pallets/liquid-staking/src/lib.rs\nDescription\nWhen the liquid-staking pallet generates an XCM request for the parent chain, the corresponding XCM response triggers a call to Pallet::notification_received. If the response is of the Response::ExecutionResult type, this method calls Pallet::do_notification_received to handle the result.\nThe Pallet::do_notification_received method checks whether the request was successful and then updates the local state according to the corresponding XCM request, which is obtained from the XcmRequests storage map.\nfn do_notification_received(\nquery_id: QueryId,\nrequest: XcmRequest,\nres: Option<(u32, XcmError)>,\n) -> DispatchResult {\nuse ArithmeticKind::*;\nuse XcmRequest::*;\nlet executed = res.is_none();\nif !executed {\nreturn Ok(());\n}\nmatch request {\nBond {\nindex: derivative_index,\namount,\n} => {\nensure!(\n!StakingLedgers::::contains_key(&derivative_index),\nError::::AlreadyBonded\n);\nlet staking_ledger =\n>>::new(\nSelf::derivative_sovereign_account_id(derivative_index),\namount,\n);\nStakingLedgers::::insert(derivative_index, staking_ledger);\nMatchingPool::::try_mutate(|p| -> DispatchResult {\np.update_total_stake_amount(amount, Subtraction)\n})?;\nT::Assets::burn_from(\nSelf::staking_currency()?,\n&Self::account_id(),\nAmount\n)?;\n}\n// ... \n}\nXcmRequests::::remove(&query_id);\nOk(())\n}\nFigure 5.1: pallets/liquid-staking/src/lib.rs:1071-1159\nIf the method completes without errors, the XCM request is removed from storage via a call to XcmRequests::remove(query_id). However, if any of the following conditions are true, the corresponding XCM request is left in storage indefinitely:\n1. The request fails and Pallet::do_notification_received exits early.\n2. Pallet::do_notification_received fails.\n3. The response type is not Response::ExecutionResult.\nThese three cases are currently unhandled by the codebase. 
The same issue is present in the crowdloans pallet implementation of Pallet::do_notification_received.\nRecommendations\nShort term, ensure that failed XCM requests are handled correctly by the crowdloans and liquid-staking pallets.", + "description_summary": "Failed XCM requests are left in storage indefinitely if not handled properly by the notification_received method." + }, + { + "title": "Risk of using stale oracle prices in loans pallet", + "repository": "https://github.com/parallel-finance/parallel", + "audited_commit": "5ca8e13b7b4312855ae2ef1d39f14b38088dfdbd", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/loans/src/lib.rs", + "lines": [ + { + "from": 1430, + "to": 1441 + } + ] + } + ], + "reported_impact": "Low", + "reported_likelihood": "Difficulty-High", + "cwe_classification": null, + "vulnerability_class_audit": "Data Validation", + "description": "6. Risk of using stale oracle prices in loans pallet\nSeverity: Low\nDifficulty: High\nType: Data Validation\nFinding ID: TOB-PLF-6\nTarget: pallets/loans/src/lib.rs\nDescription\nThe loans pallet uses oracle prices to find a USD value of assets using the get_price function. The get_price function internally uses the T::PriceFeeder::get_price function, which returns a timestamp and the price. However, the returned timestamp is ignored.\npub fn get_price(asset_id: AssetIdOf) -> Result {\nlet (price, _) = T::PriceFeeder::get_price(&asset_id).ok_or(Error::::PriceOracleNotReady)?;\nif price.is_zero() {\nreturn Err(Error::::PriceIsZero.into());\n}\nlog::trace!(target: \"loans::get_price\", \"price: {:?}\", price.into_inner());\nOk(price)\n}\nFigure 6.1: pallets/loans/src/lib.rs:1430-1441\nExploit Scenario\nThe price feeding oracles fail to deliver prices for an extended period of time. The get_price function returns stale prices, causing the get_asset_value function to return a non-market asset value.\nRecommendations\nShort term, modify the code so that it compares the returned timestamp from the T::PriceFeeder::get_price function with the current timestamp, returns an error if the price is too old, and handles the emergency price, which currently has a timestamp of zero. This will stop the market if stale prices are returned and allow the governance process to intervene with an emergency price.", + "description_summary": "The loans pallet may use stale oracle prices, leading to incorrect asset valuations." + }, + { + "title": "Missing calculations in crowdloans extrinsics", + "repository": "https://github.com/parallel-finance/parallel", + "audited_commit": "5ca8e13b7b4312855ae2ef1d39f14b38088dfdbd", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/crowdloans/src/lib.rs", + "lines": [ + { + "from": 718, + "to": 765 + } + ] + } + ], + "reported_impact": "Undetermined", + "reported_likelihood": "Difficulty-High", + "cwe_classification": null, + "vulnerability_class_audit": "Undefined Behavior", + "description": "7. Missing calculations in crowdloans extrinsics\nSeverity: Undetermined\nDifficulty: High\nType: Undefined Behavior\nFinding ID: TOB-PLF-7\nTarget: pallets/crowdloans/src/lib.rs\nDescription\nThe claim extrinsic in the crowdloans pallet is missing code to subtract the claimed amount from vault.contributed to update the total contribution amount. 
A similar bug exists in the refund extrinsic: there is no subtraction from vault.contributed after the Self::contribution_kill call.\npub fn claim(\norigin: OriginFor,\ncrowdloan: ParaId,\nlease_start: LeasePeriod,\nlease_end: LeasePeriod,\n) -> DispatchResult {\n// ... \nSelf::contribution_kill(\nvault.trie_index,\n&who,\nChildStorageKind::Contributed\n);\nSelf::deposit_event(Event::::VaultClaimed(\ncrowdloan,\n(lease_start, lease_end),\nctoken,\nwho,\namount,\nVaultPhase::Succeeded,\n));\nOk(())\n}\nFigure 7.1: pallets/crowdloans/src/lib.rs:718-765\nExploit Scenario\nThe claim extrinsic is called, but the total amount in vault.contributed is not updated, leading to incorrect calculations in other places.\nRecommendations\nShort term, update the claim and refund extrinsics so that they subtract the amount from vault.contributed.\nLong term, add a test suite to ensure that the vault state stays consistent after the claim and refund extrinsics are called.", + "description_summary": "The claim and refund extrinsics do not update the vault's total contribution, leading to incorrect calculations." + }, + { + "title": "Event emitted when update_vault and set_vrf calls do not make updates", + "repository": "https://github.com/parallel-finance/parallel", + "audited_commit": "5ca8e13b7b4312855ae2ef1d39f14b38088dfdbd", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/crowdloans/src/lib.rs", + "lines": [ + { + "from": 424, + "to": 472 + }, + { + "from": 599, + "to": 616 + } + ] + } + ], + "reported_impact": "Informational", + "reported_likelihood": "Difficulty-High", + "cwe_classification": null, + "vulnerability_class_audit": "Auditing and Logging", + "description": "8. Event emitted when update_vault and set_vrf calls do not make updates\nSeverity: Informational\nDifficulty: High\nType: Auditing and Logging\nFinding ID: TOB-PLF-8\nTarget: pallets/crowdloans/src/lib.rs\nDescription\nThe update_vault extrinsic in the crowdloans pallet is responsible for updating the three values shown. It is possible to call update_vault in such a way that no update is performed, but the function emits an event regardless of whether an update occurred. The same situation occurs in the set_vrfs extrinsic.\npub fn update_vault(\norigin: OriginFor,\ncrowdloan: ParaId,\ncap: Option>,\nend_block: Option>,\ncontribution_strategy: Option,\n) -> DispatchResult {\nT::UpdateVaultOrigin::ensure_origin(origin)?;\nlet mut vault = Self::current_vault(crowdloan).ok_or(Error::::VaultDoesNotExist)?;\nif let Some(cap) = cap {\n// ... \n}\nif let Some(end_block) = end_block {\n// ... \n}\nif let Some(contribution_strategy) = contribution_strategy {\n// ... \n}\nSelf::deposit_event(Event::::VaultUpdated(crowdloan,(lease_start, lease_end),contribution_strategy,cap,end_block));\nOk(())\n}\nFigure 8.1: pallets/crowdloans/src/lib.rs:424-472\npub fn set_vrfs(origin: OriginFor, vrfs: Vec) -> DispatchResult {\nT::VrfOrigin::ensure_origin(origin)?;\nlog::trace!(target: \"crowdloans::set_vrfs\", \"pre-toggle. vrfs: {:?}\",vrfs);\nVrfs::::try_mutate(|b| -> Result<(), DispatchError> {\n*b = vrfs.try_into().map_err(|_| Error::::MaxVrfsExceeded)?;\nOk(())\n})?;\nSelf::deposit_event(Event::::VrfsUpdated(Self::vrfs()));\nOk(())\n}\nFigure 8.2: pallets/crowdloans/src/lib.rs:599-616\nExploit Scenario\nA system observes that the VaultUpdate event was emitted even though the vault state did not actually change. 
Based on this observation, it performs logic that should be executed only when the state has been updated.\nRecommendations\nShort term, modify the VaultUpdate event so that it is emitted only when the update_vault extrinsic makes an actual update. Optionally, have the update_vault extrinsic return an error to the caller when calling it results in no updates.", + "description_summary": "The update_vault and set_vrfs extrinsics emit events even when no actual updates are made." + }, + { + "title": "The referral code is a sequence of arbitrary bytes", + "repository": "https://github.com/parallel-finance/parallel", + "audited_commit": "5ca8e13b7b4312855ae2ef1d39f14b38088dfdbd", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/crowdloans/src/lib.rs", + "lines": [ + { + "from": 502, + "to": 594 + } + ] + } + ], + "reported_impact": "Informational", + "reported_likelihood": "Difficulty-High", + "cwe_classification": null, + "vulnerability_class_audit": "Data Validation", + "description": "9. The referral code is a sequence of arbitrary bytes\nSeverity: Informational\nDifficulty: High\nType: Data Validation\nFinding ID: TOB-PLF-9\nTarget: pallets/crowdloans/src/lib.rs\nDescription\nThe referral code is used in a number of extrinsic calls in the crowdloans pallet. Because the referral code is never validated, it can be a sequence of arbitrary bytes. The referral code is logged by a number of extrinsics. However, it is currently impossible to perform log injection because the referral code is printed as a hexadecimal string rather than raw bytes (using the debug representation).\npub fn contribute(\norigin: OriginFor,\ncrowdloan: ParaId,\n#[pallet::compact] amount: BalanceOf,\nreferral_code: Vec,\n) -> DispatchResultWithPostInfo {\n// ... \nlog::trace!(\ntarget: \"crowdloans::contribute\",\n\"who: {:?}, para_id: {:?}, amount: {:?}, referral_code: {:?}\",\n&who,\n&crowdloan,\n&amount,\n&referral_code\n);\nOk(().into())\n}\nFigure 9.1: pallets/crowdloans/src/lib.rs:502-594\nExploit Scenario\nThe referral code is rendered as raw bytes in a vulnerable environment, introducing an opportunity to perform a log injection attack.\nRecommendations\nShort term, choose and implement a data type that models the referral code semantics as closely as possible.", + "description_summary": "The referral code in the crowdloans pallet is arbitrary and unvalidated, potentially allowing log injection attacks." + }, + { + "title": "Missing validation of referral code size", + "repository": "https://github.com/parallel-finance/parallel", + "audited_commit": "5ca8e13b7b4312855ae2ef1d39f14b38088dfdbd", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/crowdloans/src/lib.rs", + "lines": [ + { + "from": 1429, + "to": 1464 + } + ] + } + ], + "reported_impact": "Low", + "reported_likelihood": "Difficulty-Low", + "cwe_classification": null, + "vulnerability_class_audit": "Data Validation", + "description": "10. Missing validation of referral code size\nSeverity: Low\nDifficulty: Low\nType: Data Validation\nFinding ID: TOB-PLF-10\nTarget: pallets/crowdloans/src/lib.rs\nDescription\nThe length of the referral code is not validated by the contribute extrinsic defined by the crowdloans pallet. Since the referral code is stored by the node, a malicious user could call contribute multiple times with a very large referral code. 
This would increase the memory pressure on the node, potentially leading to memory exhaustion.\nfn do_contribute(\nwho: &AccountIdOf,\ncrowdloan: ParaId,\nvault_id: VaultId,\namount: BalanceOf,\nreferral_code: Vec,\n) -> Result<(), DispatchError> {\nXcmRequests::::insert(\nquery_id,\nXcmRequest::Contribute {\ncrowdloan,\nvault_id,\nwho: who.clone(),\namount,\nreferral_code: referral_code.clone(),\n},\n);\nOk(())\n}\nFigure 10.1: pallets/crowdloans/src/lib.rs:1429-1464\nExploit Scenario\nA malicious user calls the contribute extrinsic multiple times with a very large referral code. This increases the memory pressure on the validator nodes and eventually causes all parachain nodes to run out of memory and crash.\nRecommendations\nShort term, add validation that limits the size of the referral code argument to the contribute extrinsic.", + "description_summary": "The contribute extrinsic lacks validation on the referral code size, potentially leading to memory exhaustion." + }, + { + "title": "Code duplication in crowdloans pallet", + "repository": "https://github.com/parallel-finance/parallel", + "audited_commit": "5ca8e13b7b4312855ae2ef1d39f14b38088dfdbd", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/crowdloans/src/lib.rs", + "lines": null + } + ], + "reported_impact": "Informational", + "reported_likelihood": "Difficulty-High", + "cwe_classification": null, + "vulnerability_class_audit": "Patching", + "description": "11. Code duplication in crowdloans pallet\nSeverity: Informational\nDifficulty: High\nType: Patching\nFinding ID: TOB-PLF-11\nTarget: pallets/crowdloans/src/lib.rs\nDescription\nA number of extrinsics in the crowdloans pallet have duplicate code. The close, reopen, and auction_succeeded extrinsics have virtually identical logic. The migrate_pending and refund extrinsics are also fairly similar.\nExploit Scenario\nA vulnerability is found in the duplicate code, but it is patched in only one place.\nRecommendations\nShort term, refactor the close, reopen, and auction_succeeded extrinsics into one function, to be called with values specific to the extrinsics. Refactor common pieces of logic in the migrate_pending and refund extrinsics.\nLong term, avoid code duplication, as it makes the system harder to review and update. Perform regular code reviews and track any logic that is duplicated.", + "description_summary": "Code duplication in the crowdloans pallet increases the risk of inconsistent patches and complicates maintenance." 
+    }
+   ]
+  },
+  {
+   "audited_project_id": 2,
+   "project_name": "Parallel",
+   "auditor": "Slow Mist",
+   "audit_link": "https://github.com/parallel-finance/auditing-report/blob/main/Slow%20Mist%20-%20Parallel%20Security%20Audit%20Report.pdf",
+   "findings": [
+    {
+     "title": "Need to upgrade the module",
+     "repository": "https://github.com/parallel-finance/parallel",
+     "audited_commit": "a223cd7910af3540048b58f958fea5b784876468",
+     "reported_remediated_commit": "fc6d8042ba51719e0f1bca40631e4649a6100510",
+     "location": null,
+     "reported_impact": "low-risk",
+     "reported_likelihood": null,
+     "cwe_classification": null,
+     "vulnerability_class_audit": null,
+     "description": "5.1 Need to upgrade the module [low-risk]\nID: RUSTSEC-2021-0067\nCrate: cranelift-codegen\nVersion: 0.71.0\nDate: 2021-05-21\nURL: https://rustsec.org/advisories/RUSTSEC-2021-0067\nTitle: Memory access due to code generation flaw in Cranelift module\nSolution: upgrade to >= 0.73.1 OR >= 0.74\nDependency tree: cranelift-codegen 0.71.0\nFixed in: https://github.com/parallel-finance/parallel/pull/210",
+     "description_summary": "Memory access vulnerability in Cranelift module due to code generation flaw, fixed by upgrading to >= 0.73.1 or >= 0.74."
+    },
+    {
+     "title": "Numeric overflow",
+     "repository": "https://github.com/parallel-finance/parallel",
+     "audited_commit": "a223cd7910af3540048b58f958fea5b784876468",
+     "reported_remediated_commit": "386b0e7e1eea19c143900d4127daa8c475266dad",
+     "location": [
+      {
+       "file_path": "parallel/pallets/loans/src/lib.rs",
+       "lines": null
+      }
+     ],
+     "reported_impact": "enhancement",
+     "reported_likelihood": null,
+     "cwe_classification": null,
+     "vulnerability_class_audit": null,
+     "description": "5.2 Numeric overflow[enhancement]\nparallel/pallets/loans/src/lib.rs\nlet total_reserves_new = total_reserves - reduce_amount;\n//...snip code...//\nlet total_reserves_new = total_reserves + add_amount;\nIt is recommended to use `checked_add/checked_sub` to prevent numerical overflow.\nFixed in: https://github.com/parallel-finance/parallel/pull/241",
+     "description_summary": "Numeric overflow vulnerability due to lack of checked_add/checked_sub, fixed by adding these methods."
+    },
+    {
+     "title": "Lack of bounds checking",
+     "repository": "https://github.com/parallel-finance/parallel",
+     "audited_commit": "a223cd7910af3540048b58f958fea5b784876468",
+     "reported_remediated_commit": "2bb80a4e75416e27a03ffb9f8d3100aca548d8b2",
+     "location": [
+      {
+       "file_path": "parallel/pallets/loans/src/lib.rs",
+       "lines": null
+      }
+     ],
+     "reported_impact": "weakness",
+     "reported_likelihood": null,
+     "cwe_classification": null,
+     "vulnerability_class_audit": null,
+     "description": "5.3 Lack of bounds checking[weakness]\n`mint_amount` has no boundary limit; it is recommended to enhance.\nFixed in: https://github.com/parallel-finance/parallel/pull/258",
+     "description_summary": "Lack of bounds checking for mint_amount, recommended to enhance." 
+    },
+    {
+     "title": "Oracle price feed risk",
+     "repository": "https://github.com/parallel-finance/parallel",
+     "audited_commit": "a223cd7910af3540048b58f958fea5b784876468",
+     "reported_remediated_commit": null,
+     "location": [
+      {
+       "file_path": "parallel/pallets/prices/src/lib.rs",
+       "lines": null
+      }
+     ],
+     "reported_impact": "weakness",
+     "reported_likelihood": null,
+     "cwe_classification": null,
+     "vulnerability_class_audit": null,
+     "description": "5.4 Oracle price feed risk[weakness]\nDue to the lack of a time parameter, if the price is not fed in time, the price may be inaccurate.\nparallel/pallets/prices/src/lib.rs\npub enum Event {\n/// Set emergency price. [currency_id, price_detail]\nSetPrice(CurrencyId, PriceWithDecimal),\n/// Reset emergency price. [currency_id]\nResetPrice(CurrencyId),\n}\nFeedback: At present, the source of price feeding is controlled by authority and credible. The range of trustworthiness includes the accuracy and real-time of the price, that is, outdated prices will not be sent to the chain, but the price feeding transaction is sent as an operational transaction on the chain; at the transaction level it is guaranteed to be packaged in real time, that is, in the current block.",
+     "description_summary": "Oracle price feed may be inaccurate due to lack of a time parameter."
+    }
+   ]
+  },
+  {
+   "audited_project_id": 3,
+   "project_name": "AvaProtocol",
+   "auditor": "Slow Mist",
+   "audit_link": "https://avaprotocol.org/docs/papers/SlowMist.Audit.Report.-.Turing.Network.-.June.2022.pdf",
+   "findings": [
+    {
+     "title": "Calculate inaccurate risk",
+     "repository": "https://github.com/OAK-Foundation/OAK-blockchain",
+     "audited_commit": "643342e936bbc821d2fb91be69872e4fcecd2273",
+     "reported_remediated_commit": "cba1acd6961fce877cef95c6b6a198ea8b415a0f",
+     "location": [
+      {
+       "file_path": "pallets/automation-time/src/lib.rs",
+       "lines": null
+      }
+     ],
+     "reported_impact": "Suggestion",
+     "reported_likelihood": null,
+     "cwe_classification": null,
+     "vulnerability_class_audit": "Integer Overflow",
+     "description": "[N1] [Suggestion] Calculate inaccurate risk\nCategory: Integer Overflow Audit\nContent\npallets/automation-time/src/lib.rs\nThere are some risks of value overflow.\nsaturating_mul, saturating_sub, saturating_add and +-*/, +=, -=\nsaturate at the numeric bounds instead of overflowing; the returned result is inaccurate.\nSolution\nUse checked_add/checked_sub/checked_mul/checked_div instead of\nsaturating_add/saturating_sub/saturating_mul/saturating_div and +-*/, +=, -= .\nStatus\nFixed",
+     "description_summary": "Inaccurate calculation risk due to potential integer overflow; replace saturating operations with checked operations." 
+ }, + { + "title": "User balance is not checked before transfer", + "repository": "https://github.com/OAK-Foundation/OAK-blockchain", + "audited_commit": "643342e936bbc821d2fb91be69872e4fcecd2273", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/automation-time/src/lib.rs", + "lines": null + } + ], + "reported_impact": "Suggestion", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Others", + "description": "[N2] [Suggestion] User balance is not checked\nCategory: Others\nContent\npallets/automation-time/src/lib.rs\nThe amount transferred by the user is not compared with the user's balance here, and the user may not have enough NativeToken.\npub fn schedule_native_transfer_task(\norigin: OriginFor,\nprovided_id: Vec,\nexecution_times: Vec,\nrecipient_id: T::AccountId,\n#[pallet::compact] amount: BalanceOf,\n) -> DispatchResult {\nlet who = ensure_signed(origin)?;\n// check for greater than existential deposit\nif amount < T::NativeTokenExchange::minimum_balance() {\nErr(>::InvalidAmount)?\n}\n// check not sent to self\nif who == recipient_id {\nErr(>::TransferToSelf)?\n}\nlet action =\nAction::NativeTransfer { sender: who.clone(), recipient: recipient_id, amount };\nSelf::validate_and_schedule_task(action, who, provided_id, execution_times)?;\nOk(().into())\n}\nSolution\nCompare the user's transfer amount with the balance\nStatus\nIgnored; This is expected behaviour.", + "description_summary": "User balance is not checked before transfer, allowing potential insufficient funds." + }, + { + "title": "Return value not checked", + "repository": "https://github.com/OAK-Foundation/OAK-blockchain", + "audited_commit": "643342e936bbc821d2fb91be69872e4fcecd2273", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/parachain-staking/src/lib.rs", + "lines": [962, 1004, 1390] + }, + { + "file_path": "pallets/parachain-staking/src/types.rs", + "lines": [463] + }, + { + "file_path": "pallets/parachain-staking/src/delegation_requests.rs", + "lines": [ + 279, + { + "from": 648, + "to": 651 + } + ] + }, + { + "file_path": "pallets/parachain-staking/src/migrations.rs", + "lines": [404] + } + ], + "reported_impact": "Low", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Others", + "description": "[N3] [Low] Return Value Not Checked\nCategory: Others\nContent\npallets/parachain-staking/src/lib.rs\n//#L962\nT::Currency::unreserve(&bond.owner, bond.amount);\n//#L1004\nT::Currency::unreserve(&candidate, state.bond);\n//#L1390\nT::Currency::unreserve(&delegator, amount);\npallets/parachain-staking/src/types.rs\n//#L463\nT::Currency::unreserve(&who, request.amount.into());\n//#L648:651\nT::Currency::unreserve(&lowest_bottom_to_be_kicked.owner, lowest_bottom_to_be_kicked.amount);\npallets/parachain-staking/src/delegation_requests.rs\n//#L279\nT::Currency::unreserve(&delegator, amount);\npallets/parachain-staking/src/migrations.rs\n//#L404\nT::Currency::unreserve(&owner, *amount);\nThe return value of unreserve needs to be checked.\nSolution\nCheck the return value.\nStatus\nIgnored; If the account has less than that locked up. Not only is this unlikely to happen, there's nothing for parachain-staking to do if it occurs.", + "description_summary": "Return value of unreserve function is not checked, risking unhandled errors." 
+    },
+    {
+     "title": "Calculate inaccurate risk",
+     "repository": "https://github.com/OAK-Foundation/OAK-blockchain",
+     "audited_commit": "643342e936bbc821d2fb91be69872e4fcecd2273",
+     "reported_remediated_commit": null,
+     "location": [
+      {
+       "file_path": "pallets/parachain-staking/src/types.rs",
+       "lines": null
+      },
+      {
+       "file_path": "pallets/parachain-staking/src/lib.rs",
+       "lines": null
+      },
+      {
+       "file_path": "pallets/parachain-staking/src/delegation_requests.rs",
+       "lines": null
+      }
+     ],
+     "reported_impact": "Suggestion",
+     "reported_likelihood": null,
+     "cwe_classification": null,
+     "vulnerability_class_audit": "Integer Overflow",
+     "description": "[N4] [Suggestion] Calculate inaccurate risk\nCategory: Integer Overflow Audit\nContent\npallets/parachain-staking/src/types.rs\npallets/parachain-staking/src/lib.rs\npallets/parachain-staking/src/delegation_requests.rs\nThere are some risks of value overflow.\nsaturating_mul, saturating_sub, saturating_add and +-*/, +=, -=\nsaturate at the numeric bounds instead of overflowing; the returned result is inaccurate.\nSolution\nUse checked_add/checked_sub/checked_mul/checked_div instead of\nsaturating_add/saturating_sub/saturating_mul/saturating_div and +-*/, +=, -= .\nStatus\nIgnored; These functions are performed on the Balance type. Since the same type is used for total_issuance I don't think we need to be worried about a portion of the issuance overflowing the data type.",
+     "description_summary": "Potential inaccurate calculation due to integer overflow; replace saturating operations with checked operations."
+    },
+    {
+     "title": "Missing logic",
+     "repository": "https://github.com/OAK-Foundation/OAK-blockchain",
+     "audited_commit": "643342e936bbc821d2fb91be69872e4fcecd2273",
+     "reported_remediated_commit": null,
+     "location": [
+      {
+       "file_path": "pallets/parachain-staking/src/lib.rs",
+       "lines": null
+      }
+     ],
+     "reported_impact": "Low",
+     "reported_likelihood": null,
+     "cwe_classification": null,
+     "vulnerability_class_audit": "Others",
+     "description": "[N5] [Low] Missing logic\nCategory: Others\nContent\npallets/parachain-staking/src/lib.rs\nIt is necessary to make a judgment in the case of deposit_into_existing failure. 
If the transfer fails, the entire transaction needs to be rolled back.\nfn prepare_staking_payouts(now: RoundIndex) {\n// payout is now - delay rounds ago => now - delay > 0 else return early\nlet delay = T::RewardPaymentDelay::get();\nif now <= delay {\nreturn;\n}\nlet round_to_payout = now.saturating_sub(delay);\nlet total_points = <Points<T>>::get(round_to_payout);\nif total_points.is_zero() {\nreturn;\n}\nlet total_staked = <Staked<T>>::take(round_to_payout);\nlet total_issuance = Self::compute_issuance(total_staked);\nlet mut left_issuance = total_issuance;\n// reserve portion of issuance for parachain bond account\nlet bond_config = <ParachainBondInfo<T>>::get();\nlet parachain_bond_reserve = bond_config.percent * total_issuance;\nif let Ok(imb) = T::Currency::deposit_into_existing(&bond_config.account, parachain_bond_reserve) {\n// update round issuance iff transfer succeeds\nleft_issuance = left_issuance.saturating_sub(imb.peek());\nSelf::deposit_event(Event::ReservedForParachainBond {\naccount: bond_config.account,\nvalue: imb.peek(),\n});\n}\nlet payout = DelayedPayout {\nround_issuance: total_issuance,\ntotal_staking_reward: left_issuance,\ncollator_commission: <CollatorCommission<T>>::get(),\n};\n<DelayedPayouts<T>>::insert(round_to_payout, payout);\n}\npub(crate) fn pay_one_collator_reward(paid_for_round: RoundIndex, payout_info: DelayedPayout<BalanceOf<T>>) -> (Option<(T::AccountId, BalanceOf<T>)>, Weight) {\n// TODO: it would probably be optimal to roll Points into the DelayedPayouts storage\n// item so that we do fewer reads each block\nlet total_points = <Points<T>>::get(paid_for_round);\nif total_points.is_zero() {\n// TODO: this case is obnoxious... it's a value query, so it could mean one\n// of two different logic errors:\n// 1. we removed it before we should have\n// 2. we called pay_one_collator_reward when we were actually done with deferred payouts\nlog::warn!(\"pay_one_collator_reward called with no <Points<T>> for the round!\");\nreturn (None, 0u64.into());\n}\nlet mint = |amt: BalanceOf<T>, to: T::AccountId| {\nif let Ok(amount_transferred) = T::Currency::deposit_into_existing(&to, amt) {\nSelf::deposit_event(Event::Rewarded {\naccount: to.clone(),\nrewards: amount_transferred.peek(),\n});\n}\n};\nlet collator_fee = payout_info.collator_commission;\nlet collator_issuance = collator_fee * payout_info.round_issuance;\nif let Some((collator, pts)) = <AwardedPts<T>>::iter_prefix(paid_for_round).drain().next() {\nlet mut extra_weight = 0;\nlet pct_due = Perbill::from_rational(pts, total_points);\nlet total_paid = pct_due * payout_info.total_staking_reward;\nlet mut amt_due = total_paid;\n// Take the snapshot of block author and delegations\nlet state = <AtStake<T>>::take(paid_for_round, &collator);\nlet num_delegators = state.delegations.len();\nif state.delegations.is_empty() {\n// solo collator with no delegators\nmint(amt_due, collator.clone());\nextra_weight += T::OnCollatorPayout::on_collator_payout(paid_for_round, collator.clone(), amt_due);\n} else {\n// pay collator first; commission + due_portion\nlet collator_pct = Perbill::from_rational(state.bond, state.total);\nlet commission = pct_due * collator_issuance;\namt_due = amt_due.saturating_sub(commission);\nlet collator_reward = (collator_pct * amt_due).saturating_add(commission);\nmint(collator_reward, collator.clone());\nextra_weight += T::OnCollatorPayout::on_collator_payout(paid_for_round, collator.clone(), collator_reward);\n// pay delegators due portion\nfor Bond { owner, amount } in state.delegations {\nlet percent = Perbill::from_rational(amount, state.total);\nlet due = percent * amt_due;\nif !due.is_zero() {\nmint(due, owner.clone());\n}\n}\n}\n(Some((collator, total_paid)), T::WeightInfo::pay_one_collator_reward(num_delegators as u32) + extra_weight)\n} else {\n// Note that we don't clean up storage here; it is cleaned up in handle_delayed_payouts()\n(None, 0u64.into())\n}\n}\nSolution\nIf the transfer fails, the transaction should be rolled back.\nStatus\nIgnored; the project team states that it will be updated in subsequent versions.",
+ "description_summary": "Missing rollback logic for failed transfers in staking payouts."
+ },
+ {
+ "title": "Missing error message",
+ "repository": "https://github.com/OAK-Foundation/OAK-blockchain",
+ "audited_commit": "643342e936bbc821d2fb91be69872e4fcecd2273",
+ "reported_remediated_commit": null,
+ "location": [
+ {
+ "file_path": "pallets/parachain-staking/src/types.rs",
+ "lines": null
+ }
+ ],
+ "reported_impact": "Suggestion",
+ "reported_likelihood": null,
+ "cwe_classification": null,
+ "vulnerability_class_audit": "Malicious Event Log",
+ "description": "[N6] [Suggestion] Missing error message\nCategory: Malicious Event Log Audit\nContent\npallets/parachain-staking/src/types.rs\nThe call let new_bottom_delegation = top_delegations.delegations.pop().expect(\"\"); is missing an error message.\npub fn add_top_delegation<T: Config>(&mut self, candidate: &T::AccountId, delegation: Bond<T::AccountId, BalanceOf<T>>) -> Option<BalanceOf<T>> where BalanceOf<T>: Into<Balance> + From<Balance>,\n{\nlet mut less_total_staked = None;\nlet mut top_delegations = <TopDelegations<T>>::get(candidate).expect(\"CandidateInfo existence => TopDelegations existence\");\nlet max_top_delegations_per_candidate = T::MaxTopDelegationsPerCandidate::get();\nif top_delegations.delegations.len() as u32 == max_top_delegations_per_candidate {\n// pop lowest top delegation\nlet new_bottom_delegation = top_delegations.delegations.pop().expect(\"\");\ntop_delegations.total = top_delegations.total.saturating_sub(new_bottom_delegation.amount);\nif matches!(self.bottom_capacity, CapacityStatus::Full) {\nless_total_staked = Some(self.lowest_bottom_delegation_amount);\n}\nself.add_bottom_delegation::<T>(true, candidate, new_bottom_delegation);\n}\n// insert into top\ntop_delegations.insert_sorted_greatest_to_least(delegation);\n// update candidate info\nself.reset_top_data::<T>(candidate.clone(), &top_delegations);\nif less_total_staked.is_none() {\n// only increment delegation count if we are not kicking a bottom delegation\nself.delegation_count = self.delegation_count.saturating_add(1u32);\n}\n<TopDelegations<T>>::insert(&candidate, top_delegations);\nless_total_staked\n}\nSolution\nRecord the corresponding error message.\nStatus\nIgnored; the project team states that it will be updated in subsequent versions.",
+ "description_summary": "Missing error message in delegation handling, leaving potential issues unlogged."
+ }
+ ]
+ },
+ {
+ "audited_project_id": 4,
+ "project_name": "Pendulum",
+ "auditor": "Hacken",
+ "audit_link": "https://audits.hacken.io/pendulum/l1-pendulum-pendulum-chain-jun2023/",
+ "findings": [
+ {
+ "title": "ChainExtension Implementation Lacks Weight Charging",
+ "repository": "https://github.com/pendulum-chain/pendulum",
+ "audited_commit": "d01528d17b96bf3de72c36deb3800c2ed0cf2afb",
+ "reported_remediated_commit": "5b922a210b2a6705d3ea6fefbf67b317698f7b80",
+ "location": [
+ {
+ "file_path": "runtime/foucoco/src/lib.rs",
+ "lines": null
+ }
+ ],
+ "reported_impact": "Medium",
+ "reported_likelihood": null,
+ "cwe_classification": null,
+ "vulnerability_class_audit": "Denial-of-Service (DoS)",
+ "description": "ChainExtension Implementation Lacks Weight Charging\nThe current implementation of the ChainExtension trait fails to charge weight when allowing smart contracts to call into the runtime.\nID: PDM-006\nScope: Chain Extension\nSeverity: MEDIUM\nVulnerability Type: Denial-of-Service (DoS)\nStatus: Fixed (5b922a210b2a6705d3ea6fefbf67b317698f7b80)\nDescription:\nThe call method in the ChainExtension trait defines how smart contracts can interact with the runtime. When a contract makes state changes by calling into the runtime, the corresponding weight should be charged. However, the current implementation of ChainExtension lacks any weight charging mechanism. It allows smart contracts to make calls such as transfer, approve_transfer, and transfer_approved, which are not queries and result in state changes.\nBy not charging weight for these operations, the system becomes vulnerable to potential security issues, particularly Denial-of-Service (DoS) attacks. Malicious contracts can exploit this lack of weight charging by flooding the system with a high volume of calls, overloading the network and disrupting its normal operation.\nRecommendation:\nTo mitigate this vulnerability, it is crucial to incorporate weight calculations and invoke the charge_weight function before accessing contract memory. Here is an example of how to integrate this in the code:\nlet charged_weight = env.charge_weight(sufficient_weight)?;\ntrace!(target: \"runtime\", \"[ChainExtension]|call|func_id / charge_weight:{:?}\", charged_weight);\nlet input = env.read(256)?;\nBy implementing weight charging for contract calls, the system will be protected against DoS attacks caused by unmetered runtime calls.",
+ "description_summary": "The ChainExtension implementation does not charge weight for runtime calls, making the system vulnerable to Denial-of-Service (DoS) attacks."
+ },
+ {
+ "title": "Vector of unlimited size in the pallet",
+ "repository": "https://github.com/pendulum-chain/pendulum",
+ "audited_commit": "d01528d17b96bf3de72c36deb3800c2ed0cf2afb",
+ "reported_remediated_commit": "05607a1a9cd2ad3cebeff1294b2e4c34fa3e4721",
+ "location": [
+ {
+ "file_path": "pallets/orml-currencies-allowance-extension/src/lib.rs",
+ "lines": [134]
+ }
+ ],
+ "reported_impact": "Low",
+ "reported_likelihood": null,
+ "cwe_classification": null,
+ "vulnerability_class_audit": "Memory exhaustion / DoS",
+ "description": "Vector of unlimited size in the pallet\nThe orml-currencies-allowance-extension pallet employs the usage of the Vec data structure without incorporating any size checks.\nID: PDM-007\nScope: orml-currencies-allowance-extension pallet\nSeverity: LOW\nVulnerability Type: Memory exhaustion / DoS\nStatus: Fixed (c1a20acd965cc024ac756effbff8a12522dac87a and 05607a1a9cd2ad3cebeff1294b2e4c34fa3e4721)\n\nDescription:\nWithin the orml-currencies-allowance-extension pallet, the Vec structure is utilized in the enum Event and struct GenesisConfig. Additionally, the extrinsic functions add_allowed_currencies and remove_allowed_currencies accept a vector as a parameter. Here's an example:\npallets/orml-currencies-allowance-extension/src/lib.rs:134:\n\n#[pallet::call_index(0)]\n#[pallet::weight(<T as Config>::WeightInfo::add_allowed_currencies())]\n#[transactional]\npub fn add_allowed_currencies(\n origin: OriginFor<T>,\n currencies: Vec<CurrencyOf<T>>,\n) -> DispatchResult {\n ensure_root(origin)?;\n for i in currencies.clone() {\n AllowedCurrencies::<T>::insert(i, ());\n }\n Self::deposit_event(Event::AllowedCurrenciesAdded { currencies });\n Ok(())\n}\n\nIf an excessively large vector is provided as input to these functions, it can result in node overload due to the necessity of iterating through the entire vector. This poses a significant risk, leading to potential memory exhaustion and creating a vulnerability for denial-of-service (DoS) attacks. Although the function is restricted to root access, it's important to note that if the root account is compromised or controlled by a malicious actor, they can exploit this vulnerability by deliberately supplying large vectors. This would cause the node to consume excessive resources, potentially disrupting the normal operation of the system.\n\nRecommendation:\nTo mitigate the risks associated with the use of unlimited-sized vectors, it is strongly recommended to revise the implementation in the orml-currencies-allowance-extension pallet. We advise against the use of the Vec data structure within any part of the pallet. Instead, consider utilizing frame_support::BoundedVec, which offers a bounded-size variant of a vector. Alternatively, it's important to impose a cap on the length of the vector during each of its instances. By ensuring a maximum limit, the vector's length stays within manageable bounds, preventing excessively long processing times or resource consumption during iterations over this data structure.\n\nBy proactively addressing this issue, you can enhance the stability and security of the orml-currencies-allowance-extension pallet, ensuring the robustness and reliability of the overall system.",
+ "description_summary": "Unrestricted vector size can lead to memory exhaustion and DoS attacks."
+ },
+ {
+ "title": "Employment of Sudo Pallet",
+ "repository": "https://github.com/pendulum-chain/pendulum",
+ "audited_commit": "d01528d17b96bf3de72c36deb3800c2ed0cf2afb",
+ "reported_remediated_commit": null,
+ "location": [
+ {
+ "file_path": "runtime/foucoco/src/lib.rs",
+ "lines": [1509]
+ },
+ {
+ "file_path": "node/src/chain_spec.rs",
+ "lines": [229]
+ }
+ ],
+ "reported_impact": null,
+ "reported_likelihood": null,
+ "cwe_classification": null,
+ "vulnerability_class_audit": "Centralization",
+ "description": "Employment of Sudo Pallet\nThe current implementation of the sudo FRAME pallet in the runtime employs it as an alternative to the governance mechanism.\nID: PDM-010\nScope: Decentralization\nStatus: Acknowledged\n\nDescription:\nThe sudo pallet is integrated into the runtime configuration:\nruntime/foucoco/src/lib.rs:1509:\nconstruct_runtime!(\n pub enum Runtime where\n Block = Block,\n NodeBlock = opaque::Block,\n UncheckedExtrinsic = UncheckedExtrinsic,\n {\n /* ... */\n // Governance\n Sudo: pallet_sudo::{Pallet, Call, Storage, Config, Event<T>} = 12,\n /* ... */\n }\n);\n\nThe root account, initially set at genesis, is defined as follows:\nnode/src/chain_spec.rs:229:\nlet sudo_account =\n pallet_multisig::Pallet::<Runtime>::multi_account_id(&signatories[..], 3);\n\nThis configuration allows the use of the ensure_root method for operations such as add_allowed_currencies and remove_allowed_currencies. Although there is no immediate security risk associated with this setup, it raises concerns regarding potential centralization.\n\nRecommendation:\nUtilizing a root account for governance purposes is considered a less favorable design choice due to its potential for centralization and the security risks associated with compromised private keys.\n\nTo address this issue, we recommend the following actions:\n1. Comprehensive Documentation: It is crucial to thoroughly document the intended use of the sudo pallet and the root account within the project. This documentation should provide clear explanations of the potential risks and limitations associated with their usage. Both the development team and end-users should be adequately informed to ensure transparency and informed decision-making. If there are plans to deactivate the sudo functionality after the network launch, a detailed process similar to the sudo removal outlined by Polkadot should be documented.\n\n2. Auditing and Monitoring: Prior to the removal of the sudo pallet, it is essential to establish regular auditing and monitoring processes. These measures will help ensure that the sudo pallet and the root account are not misused or compromising the system's security. By conducting thorough audits and continuous monitoring, any potential vulnerabilities or misuse can be identified and addressed promptly.\n\nBy following these recommendations, you will mitigate potential centralization concerns and enhance the overall governance and security aspects of the project.",
+ "description_summary": "The use of the sudo pallet risks centralization and security vulnerabilities due to potential misuse of root access."
+ },
+ {
+ "title": "Error Handling in Chain Extension",
+ "repository": "https://github.com/pendulum-chain/pendulum",
+ "audited_commit": "d01528d17b96bf3de72c36deb3800c2ed0cf2afb",
+ "reported_remediated_commit": null,
+ "location": [
+ {
+ "file_path": "runtime/foucoco/src/lib.rs",
+ "lines": null
+ }
+ ],
+ "reported_impact": null,
+ "reported_likelihood": null,
+ "cwe_classification": null,
+ "vulnerability_class_audit": "Code Quality",
+ "description": "Error Handling in Chain Extension\nThe implementation of ChainExtension often utilizes the generic DispatchError::Other(\"Explanatory string\") error, which makes error handling difficult.\nID: PDM-009\nScope: Code Quality\nStatus: Acknowledged\n\nDescription:\nThe current implementation of ChainExtension frequently relies on the generic DispatchError::Other() error. This approach makes error handling challenging, particularly for developers working with smart contracts. The indiscriminate use of this error type makes it difficult to monitor and diagnose specific errors, impeding efficient troubleshooting and code improvement.\n\nRecommendation:\nTo improve error handling in the ChainExtension implementation, we recommend implementing a custom enum that covers all the required error types. This custom enum will provide a more structured approach to error handling by encapsulating specific errors and enabling precise identification and resolution of issues.\n\nBy implementing the custom enum and incorporating it into the RetVal result, developers will have access to more detailed error information, allowing them to effectively identify and handle different error scenarios. This approach enhances the overall robustness and maintainability of the codebase, as well as facilitates troubleshooting and future improvements.",
+ "description_summary": "Use of generic DispatchError::Other hampers error handling and troubleshooting."
+ },
+ {
+ "title": "Hardcoded Constants in match",
+ "repository": "https://github.com/pendulum-chain/pendulum",
+ "audited_commit": "d01528d17b96bf3de72c36deb3800c2ed0cf2afb",
+ "reported_remediated_commit": null,
+ "location": [
+ {
+ "file_path": "runtime/foucoco/src/lib.rs",
+ "lines": [964]
+ }
+ ],
+ "reported_impact": null,
+ "reported_likelihood": null,
+ "cwe_classification": null,
+ "vulnerability_class_audit": "Code Quality",
+ "description": "Hardcoded Constants in match\nThe presence of hardcoded constants in the match statement within the implementation of ChainExtension hampers code readability.\nID: PDM-008\nScope: Code Quality\nStatus: Acknowledged\n\nDescription:\nThe following code segment has drawn our attention:\nruntime/foucoco/src/lib.rs:964:\n\nmatch func_id {\n // transfer\n 1105 => { /* ... */ },\n // balance\n 1106 => { /* ... */ },\n // total_supply\n 1107 => { /* ... */ },\n // approve_transfer\n 1108 => { /* ... */ },\n // transfer_approved\n 1109 => { /* ... */ },\n // allowance\n 1110 => { /* ... */ },\n // dia price feed\n 7777 => { /* ... */ },\n _ => { /* ... */ },\n}\n\nWhile the code comments provide some understanding of the functionality associated with each func_id, it is recommended to enhance the code logic in this section to improve comprehensibility, rather than solely relying on comments.\n\nRecommendation:\nWe recommend creating a separate enum to handle all the supported func_id options, using descriptive names that correspond to each function.
This approach enhances code readability and maintainability:\n\nenum FuncId {\n Transfer,\n Balance,\n TotalSupply,\n ApproveTransfer,\n TransferApproved,\n Allowance,\n DiaPriceFeed,\n}\n\nimpl TryFrom<u16> for FuncId {\n type Error = DispatchError;\n fn try_from(func_id: u16) -> Result<Self, Self::Error> {\n let id = match func_id {\n 1105 => Self::Transfer,\n 1106 => Self::Balance,\n 1107 => Self::TotalSupply,\n 1108 => Self::ApproveTransfer,\n 1109 => Self::TransferApproved,\n 1110 => Self::Allowance,\n 7777 => Self::DiaPriceFeed,\n _ => {\n error!(\"Called an unregistered `func_id`: {:?}\", func_id);\n return Err(DispatchError::Other(\"Unimplemented func_id\"));\n }\n };\n Ok(id)\n }\n}\n\nThis enum allows for easier handling of the logic within the match statement in the call implementation.\n\nFurthermore, it is good practice to implement the logic for each function in a separate method instead of encapsulating all the code within the match arms. This promotes code modularity and readability.\n\nHere's an example of how the code would look with the recommended changes:\n\nlet func_id = FuncId::try_from(env.func_id())?;\nmatch func_id {\n FuncId::Transfer => /* Call method that performs transfer */,\n FuncId::Balance => /* Call method that returns balance */,\n FuncId::TotalSupply => /* ... */,\n FuncId::ApproveTransfer => /* ... */,\n FuncId::TransferApproved => /* ... */,\n FuncId::Allowance => /* ... */,\n FuncId::DiaPriceFeed => /* ... */,\n}\n\nWith these improvements, the code becomes more comprehensible, maintainable, and follows best practices.",
+ "description_summary": "Hardcoded constants in match statements reduce code readability and maintainability."
+ },
+ {
+ "title": "Linter Warnings",
+ "repository": "https://github.com/pendulum-chain/pendulum",
+ "audited_commit": "d01528d17b96bf3de72c36deb3800c2ed0cf2afb",
+ "reported_remediated_commit": null,
+ "location": [
+ {
+ "file_path": "pallets/orml-currencies-allowance-extension/src/lib.rs",
+ "lines": [245, 246]
+ }
+ ],
+ "reported_impact": null,
+ "reported_likelihood": null,
+ "cwe_classification": null,
+ "vulnerability_class_audit": "Linters"
+ },
+ {
+ "title": "Logging in Runtime",
+ "repository": "https://github.com/pendulum-chain/pendulum",
+ "audited_commit": "d01528d17b96bf3de72c36deb3800c2ed0cf2afb",
+ "reported_remediated_commit": null,
+ "location": [
+ {
+ "file_path": "runtime/foucoco/src/lib.rs",
+ "lines": null
+ }
+ ],
+ "reported_impact": null,
+ "reported_likelihood": null,
+ "cwe_classification": null,
+ "vulnerability_class_audit": "Code Quality",
+ "description": "Logging in Runtime\nThe implementation of ChainExtension contains a lot of unnecessary warn!() macros.\nID: PDM-011\nScope: Logging\nStatus: Acknowledged\n\nDescription:\nUpon reviewing the runtime code, it was observed that numerous instances of the warn!() macro are present. While the warn macro can be useful during testing or debugging phases, it is unnecessary in the final version of the code. These superfluous warn macros clutter the codebase and can impede code readability and maintenance.\n\nUnneeded logging statements add noise to the code and distract developers from critical information. In the absence of proper justification, excessive logging can obscure important log messages and make it more challenging to identify and address genuine issues.\n\nRecommendation:\nTo enhance the code quality and maintainability of the runtime, we recommend refining the use of warn macros.
By limiting the use of these macros to critical and important situations, the codebase will become more concise and easier to comprehend. This targeted approach will enable developers to focus on essential log messages that offer meaningful insights into the system's behavior, rather than being overwhelmed by extraneous warnings. Therefore, unnecessary warn macros should be reviewed and removed, with a priority on retaining only those that provide critical and important warnings.",
+ "description_summary": "Excessive warn!() macros in the implementation of ChainExtension clutter code and reduce maintainability."
+ },
+ {
+ "title": "Pendulum build",
+ "repository": "https://github.com/pendulum-chain/pendulum",
+ "audited_commit": "d01528d17b96bf3de72c36deb3800c2ed0cf2afb",
+ "reported_remediated_commit": null,
+ "location": [
+ {
+ "file_path": "Cargo.toml",
+ "lines": null
+ }
+ ],
+ "reported_impact": null,
+ "reported_likelihood": null,
+ "cwe_classification": null,
+ "vulnerability_class_audit": "Build Process",
+ "description": "Pendulum build\nThe Pendulum chain demonstrates a smooth and error-free build process.\nID: PDM-001\nScope: Build Process\nStatus: Fixed\n\nDescription:\nThe build process for the Pendulum chain is efficient and error-free. When executing the cargo build --release command, the output confirms a successful build with only one minor compiler warning related to an unused import. The build process adheres to sound Rust coding practices and follows idiomatic conventions.\n\nDuring the assessment, it was observed that the orml-currencies-allowance-extension pallet is not included as a member of the workspace, although it should be.\n\nRecommendation:\nTo address this issue, we recommend adding pallets/orml-currencies-allowance-extension to the members list in the main Cargo.toml file of the workspace.\n\nPlease note that this issue does not encompass the results of linting tools, such as Clippy, which may provide additional warnings and recommendations for improving code quality. These will be addressed separately in PDM-002.",
+ "description_summary": "Missing a member in the workspace configuration."
+ },
+ {
+ "title": "Superfluous Implementation of Hooks Trait",
+ "repository": "https://github.com/pendulum-chain/pendulum",
+ "audited_commit": "d01528d17b96bf3de72c36deb3800c2ed0cf2afb",
+ "reported_remediated_commit": null,
+ "location": [
+ {
+ "file_path": "pallets/orml-currencies-allowance-extension/src/lib.rs",
+ "lines": [99]
+ }
+ ],
+ "reported_impact": null,
+ "reported_likelihood": null,
+ "cwe_classification": null,
+ "vulnerability_class_audit": "Code Quality",
+ "description": "Superfluous Implementation of Hooks Trait\nThe Hooks trait has been declared in the orml-currencies-allowance-extension pallet, but no custom implementations have been provided.\nID: PDM-005\nScope: Code Quality\nStatus: Acknowledged\n\nDescription:\nThe following code snippet raises concerns:\npallets/orml-currencies-allowance-extension/src/lib.rs:99:\n\n#[pallet::hooks]\nimpl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {}\n\nThe Hooks trait is typically used to perform logic on every block initialization, finalization, and other specific actions. However, in the case of the orml-currencies-allowance-extension pallet, no methods from the Hooks trait are implemented.
While this does not cause any immediate detrimental effects, it can reduce code readability and increase complexity.\n\nRecommendation:\nTo improve code clarity and readability, it is recommended to eliminate the use of the Hooks trait in the orml-currencies-allowance-extension pallet. Since no custom implementations are provided, removing the Hooks trait declaration will simplify the code and remove unnecessary complexity.", + "description_summary": "The Hooks trait has been declared in the orml-currencies-allowance-extension pallet, but no custom implementations have been provided." + }, + { + "title": "Test Coverage", + "repository": "https://github.com/pendulum-chain/pendulum", + "audited_commit": "d01528d17b96bf3de72c36deb3800c2ed0cf2afb", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/orml-currencies-allowance-extension/src/lib.rs", + "lines": null + }, + { + "file_path": "runtime/foucoco/src/lib.rs", + "lines": null + } + ], + "reported_impact": null, + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Code Quality / Testing" + }, + { + "title": "Vulnerable and Unmaintained Dependencies", + "repository": "https://github.com/pendulum-chain/pendulum", + "audited_commit": "d01528d17b96bf3de72c36deb3800c2ed0cf2afb", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/orml-currencies-allowance-extension/Cargo.toml", + "lines": null + } + ], + "reported_impact": null, + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Dependency Management", + "description": "Vulnerable and Unmaintained Dependencies\nThe orml-currencies-allowance-extension pallet has dependencies that include one vulnerable crate plus three unmaintained crates.\nID: PDM-004\nScope: Dependencies\nStatus: Acknowledged\n\nDescription:\nThe orml-currencies-allowance-extension pallet has several dependencies that raise concerns regarding their security and maintenance. The following table provides details about these dependencies:\n\nDependency | Version | Id | Type | Remediation\n------------|---------|---------------|-----------------|--------------------------------------------------\ntime | 0.1.45 | RUSTSEC-2020-0071 | Vulnerability | Upgrade to >=0.2.23\nansi_term | 0.12.1 | RUSTSEC-2021-0139 | Unmaintained | Use alternative crates: anstyle, console, nu-ansi-term, owo-colors, stylish, yansi\nmach | 0.3.2 | RUSTSEC-2020-0168 | Unmaintained | Switch to mach2\nparity-wasm | 0.45.0 | RUSTSEC-2022-0061 | Unmaintained | Switch to wasm-tools\n\nAlthough these dependencies may not have an immediate impact on the security aspect and are part of the Substrate project, it is essential to regularly review dependencies and monitor for updates to ensure the overall security and maintenance of the project.\n\nRecommendation:\nTo address these concerns and maintain a secure codebase, it is recommended to take the following actions:\n\n1. Use cargo audit to check for any new vulnerabilities or outdated packages in your project's dependencies. Regularly perform these checks to stay updated on potential security issues.\n2. Monitor for new releases of Substrate and update your project accordingly. Keeping your project up to date with the latest Substrate releases ensures that you benefit from bug fixes, security patches, and improvements.\n\nAdditionally, it is worth noting that the latest available Substrate release at the time of writing is v0.9.43. 
Stay informed about new releases and evaluate the feasibility of updating your project to benefit from the latest features and improvements.",
+ "description_summary": "Dependency includes one vulnerable and three unmaintained crates requiring updates or replacements."
+ }
+ ]
+ },
+ {
+ "audited_project_id": 5,
+ "project_name": "Nodle",
+ "auditor": "Halborn",
+ "audit_link": "https://github.com/HalbornSecurity/PublicReports/blob/master/Substrate%20Audits/Nodle_Nodl_Substrate_Pallet_Security_Audit_Report_Halborn_Final.pdf",
+ "findings": [
+ {
+ "title": "TCR voting design should be improved",
+ "repository": "https://github.com/NodleCode/chain/",
+ "audited_commit": "de356170bfe2eb9f537e3c4861d6752dd099f43e",
+ "reported_remediated_commit": null,
+ "location": null,
+ "reported_impact": "4",
+ "reported_likelihood": "3",
+ "cwe_classification": null,
+ "vulnerability_class_audit": null,
+ "description": "3.1 (HAL-01) TCR VOTING DESIGN SHOULD BE IMPROVED - MEDIUM\nDescription:\nIt was observed that it is possible to:\n\u2022 Vote with 0 amount\n\u2022 Challenge yourself\n\u2022 Counter yourself\n\u2022 Vote for yourself\nBy combining these properties, some scenarios might be possible:\n\u2022 A whale can influence any challenge/counter decision by voting for itself. A whale can also farm additional tokens upon success by countering any application and then voting for itself.\n\u2022 By countering your own application and voting with 0 amount, it is possible to fill up the storage, since the values are pushed into a vector.\n\u2022 It is possible to remove yourself from the members in the root of trust.\n\nRisk Level:\nLikelihood - 3\nImpact - 4\n\nRecommendation:\nConsider improving the design by not letting the same account:\n\u2022 Vote for itself\n\u2022 Counter itself\n\u2022 Challenge itself\n\u2022 Vote with 0 deposit\n\nRemediation Plan:\nNOT APPLICABLE: The issue is marked as not applicable by the Nodle team, as the TCR and root of trust pallets will be removed.",
+ "description_summary": "TCR voting design allows self-voting, zero-amount votes, and potential manipulation by large token holders."
+ },
+ {
+ "title": "Denomination logic should be improved",
+ "repository": "https://github.com/NodleCode/chain/",
+ "audited_commit": "de356170bfe2eb9f537e3c4861d6752dd099f43e",
+ "reported_remediated_commit": "2db8b4707606bf88b8fdefbe616d67a720e434ea",
+ "location": [
+ {
+ "file_path": "pallets/staking/src/lib.rs",
+ "lines": null
+ }
+ ],
+ "reported_impact": "2",
+ "reported_likelihood": "4",
+ "cwe_classification": null,
+ "vulnerability_class_audit": null,
+ "description": "3.2 (HAL-02) DENOMINATION LOGIC SHOULD BE IMPROVED - MEDIUM\nDescription:\nIt was observed that if a nominator has a single validator, it is not possible to remove that validator through nominator_denominate, since the extrinsic checks that the remaining bond stays at or above the minimum (see Listing 1). In that case, nominator_denominate_all has to be used, which bypasses that check; this bypass is not intentional.\n\nCode Location:\nListing 1: pallets/staking/src/lib.rs\nif !do_force {\nensure!(\nremaining >= <MinNominatorTotalBond<T>>::get(),\n<Error<T>>::NominatorBondBelowMin\n);\n}\n\nRisk Level:\nLikelihood - 4\nImpact - 2\n\nRecommendation:\nConsider having a conditional statement in nominator_denominate that allows the forced removal of a validator if the nominator has only one validator.\n\nRemediation Plan:\nSOLVED: The issue was solved by the Nodle team.\n\u2022 Fix Commit",
+ "description_summary": "Denomination logic bypasses minimum bond check when only one validator is nominated."
+ },
+ {
+ "title": "Emergency shutdown not used in critical functions",
+ "repository": "https://github.com/NodleCode/chain/",
+ "audited_commit": "de356170bfe2eb9f537e3c4861d6752dd099f43e",
+ "reported_remediated_commit": null,
+ "location": [
+ {
+ "file_path": "pallets",
+ "lines": null
+ }
+ ],
+ "reported_impact": "4",
+ "reported_likelihood": "3",
+ "cwe_classification": null,
+ "vulnerability_class_audit": null,
+ "description": "3.3 (HAL-03) EMERGENCY SHUTDOWN NOT USED IN MANY CRITICAL FUNCTIONS - MEDIUM\nDescription:\nIt was observed that the emergency shutdown pallet is used only in the allocate function in the allocations pallet. However, there are more public functions across different pallets that might be problematic if, at any point in time, a bug (security or non-security) is discovered within them. There should be functionality to shut them down before new fixes are pushed.\n\nCode Location:\nThese functions should have a shutdown functionality:\n\nGrants pallet\n\u2022 add_vesting_schedule\n\nStaking pallet\n\u2022 validator_join_pool\n\u2022 validator_exit_pool\n\u2022 validator_bond_more\n\u2022 validator_bond_less\n\u2022 nominator_nominate\n\u2022 nominator_denominate\n\u2022 nominator_bond_more\n\u2022 nominator_bond_less\n\u2022 nominator_move_nomination\n\u2022 unbond_frozen\n\u2022 withdraw_unbonded\n\u2022 withdraw_staking_rewards\n\nRisk Level:\nLikelihood - 3\nImpact - 4\n\nRecommendation:\nConsider enabling shutdown functionality in critical public functions.\n\nExample Code:\nListing 2\nensure!(\n!pallet_emergency_shutdown::Pallet::<T>::shutdown(),\nError::<T>::UnderShutdown\n);\n\nRemediation Plan:\nPENDING: In a future release, the Nodle team will modify the emergency shutdown pallet to better generalize.",
+ "description_summary": "Emergency shutdown functionality is missing in multiple critical public functions across different pallets."
+ },
+ {
+ "title": "Missing sanity checks",
+ "repository": "https://github.com/NodleCode/chain/",
+ "audited_commit": "de356170bfe2eb9f537e3c4861d6752dd099f43e",
+ "reported_remediated_commit": "2db8b4707606bf88b8fdefbe616d67a720e434ea",
+ "location": [
+ {
+ "file_path": "pallets/staking/src/lib.rs",
+ "lines": [
+ {
+ "from": 201,
+ "to": 208
+ }
+ ]
+ }
+ ],
+ "reported_impact": "2",
+ "reported_likelihood": "3",
+ "cwe_classification": null,
+ "vulnerability_class_audit": null,
+ "description": "3.4 (HAL-04) MISSING SANITY CHECKS - LOW\nDescription:\nIt was observed that the set_staking_limits privileged function is missing sanity checks on the provided values. Even though it is a protected function, it is still advised to have some sanity checks to avoid any human error.\n\nCode Location:\nListing 3: pallets/staking/src/lib.rs\n201 pub fn set_staking_limits(\norigin: OriginFor<T>,\nmax_stake_validators: u32,\nmin_stake_session_selection: BalanceOf<T>,\nmin_validator_bond: BalanceOf<T>,\nmin_nominator_total_bond: BalanceOf<T>,\nmin_nominator_chill_threshold: BalanceOf<T>\n) -> DispatchResultWithPostInfo {\n\nRisk Level:\nLikelihood - 3\nImpact - 2\n\nRecommendation:\nIt is recommended to add sanity checks to ensure:\n\u2022 max_stake_validators != 0\n\u2022 min_stake_session_selection != 0\n\u2022 min_validator_bond != 0\n\u2022 min_nominator_total_bond != 0\n\nRemediation Plan:\nSOLVED: The issue was solved by the Nodle team.\n\u2022 Fix Commit",
+ "description_summary": "Missing sanity checks in set_staking_limits function may allow invalid parameter values."
+ },
+ {
+ "title": "Vesting to yourself is allowed",
+ "repository": "https://github.com/NodleCode/chain/",
+ "audited_commit": "de356170bfe2eb9f537e3c4861d6752dd099f43e",
+ "reported_remediated_commit": "91cfe0cb3300811bb02a47c4cf70f68c7e48f94d",
+ "location": [
+ {
+ "file_path": "pallets/grants/src/lib.rs",
+ "lines": [
+ {
+ "from": 157,
+ "to": 168
+ }
+ ]
+ }
+ ],
+ "reported_impact": "2",
+ "reported_likelihood": "3",
+ "cwe_classification": null,
+ "vulnerability_class_audit": null,
+ "description": "3.5 (HAL-05) VESTING TO YOURSELF IS ALLOWED - LOW\nDescription:\nIt was observed that you can create a vesting schedule to yourself.\n\nCode Location:\nListing 4: pallets/grants/src/lib.rs\n157 pub fn add_vesting_schedule(\norigin: OriginFor<T>,\ndest: <T::Lookup as StaticLookup>::Source,\nschedule: VestingScheduleOf<T>\n) -> DispatchResultWithPostInfo {\nlet from = ensure_signed(origin)?;\nlet to = T::Lookup::lookup(dest)?;\nSelf::do_add_vesting_schedule(&from, &to, schedule.clone())?;\nSelf::deposit_event(Event::VestingScheduleAdded(from, to, schedule));\nOk(().into())\n}\n\nRisk Level:\nLikelihood - 3\nImpact - 2\n\nRecommendation:\nPlease add a check that ensures that from != to in fn add_vesting_schedule.\n\nRemediation Plan:\nSOLVED: The issue was solved by the Nodle team.\n\u2022 Fix Commit",
+ "description_summary": "Vesting schedule allows setting up a vesting to oneself without restrictions."
+ },
+ {
+ "title": "Missing zero value check",
+ "repository": "https://github.com/NodleCode/chain/",
+ "audited_commit": "de356170bfe2eb9f537e3c4861d6752dd099f43e",
+ "reported_remediated_commit": "f31c19a06ab0047f0c533c83ba67654eedfa6147",
+ "location": [
+ {
+ "file_path": "pallets/grants/src/lib.rs",
+ "lines": [88]
+ }
+ ],
+ "reported_impact": "2",
+ "reported_likelihood": "2",
+ "cwe_classification": null,
+ "vulnerability_class_audit": null,
+ "description": "3.6 (HAL-06) MISSING ZERO VALUE CHECK - LOW\nDescription:\nIt was observed that the allocate function should have a zero value check on the amount argument.\n\nCode Location:\nListing 5: pallets/grants/src/lib.rs (Line 88)\n85 pub fn allocate(\norigin: OriginFor<T>,\nto: T::AccountId,\namount: BalanceOf<T>,\nproof: Vec<u8>\n) -> DispatchResultWithPostInfo {\nSelf::ensure_oracle(origin)?;\n...\n\nRisk Level:\nLikelihood - 2\nImpact - 2\n\nRecommendation:\nConsider adding zero value checks to these functions to avoid performing redundant operations if a zero value is received.\n\nRemediation Plan:\nSOLVED: The issue was solved by the Nodle team.\n\u2022 Fix Commit",
+ "description_summary": "Allocate function lacks a zero-value check for the amount argument."
+ },
+ {
+ "title": "Vesting schedules less than a current block can be created",
+ "repository": "https://github.com/NodleCode/chain/",
+ "audited_commit": "de356170bfe2eb9f537e3c4861d6752dd099f43e",
+ "reported_remediated_commit": null,
+ "location": null,
+ "reported_impact": "1",
+ "reported_likelihood": "1",
+ "cwe_classification": null,
+ "vulnerability_class_audit": null,
+ "description": "3.7 (HAL-07) VESTING SCHEDULES LESS THAN A CURRENT BLOCK CAN BE CREATED - INFORMATIONAL\nDescription:\nIt was observed that the pallet allows the creation of vesting schedules that start before the current block number. Such vesting schedules are no more than regular transfers with extra steps.
Therefore, they are redundant.\n\nExample:\nListing 6\nCurrent Block: 100\nVesting Schedule Start: 1st Block\nPeriod: 10 Blocks\nPeriod_count: 2\nPer Period: 1 knodl\nVesting Duration: 10 * 2 + 1 = 21 Blocks\nInitial Transfer sent: 2 knodl\nNext Claims: 0 since Vesting Duration < Current Block\n\nRisk Level:\nLikelihood - 1\nImpact - 1\n\nRecommendation:\nConsider adding a check that ensures that:\n(period * period_count) + start > current_block_number\n\nRemediation Plan:\nNOT APPLICABLE: The issue was marked as not applicable by the Nodle team, stating:\nThis can be useful to keep as it is. In fact, we may have to create retroactive awards that may have been partially vested.",
+ "description_summary": "Vesting schedules can be created with start blocks earlier than the current block, making them redundant."
+ },
+ {
+ "title": "Redundant check",
+ "repository": "https://github.com/NodleCode/chain/",
+ "audited_commit": "de356170bfe2eb9f537e3c4861d6752dd099f43e",
+ "reported_remediated_commit": "91cfe0cb3300811bb02a47c4cf70f68c7e48f94d",
+ "location": [
+ {
+ "file_path": "pallets/grants/src/lib.rs",
+ "lines": [253]
+ }
+ ],
+ "reported_impact": "1",
+ "reported_likelihood": "1",
+ "cwe_classification": null,
+ "vulnerability_class_audit": null,
+ "description": "3.8 (HAL-08) REDUNDANT CHECK - INFORMATIONAL\nDescription:\nIt was observed that the grants pallet contains a redundant check.\n\nCode Location:\nThere is no need for the second new_lock.is_zero() check, since it was already checked prior. The removal of the VestingSchedule can be performed within the first check.\n\nListing 7: pallets/grants/src/lib.rs (Line 253)\n247 if new_lock.is_zero() {\n248 T::Currency::remove_lock(VESTING_LOCK_ID, &target);\n249 }\n250 else {\n251 T::Currency::set_lock(VESTING_LOCK_ID, &target, new_lock, WithdrawReasons::all());\n252 }\n253 if new_lock.is_zero() {\n254 // No more claimable, clear\n255 VestingSchedules::<T>::remove(target.clone());\n256 }\n257 else {\n258 T::Currency::set_lock(VESTING_LOCK_ID, &target, new_lock, WithdrawReasons::all());\n}\n\nRisk Level:\nLikelihood - 1\nImpact - 1\n\nRecommendation:\nPlease remove the second new_lock.is_zero() check and remove the VestingSchedule within the first check.\n\nListing 8: pallets/grants/src/lib.rs\n247 if new_lock.is_zero() {\n248 T::Currency::remove_lock(VESTING_LOCK_ID, &target);\n249 VestingSchedules::<T>::remove(target.clone());\n250 }\n251 else {\n252 T::Currency::set_lock(VESTING_LOCK_ID, &target, new_lock, WithdrawReasons::all());\n\nRemediation Plan:\nSOLVED: The issue was solved by the Nodle team.\n\u2022 Fix Commit",
+ "description_summary": "Redundant check of new_lock.is_zero() found in grants pallet."
+ },
+ {
+ "title": "Redundant variable",
+ "repository": "https://github.com/NodleCode/chain/",
+ "audited_commit": "de356170bfe2eb9f537e3c4861d6752dd099f43e",
+ "reported_remediated_commit": "91cfe0cb3300811bb02a47c4cf70f68c7e48f94d",
+ "location": [
+ {
+ "file_path": "pallets/tcr/src/lib.rs",
+ "lines": [
+ {
+ "from": 138,
+ "to": 152
+ },
+ 478
+ ]
+ }
+ ],
+ "reported_impact": "1",
+ "reported_likelihood": "1",
+ "cwe_classification": null,
+ "vulnerability_class_audit": null,
+ "description": "3.9 (HAL-09) REDUNDANT VARIABLE - INFORMATIONAL\nDescription:\nIt was observed that the old_1 variable in the on_finalize function of the tcr pallet is redundant.
The tuple returned from commit_applications is Ok((new_members, Vec::new())). Therefore, old_1 is always going to be an empty vector, and extending it with old_2 does not make any difference. In this scenario, we only care about new_1.\n\nCode Location:\nListing 9: pallets/tcr/src/lib.rs\n138 fn on_finalize(block: T::BlockNumber) {\n139 let (mut new_1, mut old_1) =\n140 Self::commit_applications(block).unwrap_or((Vec::new(), Vec::new()));\n141 let (new_2, old_2) =\n142 Self::resolve_challenges(block).unwrap_or((Vec::new(), Vec::new()));\n143 // Should never be the same, so should not need some uniq checks\n145 new_1.extend(new_2);\n146 old_1.extend(old_2);\n148 new_1.sort();\n149 old_1.sort();\n151 Self::notify_members_change(new_1, old_1);\n\nListing 10: pallets/tcr/src/lib.rs (Line 478)\n460 fn commit_applications(block: T::BlockNumber) -> FinalizeHelperResultFrom<T> {\n461 let new_members = <Applications<T>>::iter()\n462 .filter(|(_account_id, application)| {\n463 block\n464 .checked_sub(&application.clone().created_block)\n465 .expect(\"created_block should always be smaller than block; qed\")\n466 >= T::FinalizeApplicationPeriod::get()\n})\n468 .map(|(account_id, application)| {\n469 <Applications<T>>::remove(account_id.clone());\n470 <Members<T>>::insert(account_id.clone(), application.clone());\n471 Self::unreserve_for(account_id.clone(), application.candidate_deposit);\n472 Self::deposit_event(Event::ApplicationPassed(account_id.clone()));\n474 account_id\n475 })\n476 .collect::<Vec<_>>();\n478 Ok((new_members, Vec::new())) // === HERE ===\n}\n\nRisk Level:\nLikelihood - 1\nImpact - 1\n\nRecommendation:\nConsider omitting old_1 and removing all actions performed on it.\n\nListing 11: pallets/tcr/src/lib.rs\n247 fn on_finalize(block: T::BlockNumber) {\n248 let (mut new_1, _) =\n249 Self::commit_applications(block).unwrap_or((Vec::new(), Vec::new()));\n250 let (new_2, mut old) =\n251 Self::resolve_challenges(block).unwrap_or((Vec::new(), Vec::new()));\n253 // Should never be the same, so should not need some uniq checks\n254 new_1.extend(new_2);\n257 new_1.sort();\n258 old.sort();\n260 Self::notify_members_change(new_1, old);\n\nRemediation Plan:\nSOLVED: The issue was solved by the Nodle team.\n\u2022 Fix Commit",
+ "description_summary": "Redundant variable old_1 in on_finalize function."
+ },
+ {
+ "title": "Usage of vulnerable crates",
+ "repository": "https://github.com/NodleCode/chain/",
+ "audited_commit": "de356170bfe2eb9f537e3c4861d6752dd099f43e",
+ "reported_remediated_commit": null,
+ "location": null,
+ "reported_impact": "1",
+ "reported_likelihood": "2",
+ "cwe_classification": null,
+ "vulnerability_class_audit": null,
+ "description": "3.10 (HAL-10) USAGE OF VULNERABLE CRATES - INFORMATIONAL\nDescription:\nIt was observed that the project uses crates with known vulnerabilities.\n\nCode Location:\nID | Package | Short Description\nRUSTSEC-2020-0159 | chrono | Potential segfault in 'localtime_r' invocations\nRUSTSEC-2020-0071 | time | Potential segfault in the time crate\nRUSTSEC-2021-0130 | lru | Use after free in lru crate\nRUSTSEC-2021-0067 | cranelift-codegen | Memory access due to code generation flaw in Cranelift module\nRUSTSEC-2021-009 | crossbeam-deque | Data race in crossbeam-deque\nRUSTSEC-2021-0079 | hyper | Integer overflow in hyper's parsing of the Transfer-Encoding header leads to data loss\nRUSTSEC-2021-0078 | hyper | Lenient hyper header parsing of Content-Length could allow request smuggling\nRUSTSEC-2021-0076 | libsecp256k1 | libsecp256k1 allows overflowing signatures\nRUSTSEC-2021-0070 | nalgebra | VecStorage Deserialize Allows Violation of Length Invariant\nRUSTSEC-2021-0073 | prost-types | Conversion from prost_types::Timestamp to SystemTime can cause an overflow and panic\nRUSTSEC-2021-0013 | raw-cpuid | Soundness issues in raw-cpuid\nRUSTSEC-2021-0089 | raw-cpuid | Optional Deserialize implementations lacking validation\nRUSTSEC-2021-0124 | tokio | Data race when sending and receiving after closing a oneshot channel\nRUSTSEC-2021-0110 | wasmtime | Multiple Vulnerabilities in Wasmtime\nRUSTSEC-2021-0115 | zeroize-derive | #[zeroize(drop)] doesn\u2019t implement Drop for enums\n\nRisk Level:\nLikelihood - 2\nImpact - 1\n\nRecommendation:\nEven if those vulnerable crates cannot impact the underlying application, it is advised to be aware of them and attempt to update them to non-vulnerable versions. Furthermore, it is necessary to set up dependency monitoring to always be alerted when a new vulnerability is disclosed in one of the project\u2019s crates.\n\nRemediation Plan:\nACKNOWLEDGED: The issue was acknowledged by the Nodle team and will be fixed later.",
+ "description_summary": "Usage of crates with known vulnerabilities identified."
+ },
+ {
+ "title": "Outdated Rust edition",
+ "repository": "https://github.com/NodleCode/chain/",
+ "audited_commit": "de356170bfe2eb9f537e3c4861d6752dd099f43e",
+ "reported_remediated_commit": "41ce7de56ff6dd701de1f5d247c91aaff6803954",
+ "location": [
+ {
+ "file_path": "Cargo.toml",
+ "lines": null
+ }
+ ],
+ "reported_impact": "1",
+ "reported_likelihood": "1",
+ "cwe_classification": null,
+ "vulnerability_class_audit": null,
+ "description": "3.11 (HAL-11) OUTDATED RUST EDITION - INFORMATIONAL\nDescription:\nIt was observed that the project is using an outdated Rust edition (2018).
The Rust 2021 edition has since been released; it includes many stability improvements and new features that might make the code more readable.\n\nCode Location:\n\u2022 Cargo.toml\n\nRisk Level:\nLikelihood - 1\nImpact - 1\n\nRecommendation:\nConsider updating Rust to the latest edition to use the latest features and stability improvements.\n\nReference:\nRust 2021 Edition Guide\n\nRemediation Plan:\nSOLVED: The issue was solved by the Nodle team.\n\u2022 Fix Commit",
+ "description_summary": "Project is using an outdated Rust edition (2018)."
+ }
+ ]
+ },
+ {
+ "audited_project_id": 6,
+ "project_name": "ReefChain",
+ "auditor": "Halborn",
+ "audit_link": "https://github.com/HalbornSecurity/PublicReports/blob/master/Substrate%20Audits/Reef_Chain_Substrate_Security_Audit_Report_Halborn_v1_1.pdf",
+ "findings": [
+ {
+ "title": "Integer overflow",
+ "repository": "https://github.com/reef-defi/reef-chain",
+ "audited_commit": "393d0c0821cc25ea5c6912d9cac8f61a9232c9a3",
+ "reported_remediated_commit": "6e4153498a28d03b8600739709cb200065c88781",
+ "location": [
+ {
+ "file_path": "modules/evm-accounts/src/lib.rs",
+ "lines": [182, 313, 314]
+ },
+ {
+ "file_path": "modules/evm-bridge/src/lib.rs",
+ "lines": [183, 186, 191]
+ }
+ ],
+ "reported_impact": "3",
+ "reported_likelihood": "3",
+ "cwe_classification": null,
+ "vulnerability_class_audit": null,
+ "description": "3.1 (HAL-01) INTEGER OVERFLOW - MEDIUM\nDescription:\nAn overflow happens when an arithmetic operation reaches the maximum size of a type. For instance, the ethereum_signable_message() method sums up several usize length values, which may end up overflowing the integer. In computer programming, an integer overflow occurs when an arithmetic operation attempts to create a numeric value that is outside of the range that can be represented with a given number of bits\u2014either larger than the maximum or lower than the minimum representable value.\n\nCode Location:\nListing 1: modules/evm-accounts/src/lib.rs (Lines 182)\n180 pub fn ethereum_signable_message(what: &[u8], extra: &[u8]) -> Vec<u8> {\n181 let prefix = b\"reef evm:\";\n182 let mut l = prefix.len() + what.len() + extra.len();\n183 let mut rev = Vec::new();\n\nListing 2: modules/evm-accounts/src/lib.rs (Lines 313,314)\n312 pub fn to_ascii_hex(data: &[u8]) -> Vec<u8> {\n313 let mut r = Vec::with_capacity(data.len() * 2);\n314 let mut push_nibble = |n| r.push(if n < 10 { b'0' + n } else { b'a' - 10 + n });\n\nListing 3: modules/evm-bridge/src/lib.rs (Lines 183,186,191)\n182 let offset = U256::from_big_endian(&output[0..32]);\n183 let length = U256::from_big_endian(&output[offset.as_usize()..offset.as_usize() + 32]);\n184 ensure!(\n// output is 32-byte aligned. ensure total_length >= offset + string length + string data length.\n186 output.len() >= offset.as_usize() + 32 + length.as_usize(),\n187 Error::<T>::InvalidReturnValue\n188 );\n189 let mut data = Vec::new();\n191 data.extend_from_slice(&output[offset.as_usize() + 32..offset.as_usize() + 32 + length.as_usize()]);\n\nRisk Level:\nLikelihood - 3\nImpact - 3\n\nRecommendations:\nIt is recommended to use vetted safe math libraries for arithmetic operations consistently throughout the smart contract system.
Consider replacing the addition and multiplication operators with Rust\u2019s checked_add and checked_mul methods.\n\nRemediation:\nSOLVED: Reef fixed the issue in commit 6e4153498a28d03b8600739709cb200065c88781.",
+ "description_summary": "Integer overflow vulnerability due to unchecked arithmetic operations in ethereum_signable_message and to_ascii_hex functions."
+ },
+ {
+ "title": "Total issuance not updated on mint",
+ "repository": "https://github.com/reef-defi/reef-chain",
+ "audited_commit": "393d0c0821cc25ea5c6912d9cac8f61a9232c9a3",
+ "reported_remediated_commit": null,
+ "location": [
+ {
+ "file_path": "modules/currencies/src/lib.rs",
+ "lines": [168]
+ }
+ ],
+ "reported_impact": "3",
+ "reported_likelihood": "3",
+ "cwe_classification": null,
+ "vulnerability_class_audit": null,
+ "description": "3.2 (HAL-02) TOTAL ISSUANCE NOT UPDATED ON MINT - MEDIUM\nDescription:\nThe update_balance dispatchable defined in modules/currencies/src/lib.rs does not update the total issuance of the currency (identified by a user-supplied ID) that is minted to the target address. This may lead to discrepancies in token data.\n\nCode Location:\nListing 4: modules/currencies/src/lib.rs (Lines 168)\n159 #[pallet::weight(T::WeightInfo::update_balance_non_native_currency())]\n160 pub fn update_balance(\n161 origin: OriginFor<T>,\n162 who: <T::Lookup as StaticLookup>::Source,\n163 currency_id: CurrencyIdOf<T>,\n164 amount: AmountOf<T>,\n165 ) -> DispatchResultWithPostInfo {\n166 ensure_root(origin)?;\n167 let dest = T::Lookup::lookup(who)?;\n168 <Self as MultiCurrencyExtended<T::AccountId>>::update_balance(currency_id, &dest, amount)?;\n169 Ok(().into())\n170 }\n\nRisk Level:\nLikelihood - 3\nImpact - 3\n\nRecommendations:\nTotal issuance should be updated every time tokens are minted or burned.\n\nRemediation Plan:\nACKNOWLEDGED: Reef states that the affected function is sudo only and will be deprecated in a future release.",
+ "description_summary": "Total issuance is not updated on mint, causing potential discrepancies in token data."
+ },
+ {
+ "title": "Casting overflow",
+ "repository": "https://github.com/reef-defi/reef-chain",
+ "audited_commit": "26ed9e88e773f5d628c01d558945cd38cd5a7d5a",
+ "reported_remediated_commit": "313439bb7940afa0f0d5060fbcbbe26d5a3e5298",
+ "location": [
+ {
+ "file_path": "modules/evm-bridge/src/lib.rs",
+ "lines": [183, 186, 191]
+ }
+ ],
+ "reported_impact": "3",
+ "reported_likelihood": "2",
+ "cwe_classification": null,
+ "vulnerability_class_audit": null,
+ "description": "3.3 (HAL-03) CASTING OVERFLOW - LOW\nDescription:\nWhen converting or casting between types, an \u201coverflow\u201d/wrapping may occur and result in logic bugs leading to a thread panic. The decode_string utility method defined in modules/evm-bridge/src/lib.rs does not validate whether the values of the offset and length variables can be cast to the usize type. Although the method is not exported and available externally, the method is still vulnerable, and the risk could increase in the future if the method is used before it is patched.\n\nCode Location:\nListing 5: modules/evm-bridge/src/lib.rs (Lines 183,186,191)\n182 let offset = U256::from_big_endian(&output[0..32]);\n183 let length = U256::from_big_endian(&output[offset.as_usize()..offset.as_usize() + 32]);\n184 ensure!(\n// output is 32-byte aligned.
ensure total_length >= offset + string length + string data length.\n186 output.len() >= offset.as_usize() + 32 + length.as_usize(),\n187 Error::<T>::InvalidReturnValue\n188 );\n189 let mut data = Vec::new();\n191 data.extend_from_slice(&output[offset.as_usize() + 32..offset.as_usize() + 32 + length.as_usize()]);\n\nRisk Level:\nLikelihood - 2\nImpact - 3\n\nRecommendations:\nCheck the value against the maximum type value before casting.\n\nListing 6:\nif x <= usize::MAX {\n// logic ...\n}\n\nRemediation:\nSOLVED: Reef fixed the issue in commit 313439bb7940afa0f0d5060fbcbbe26d5a3e5298.",
+ "description_summary": "Casting overflow vulnerability due to unchecked type conversion in decode_string method."
+ },
+ {
+ "title": "Slash amount validation missing",
+ "repository": "https://github.com/reef-defi/reef-chain",
+ "audited_commit": "393d0c0821cc25ea5c6912d9cac8f61a9232c9a3",
+ "reported_remediated_commit": "bd43bec58890be763b32bfdfd18ba85a8c0ef9e5",
+ "location": [
+ {
+ "file_path": "modules/currencies/src/lib.rs",
+ "lines": [396]
+ }
+ ],
+ "reported_impact": "2",
+ "reported_likelihood": "2",
+ "cwe_classification": null,
+ "vulnerability_class_audit": null,
+ "description": "3.4 (HAL-04) SLASH AMOUNT VALIDATION MISSING - LOW\nDescription:\nThe slash_reserved method defined in modules/currencies/src/lib.rs does not validate whether the value of the user-supplied value parameter exceeds the actual balance of the account owned by the address that is to have its ERC20 tokens slashed.\n\nCode Location:\nListing 7: modules/currencies/src/lib.rs (Lines 396)\n394 fn slash_reserved(currency_id: Self::CurrencyId, who: &T::AccountId, value: Self::Balance) -> Self::Balance {\n395 match currency_id {\n396 CurrencyId::ERC20(_) => value,\n397 CurrencyId::Token(TokenSymbol::REEF) => T::NativeCurrency::slash_reserved(who, value),\n398 _ => T::MultiCurrency::slash_reserved(currency_id, who, value),\n399 }\n400 }\n\nRisk Level:\nLikelihood - 2\nImpact - 2\n\nRecommendations:\nThe slashed amount should always be lesser or equal to the account balance that is to be slashed.\n\nRemediation:\nSOLVED: Reef fixed the issue in commit bd43bec58890be763b32bfdfd18ba85a8c0ef9e5.",
+ "description_summary": "Missing validation in slash_reserved method allows slashing beyond account balance."
+ },
+ {
+ "title": "Currency ID validation missing",
+ "repository": "https://github.com/reef-defi/reef-chain",
+ "audited_commit": "393d0c0821cc25ea5c6912d9cac8f61a9232c9a3",
+ "reported_remediated_commit": null,
+ "location": [
+ {
+ "file_path": "modules/currencies/src/lib.rs",
+ "lines": [
+ 125, 178, 186, 199, 217, 235, 290, 303, 316, 324, 336, 376, 386,
+ 394, 402, 423, 445
+ ]
+ }
+ ],
+ "reported_impact": "2",
+ "reported_likelihood": "2",
+ "cwe_classification": null,
+ "vulnerability_class_audit": null,
+ "description": "3.5 (HAL-05) CURRENCY ID VALIDATION MISSING - LOW\nDescription:\nMany dispatchables and helper methods defined in modules/currencies/src/lib.rs do not check whether the user-supplied currency ID matches any of the existing ones before calling the possibly resource-intensive underlying utility functions.\n\nCode Location:\nListing 8: modules/currencies/src/lib.rs (Lines 125)\n121 #[pallet::weight(T::WeightInfo::transfer_non_native_currency())]\n122 pub fn transfer(\n123 origin: OriginFor<T>,\n124 dest: <T::Lookup as StaticLookup>::Source,\n125 currency_id: CurrencyIdOf<T>,\n126 #[pallet::compact] amount: BalanceOf<T>,\n127 ) -> DispatchResultWithPostInfo {\n128 let from = ensure_signed(origin)?;\n129 let to = T::Lookup::lookup(dest)?;\n130 <Self as MultiCurrency<T::AccountId>>::transfer(currency_id, &from, &to, amount)?;\n131 Ok(().into())\n132 }\n\nList of all the functions that fail to validate the currency ID:\nListing 9: (Lines 2,3)\n1 auditor@halborn:~/projects/reef/reef-chain/modules/currencies$ \\\n2 > grep -ne 'fn.*CurrencyId' src/lib.rs \\\n3 > | cut -d '-' -f 1\n4 178: fn minimum_balance(currency_id: Self::CurrencyId)\n5 186: fn total_issuance(currency_id: Self::CurrencyId)\n6 199: fn total_balance(currency_id: Self::CurrencyId, who: &T::AccountId)\n7 217: fn free_balance(currency_id: Self::CurrencyId, who: &T::AccountId)\n8 235: fn ensure_can_withdraw(currency_id: Self::CurrencyId, who: &T::AccountId, amount: Self::Balance)\n9 290: fn deposit(currency_id: Self::CurrencyId, who: &T::AccountId, amount: Self::Balance)\n10 303: fn withdraw(currency_id: Self::CurrencyId, who: &T::AccountId, amount: Self::Balance)\n11 316: fn can_slash(currency_id: Self::CurrencyId, who: &T::AccountId, amount: Self::Balance)\n12 324: fn slash(currency_id: Self::CurrencyId, who: &T::AccountId, amount: Self::Balance)\n13 336: fn update_balance(currency_id: Self::CurrencyId, who: &T::AccountId, by_amount: Self::Amount)\n14 376: fn remove_lock(lock_id: LockIdentifier, currency_id: Self::CurrencyId, who: &T::AccountId)\n15 386: fn can_reserve(currency_id: Self::CurrencyId, who: &T::AccountId, value: Self::Balance)\n16 394: fn slash_reserved(currency_id: Self::CurrencyId, who: &T::AccountId, value: Self::Balance)\n17 402: fn reserved_balance(currency_id: Self::CurrencyId, who: &T::AccountId)\n18 423: fn reserve(currency_id: Self::CurrencyId, who: &T::AccountId, value: Self::Balance)\n19 445: fn unreserve(currency_id: Self::CurrencyId, who: &T::AccountId, value: Self::Balance)\n\nRisk Level:\nLikelihood - 2\nImpact - 2\n\nRecommendations:\nIt is recommended to validate all user-supplied input in order to avoid executing unnecessary operations and mitigate the risk of resource exhaustion.\n\nRemediation Plan:\nACKNOWLEDGED: Reef states that there is only 1 currency id in use, and there likely won\u2019t be more going forward.",
+ "description_summary": "Missing validation for currency ID in multiple methods could lead to resource exhaustion."
+ }, + { + "title": "Vector capacity validation missing", + "repository": "https://github.com/reef-defi/reef-chain", + "audited_commit": "393d0c0821cc25ea5c6912d9cac8f61a9232c9a3", + "reported_remediated_commit": "6b826f7ca16d1a30f3fa55f0606d0b94b69b2b3a", + "location": [ + { + "file_path": "modules/evm-accounts/src/lib.rs", + "lines": [313] + } + ], + "reported_impact": "2", + "reported_likelihood": "1", + "cwe_classification": null, + "vulnerability_class_audit": null, + "description": "3.6 (HAL-06) VECTOR CAPACITY VALIDATION MISSING - INFORMATIONAL\nDescription:\nThe to_ascii_hex utility function defined in modules/evm-accounts/src/lib.rs, when creating a new Vec from the user-supplied data slice with the Vec::with_capacity method, does not validate whether the capacity of the new vector exceeds the maximum allowed capacity.\n\nCode Location:\nListing 10: modules/evm-accounts/src/lib.rs (Lines 313)\n312 pub fn to_ascii_hex(data: &[u8]) -> Vec<u8> {\n313 let mut r = Vec::with_capacity(data.len() * 2);\n314 let mut push_nibble = |n| r.push(if n < 10 { b'0' + n } else { b'a' - 10 + n });\n315 for &b in data.iter() {\n316 push_nibble(b / 16);\n317 push_nibble(b % 16);\n318 }\n319 r\n320 }\n\nRisk Level:\nLikelihood - 1\nImpact - 2\n\nRecommendations:\nValidate that the new capacity (data.len() * 2) does not exceed isize::MAX bytes.\n\nRemediation:\nSOLVED: Reef fixed the issue in commit 6b826f7ca16d1a30f3fa55f0606d0b94b69b2b3a.", + "description_summary": "Missing validation in to_ascii_hex function allows vector capacity to exceed maximum limit." + } + ] + }, + { + "audited_project_id": 7, + "project_name": "MantaNetwork", + "auditor": "Veridise", + "audit_link": "https://github.com/Manta-Network/Atlantic-Audits/blob/main/Atlantic-Veridise-Chain.pdf", + "findings": [ + { + "title": "Static fee charged despite dynamic storage accesses", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "parachain-staking/lib.rs", + "lines": null + } + ], + "reported_impact": "Medium", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Bad Extrinsic Weight", + "description": "4.1.1 V-MANC-VUL-001: Static fee charged despite dynamic storage accesses\n\nSeverity: Medium\nType: Bad Extrinsic Weight\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nFile(s): parachain-staking/lib.rs\nLocation(s): go_online, go_offline, candidate_bond_more\n\nBlockchain computations must have appropriate fees to prevent network congestion. For Substrate extrinsics, these fees are set by computing an associated weight for the operation, where the weight is intended to capture the maximum computational cost. As reads from and writes to storage are expensive, these weights should consider the number of these operations that are performed. The following extrinsics, however, have a fixed weight despite requiring a dynamic number of reads or writes due to insert or remove operations being performed on CandidatePool.\n\n- go_online\n- go_offline\n- candidate_bond_more\n- execute_candidate_bond_less\n- delegate\n- execute_leave_delegators\n- delegator_bond_more\n- execute_delegation_request\n- schedule_leave_delegators\n- schedule_delegator_bond_less\n- cancel_leave_delegators\n\nAlso note that similar functions in the same pallet, such as schedule_leave_candidates, charge the users dynamic fees.
An example can be seen in Snippet 4.1.\n\nImpact: As the size of the CandidatePool grows, the cost of insert and remove will increase linearly since vector inserts in Rust are linear in the size of the vector. This allows malicious actors to add many candidates to the pool for a fixed monetary cost despite an increasing computational cost. If the size of the pool becomes too large, this could effectively create a DoS.\n\nRecommendation: Similar to schedule_leave_candidates, calculate the weights dynamically rather than charging a fixed cost.\n\nDeveloper Response: The developers have acknowledged the issue and are determining how to address it.\n\nSnippet 4.1: go_offline calls remove on the CandidatePool but charges users a fixed weight in WeightInfo::go_offline\n\n#[pallet::call_index(12)]\n#[pallet::weight(<T as Config>::WeightInfo::go_offline())]\n/// Temporarily leave the set of collator candidates without unbonding\npub fn go_offline(origin: OriginFor<T>) -> DispatchResultWithPostInfo {\n let collator = ensure_signed(origin)?;\n let mut state = <CandidateInfo<T>>::get(&collator).ok_or(Error::<T>::CandidateDNE)?;\n ensure!(state.is_active(), Error::<T>::AlreadyOffline);\n state.go_offline();\n let mut candidates = <CandidatePool<T>>::get();\n if candidates.remove(&Bond::from_owner(collator.clone())) {\n <CandidatePool<T>>::put(candidates);\n }\n <CandidateInfo<T>>::insert(&collator, state);\n Self::deposit_event(Event::CandidateWentOffline { candidate: collator });\n Ok(().into())\n}", + "description_summary": "Fixed fees are charged for dynamic storage operations, potentially leading to DoS as CandidatePool size grows." + }, + { + "title": "Users can use any previously seen Merkle root", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/manta-pay/src/lib.rs", + "lines": null + } + ], + "reported_impact": "Medium", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Hash Collision", + "description": "4.1.2 V-MANC-VUL-002: Users can use any previously seen Merkle root\n\nSeverity: Medium\nType: Hash Collision\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nFile(s): pallets/manta-pay/src/lib.rs\nLocation(s): has_matching_utxo_accumulator_output\n\nThe MantaPay protocol maintains a Merkle tree on the ledger where the leaves of the ledger are the hashes of the UTXOs generated during the protocol\u2019s lifetime. In order to spend a UTXO, users must supply a ZK proof that the UTXO belongs to the Merkle tree on the ledger. The membership proof takes as input the root of the Merkle tree (public input), the inner node hashes (private inputs), and proves that the root can be derived from the inner node hashes and leaf.\n\nIdeally, the ledger would check that the root provided is equal to the latest root on-chain. However, this isn\u2019t done in practice, as the transaction could easily be front-run since every transaction changes the root. Instead, the ledger maintains a set of all previously generated roots and just checks that the root provided belongs to that set.\n\nHowever, by allowing the root provided by the user to be any previously generated root, an attacker simply needs to find a hash collision with any previously generated root to steal assets. The likelihood of finding a collision grows quadratically with the number of previously seen hashes.
In particular, given an output size of b bits and n previously generated hashes, the likelihood of finding a collision with any of the n hashes is approximately n^2/2^(b+1).\n\nThe current version of the Protocol uses the Poseidon hash function, which produces 255-bit hashes and, in theory, should be safe even with billions of previously seen roots. However, this is contingent on the safety of the Poseidon hash. While there has been a significant amount of research and analysis conducted on the function, including various attacks and optimizations, there is no formal proof of its security and correctness, let alone any proofs about concrete implementations.\n\nImpact: Storing all previously seen roots significantly increases the likelihood of collisions. If any attack or weakness is found in the Poseidon hash, this can be an additional means of attacking the protocol.\n\nRecommendation: There are a few ways to mitigate this. Protocols like Semaphore maintain a timeout period TIMEOUT and associate each root with a timestamp indicating when it was created. Any root created before now() - TIMEOUT is rejected. Another option is to only store the N previously generated roots and only allow a root if it belongs to the set of N previously generated roots. The latter option would have the additional benefit of not needing to store every root on-chain.\n\nDeveloper Response: The developers acknowledged the issue and will either use a timestamp or only maintain the N previously generated roots.", + "description_summary": "Allowing any previously seen Merkle root increases the risk of hash collisions, potentially allowing asset theft." + }, + { + "title": "MantaPay weights calculated with a small database", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/manta-pay/src/lib.rs", + "lines": null + } + ], + "reported_impact": "Medium", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Bad Extrinsic Weight", + "description": "4.1.3 V-MANC-VUL-003: MantaPay weights calculated with a small database\n\nSeverity: Medium\nType: Bad Extrinsic Weight\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nFile(s): pallets/manta-pay/src/lib.rs\nLocation(s): to_private, to_public, private_transfer\n\nTransactions to_public, to_private, and private_transfer take as input nullifiers and membership proofs and generate UTXOs. These UTXOs are then added to a Merkle tree on the ledger.\n\nMantaPay shards this Merkle tree into 256 buckets where each bucket has its own Merkle tree. Instead of storing the entire tree at each bucket, the Ledger just stores the last path added to the tree. When adding a UTXO, the Ledger first computes its corresponding bucket, then computes the new path pointing to that UTXO, and finally adds that path to the bucket.\n\nComputing the new path should take time proportional to log(n) where n is the size of the Merkle Tree. The current benchmarking scheme only covers cases where the previous path is small, i.e., at most size 1. However, if the number of transactions gets large, on the order of hundreds of millions or billions, then the size of the path can get to 24-28 (taking shards into account). 
If the tree grows to this size, this means each execution of the extrinsic will perform 24-28 hashes, multiplied by the number of UTXOs to be added.\n\nThe benchmarking scheme should take into account the size of the tree to ensure that the existing weights are sufficient to offset the computation of the new Merkle tree path.\n\nImpact: Setting the weight too low can allow users to perform a large number of transactions with little cost, potentially allowing malicious users to launch a DoS attack.\n\nRecommendation: There are several ways to address this. One strategy is to take an additional parameter corresponding to the logarithm of the Merkle Tree's size on the ledger. The weight charged can be proportional to this value. In the implementation, this value (technically 2^value) can be compared against the actual size, and the transaction will only proceed if it is larger than or equal to the actual size.\n\nAnother strategy is to benchmark the pallets by taking into account the tree's size. If MantaPay is expected not to exceed more than a billion transactions, then the pallet could be benchmarked assuming the current path length is around 24-28.\n\nDeveloper Response: The developers acknowledged that the weights should be recalculated with a more saturated database.", + "description_summary": "Inadequate benchmarking of weights for MantaPay's Merkle tree could lead to DoS attacks as the tree grows." + }, + { + "title": "Total supply of native assets can exceed the set limit", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/asset-manager/src/lib.rs", + "lines": null + } + ], + "reported_impact": "Medium", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Logic Error", + "description": "4.1.4 V-MANC-VUL-004: Total supply of native assets can exceed the set limit\n\nSeverity: Medium\nType: Logic Error\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nFile(s): pallets/asset-manager/src/lib.rs\nLocation(s): mint_asset\n\nOne invariant underlying the correctness of MantaPay is that the total supply of an asset cannot exceed the maximum amount that can be held in a particular account. This is because MantaPay uses a dedicated account A to store the value of all the private assets. As such, A should, in principle, be able to hold all the supply in the case where all of that asset is privatized.\n\nIn more detail, when privatizing a user\u2019s public assets (via to_private), MantaPay constructs opaque UTXOs to encode the amount privatized, and then transfers those public assets into A. This transfer is expected not to fail because of the invariant described above. However, we found a case where the transfer can fail.\n\nManta enforces this invariant for NonNative assets because every time an asset is minted into an account, the total supply is increased. If the total supply would exceed the maximum that can be held in an account, the mint fails with the error Overflow. However, there is no such check for Native assets. Thus, if the total supply of Native assets exceeds the maximum that can be held in an account, u128::MAX, then to_private calls that should succeed can fail if the amount held in A is close to the maximum allowed. 
This is demonstrated in Snippet 4.2.\n\nImpact: By not constraining the amount of Native assets to be less than the maximum amount that can be held in an account, to_private transactions that should succeed will fail.\n\nRecommendation: We recommend a similar check be done for Native assets as is done for NonNative assets to enforce that the total supply cannot exceed the maximum that can be held in a given account.\n\nDeveloper Response: The developers have acknowledged the issue and are determining how to address it.\n\nSnippet 4.2: Failed to_private due to count of native assets exceeding u128::MAX\n\n#[test]\nfn public_account_issue() {\nlet mut rng = OsRng;\nnew_test_ext().execute_with(|| {\nlet asset_id = NATIVE_ASSET_ID;\nlet value = 1000u128;\nlet id = NATIVE_ASSET_ID;\nlet metadata = AssetRegistryMetadata {\nmetadata: AssetStorageMetadata {\nname: b\"Calamari\".to_vec(),\nsymbol: b\"KMA\".to_vec(),\ndecimals: 12,\nis_frozen: false,\n},\nmin_balance: TEST_DEFAULT_ASSET_ED2,\nis_sufficient: true,\n};\nassert_ok!(MantaAssetRegistry::create_asset(\nid, metadata.into(), TEST_DEFAULT_ASSET_ED2,\ntrue\n));\nassert_ok!(FungibleLedger::<Test>::deposit_minting(id, &ALICE, 2*value));\nassert_ok!(FungibleLedger::<Test>::deposit_minting(id, &MantaPay::account_id(), u128::MAX));\nlet mut utxo_accumulator = UtxoAccumulator::new(UTXO_ACCUMULATOR_MODEL.clone());\nlet spending_key = rng.gen();\nlet address = PARAMETERS.address_from_spending_key(&spending_key);\nlet mut authorization =\nAuthorization::from_spending_key(&PARAMETERS, &spending_key, &mut rng);\nlet asset_0 = Asset::new(Fp::from(asset_id), value);\nlet (to_private_0, pre_sender_0) = ToPrivate::internal_pair(\n&PARAMETERS, &mut authorization.context,\naddress, asset_0,\nDefault::default(), &mut rng,\n);\nlet to_private_0 = to_private_0\n.into_post(\nFullParametersRef::new(&PARAMETERS, utxo_accumulator.model()),\n&PROVING_CONTEXT.to_private,\nNone, Vec::new(), &mut rng,\n)\n.expect(\"Unable to build TO_PRIVATE proof.\")\n.expect(\"Did not match transfer shape.\");\nassert_ok!(MantaPay::to_private(\nMockOrigin::signed(ALICE),\nPalletTransferPost::try_from(to_private_0).unwrap()\n));\n});\n}\n", + "description_summary": "Native asset supply can exceed the maximum limit for an account, causing to_private transactions to fail." + }, + { + "title": "Missing updates in update_asset_metadata", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/asset-manager/src/lib.rs", + "lines": null + } + ], + "reported_impact": "Medium", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Logic Error", + "description": "4.1.5 V-MANC-VUL-005: Missing updates in update_asset_metadata\n\nSeverity: Medium\nType: Logic Error\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nFile(s): pallets/asset-manager/src/lib.rs\nLocation(s): update_asset_metadata\n\nManta Chain has an asset-manager pallet responsible for registering and minting assets. Each asset has a unique id and is associated with various metadata like a name, symbol, decimal places, etc. One important metadata is called min_balance. To store an account with some quantity of assets on the ledger, it must have more than min_balance quantity.
This metadata is also used when validating transfers.\n\nIn particular, many asset transfers take an \u201cexistential parameter\u201d as input, called KeepAlive, which decides what to do if the transfer would take the account\u2019s balance (with respect to the asset) below min_balance. If KeepAlive is set, the transfer will fail if the amount goes below the min_balance. If it is not set, other configurations come into play, and the account may be removed and the remaining balance burned.\n\nThe asset-manager pallet exposes an extrinsic called update_asset_metadata, which takes as input the new metadata for that asset and updates the ledger to associate the asset with that metadata. While the implementation took a new min_balance as input, it did not update the ledger to associate the asset with this metadata.\n\nThis API also took as input a new value for the metadata is_sufficient, but similarly did not update the ledger to associate the asset with this metadata.\n\nImpact: While it is rare for the min_balance to be changed, it is sometimes necessary if it was originally set too high, for example. The current API made it appear that min_balance could be changed, so users might think the min_balance was changed when, in fact, it wasn\u2019t.\n\nRecommendation: The main issue with this extrinsic is that its interface makes it appear as though the metadata min_balance and is_sufficient can be changed when, in fact, they are not updated. Either the API should be changed to only take the metadata that should be changed, or it should appropriately update min_balance and is_sufficient.\n\nDeveloper Response: The developers acknowledged this issue and are discussing two possible fixes. The first is to update both parameters in the asset pallet, and the second is to change the interface to prevent updating the min_balance or is_sufficient parameters.", + "description_summary": "update_asset_metadata does not apply updates to min_balance and is_sufficient metadata as expected." + }, + { + "title": "No slashing mechanism for collators", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "parachain-staking", + "lines": null + } + ], + "reported_impact": "Medium", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Consensus", + "description": "4.1.6 V-MANC-VUL-006: No slashing mechanism for collators\n\nSeverity: Medium\nType: Consensus\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nFile(s): parachain-staking\nLocation(s): N/A\n\nProof of Stake blockchains often have a slashing mechanism to detect poorly performing stakers and punish them. Typically, a significant portion of the staker\u2019s stake is taken by the chain as punishment for poor performance.\n\nCurrently, Manta Chain does not have any slashing mechanism. Instead, it uses a combination of social pressure and manual slashing to incentivize good behavior. Specifically, when the owners detect a poorly performing collator, they contact the collator over Discord and warn them of the poor performance. If their performance does not improve, the owners manually slash the collator\u2019s funds.\n\nWhile this approach may work when the blockchain is small, it will be challenging to enforce as the chain grows.
Therefore, we recommend that Manta Chain implement a slashing mechanism.\n\nImpact: Manta Chain\u2019s current method of using social pressure will only work with a small set of trusted collators. However, as the chain expands, this mechanism is unlikely to sufficiently incentivize collators to perform well.\n\nRecommendation: We recommend that Manta Chain establish a slashing mechanism to implement if/when the current process becomes inadequate.\n\nDeveloper Response: The developers acknowledged the lack of a slashing mechanism and plan to include one in the future if/when the current process stops working.", + "description_summary": "No automated slashing mechanism exists to penalize poorly performing collators, relying on social pressure and manual intervention." + }, + { + "title": "Collators given full rewards regardless of quality", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/parachain-staking/src/lib.rs", + "lines": null + } + ], + "reported_impact": "Medium", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Consensus", + "description": "4.1.7 V-MANC-VUL-007: Collators given full rewards regardless of quality\n\nSeverity: Medium\nType: Consensus\nCommit: 45ba60e1d\nStatus: Open\n\nFile(s): pallets/parachain-staking/src/lib.rs\nLocation(s): pay_one_collator_reward\n\nManta Chain rewards collators by first allocating a fixed number of points (20) for every block they author and then giving the collator a fixed percentage of those allocated points as rewards. However, there is no check on the quality of the blocks authored by the collator: an empty block will result in just as many rewards as a full block.\n\nCurrently, Manta relies on the owners to monitor the blocks on-chain and manually punish collators who perform poorly. However, as the chain grows, this misbehavior may not be easy to detect.\n\nOne relatively simple way to address this issue is to adjust the reward system to incentivize high quality blocks.\n\nImpact: Collators can effectively steal funds from Manta by authoring low-quality blocks (i.e., empty or partial blocks) and reaping full rewards.\n\nRecommendation: We recommend the developers adjust the rewards system to either reward the collators for high-quality blocks or punish them for authoring poor ones.\n\nDeveloper Response: TBD", + "description_summary": "Collators receive full rewards regardless of block quality, allowing low-quality blocks to earn the same as high-quality ones." + }, + { + "title": "Missing validation in pull_ledger_diff", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/manta-pay/src/lib.rs", + "lines": [593] + } + ], + "reported_impact": "Low", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Data Validation", + "description": "4.1.8 V-MANC-VUL-008: Missing validation in pull_ledger_diff\n\nSeverity: Low\nType: Data Validation\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nFile(s): pallets/manta-pay/src/lib.rs\nLocation(s): Line 593\n\npull_ledger_diff takes as input a Checkpoint, which is a struct with two fields: receiver_index and sender_index. It pulls sender and receiver data from the ledger, starting at sender_index (resp. 
receiver_index) up to at most sender_index + PULL_MAX_SENDER_UPDATE_SIZE (resp. receiver_index + PULL_MAX_RECEIVER_UPDATE_SIZE). However, there is no check to ensure that this sum cannot overflow for both the sender and receiver index in pull_senders, pull_receivers, pull_senders_for_shard, and pull_receivers_for_shard.\n\nImpact: If the code is compiled without the --release flag, a malicious user could crash the node by passing in bad values. If it is built with --release, the call will be reported as successful and no senders or receivers will be returned. However, if a benign end user calls the API with incorrect indexes, it might be preferable to return an Error informing them that the index is invalid.\n\nRecommendation: We recommend adding bounds checks for safety and to return an Error.\n\nDeveloper Response: The developers are aware and agree that it would be better to check and return an error.", + "description_summary": "Missing bounds checks in pull_ledger_diff could lead to overflow, potentially crashing the node." + }, + { + "title": "increase_count_of_associated_assets can overflow", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/asset-manager/src/lib.rs", + "lines": [590] + } + ], + "reported_impact": "Low", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Logic Error", + "description": "4.1.9 V-MANC-VUL-009: increase_count_of_associated_assets can overflow\n\nSeverity: Low\nType: Logic Error\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nFile(s): pallets/asset-manager/src/lib.rs\nLocation(s): Line 590\n\nThe asset_manager pallet maintains a mapping of paraids to a count of assets associated with that paraid. Each paraid can be associated with at most u32::MAX assets. When registering an asset or moving its location, the pallet calls increase_count_of_associated_assets, which takes as input a paraid and increments the number of assets associated with that paraid. However, this function does not check whether increasing the number of assets will result in an overflow.\n\nImpact: If the runtime is compiled using --debug, this can crash the node. However, if built under --release, the asset count will go to zero.\n\nRecommendation: Make this function check if the addition will result in an overflow, i.e., check if the current count is u32::MAX and return an error.\n\nDeveloper Response: Acknowledged", + "description_summary": "increase_count_of_associated_assets lacks overflow check, risking node crash or reset to zero in certain cases." + }, + { + "title": "Account checks are incorrect", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/manta-pay/src/lib.rs", + "lines": null + } + ], + "reported_impact": "Low", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Logic Error", + "description": "4.1.10 V-MANC-VUL-010: Account checks are incorrect.\n\nSeverity: Low\nType: Logic Error\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nFile(s): pallets/manta-pay/src/lib.rs\nLocation(s): check_sink_accounts, check_source_accounts\n\nWhen validating a transaction, the source and sink accounts are checked by check_sink_accounts and check_source_accounts. 
These functions iterate over pairs (account, value) and check that value can be safely deposited (withdrawn) from account. The logic is correct only if every account appears in at most one pair. While this is fine for the current APIs, if the APIs change to allow multiple sink or multiple source accounts, then this code needs to be refactored or the uniqueness needs to be enforced elsewhere.\n\nImpact: Currently there is no impact since the current APIs only allow one account for the source and sink accounts.\n\nRecommendation: To be safe, we recommend adding an additional check in the validation step to ensure the accounts are distinct for both sources and sinks.\n\nDeveloper Response: The developers acknowledged the issue and plan to add a check during validation to ensure that the accounts are distinct.", + "description_summary": "Account checks in transaction validation assume unique accounts, which may lead to issues if APIs change to allow multiple accounts." + }, + { + "title": "Unstaked user may be selected as collator", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "parachain-staking/lib.rs", + "lines": null + } + ], + "reported_impact": "Low", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Logic Error", + "description": "4.1.11 V-MANC-VUL-011: Unstaked user may be selected as collator\n\nSeverity: Low\nType: Logic Error\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nFile(s): parachain-staking/lib.rs\nLocation(s): select_top_candidates\n\nParachains use collators to combine transactions into blocks that are then checked by Validators on the relay chain. Notably, this allows collators to remain relatively untrusted as validators ensure blocks are created correctly. On Manta\u2019s chain, collators are selected from a group of staked users who receive rewards for creating blocks. Requiring that collators be staked provides additional security guarantees as, if a collator misbehaves (e.g., submits no blocks for validation, submits multiple conflicting blocks), governance can step in and slash the user\u2019s staked funds. As such, unstaked collators have less incentive to maintain parachain stability and should be avoided.\n\nHowever, in the collator selection process, if no sufficiently staked collator can be found, collators from the previous round will be selected. As there is no validation on the current state of the previous collators\u2019 stake, this could select unstaked collators who lack the incentive to ensure network stability.\n\nHere is a simple test case demonstrating this:\n\n#[test]\nfn test_failed_candidate_selection() {\n ExtBuilder::default()\n .with_balances(vec![(10, 10)])\n .with_candidates(vec![(10, 10)])\n .build()\n .execute_with(|| {\n roll_to(2);\n assert_ok!(ParachainStaking::schedule_leave_candidates(Origin::signed(10), 6u32));\n roll_to(5);\n let candidate: Vec<u64> = ParachainStaking::selected_candidates();\n assert_ne!(candidate[0], 10u64);\n });\n}\n\nImpact: Collators will not be incentivized to ensure network stability.
Another set of partially staked or \u201ctrusted\u201d collators could provide better stability.\n\nRecommendation: Consider maintaining a set of \u201ctrusted\u201d collators to fall back on if no staked collators can be found.\n\nDeveloper Response: Acknowledged.", + "description_summary": "Unstaked users may be selected as collators if no sufficiently staked collators are available, reducing network stability." + }, + { + "title": "XCM instructions can charge 0 weight", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "runtime/calamari/src/weights/xcm/mod.rs", + "lines": null + }, + { + "file_path": "runtime/dolphin/src/weights/xcm/mod.rs", + "lines": null + } + ], + "reported_impact": "Low", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Bad Extrinsic Weight", + "description": "4.1.12 V-MANC-VUL-012: XCM instructions can charge 0 weight\n\nSeverity: Low\nType: Bad Extrinsic Weight\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nFile(s): runtime/(calamari, dolphin)/src/weights/xcm/mod.rs\nLocation(s): Every use of weigh_multi_assets\n\nThe Polkadot ecosystem uses the XCM messaging standard to enable parachains and the relay chain to communicate with each other. For example, if a parachain P1 wants to deposit an asset onto another parachain P2, they can construct an XCM message stating they wish to deposit an asset into an account associated with P1 and send it to P2.\n\nEach XCM message consists of a sequence of low-level XCM instructions that get executed by the XCM executor on the destination parachain. To offset the cost of executing these instructions, parachains set weights for each instruction, so the sender of the XCM message is charged fees for the destination parachain executing their message.\n\nManta Chain configured the weights of multiple instructions so that senders could generate messages with a total weight of 0. For example, in the code snippet below, the function deposit_asset sets the weight for the XCM instruction deposit_asset based on a parameter called assets. When assets is an empty vector, the weigh_multi_assets function returns 0, resulting in a total weight of 0 for the instruction:\n\nfn deposit_asset(\n assets: &MultiAssetFilter,\n _max_assets: &u32,\n _dest: &MultiLocation,\n) -> Weight {\n let hardcoded_weight: u64 = 1_000_000_000;\n let weight = assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::deposit_asset());\n cmp::min(hardcoded_weight, weight)\n}\n\nThis setup allows malicious or incompetent senders to spam Manta with messages costing 0, even though the instruction is successfully executed by the XCM executor. While a denial of service may be unlikely due to the fast execution of 0-length vectors, spam prevention is recommended.
Adding a minimal base fee for instructions that can be executed with 0 weight would help prevent spam.\n\nImpact: Malicious users may spam Manta with XCM messages of weight 0, potentially slowing down blockchain performance and risking denial of service.\n\nRecommendation: We recommend always charging a base fee to prevent spam.\n\nDeveloper Response: The developers acknowledged the issue and plan to change weigh_multi_assets to charge the benchmarked weight of a single asset execution for any successfully executed multiasset XCM message.", + "description_summary": "XCM instructions can be executed with zero weight, allowing potential spam through 0-cost messages." + }, + { + "title": "Missing validation in set_units_per_second", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/asset-manager/src/lib.rs", + "lines": null + } + ], + "reported_impact": "Low", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Data Validation", + "description": "4.1.13 V-MANC-VUL-013: Missing validation in set_units_per_second\n\nSeverity: Low\nType: Data Validation\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nFile(s): pallets/asset-manager/src/lib.rs\nLocation(s): set_units_per_second\n\nThe asset-manager pallet manages a hashmap called UnitsPerSecond which maps assetIds to a u128 value units_per_second, used to determine the price for an XCM transfer. It provides a function, set_units_per_second, to set the units_per_second for a given asset. This value determines the cost (in the corresponding asset) to purchase a certain weight for a transaction. The following code snippet calculates the amount:\n\nlet units_per_second = M::units_per_second(&asset_id).ok_or({\n log::debug!(\n target: \"FirstAssetTrader::buy_weight\",\n \"units_per_second missing for asset with id: {:?}\",\n id,\n );\n XcmError::TooExpensive\n})?;\n\nlet amount = units_per_second * (weight as u128) / (WEIGHT_PER_SECOND as u128);\n\nif amount.is_zero() {\n return Ok(payment);\n}\n\nThis calculation of amount uses multiplication, which can overflow. Currently, both units_per_second and amount are of type u128. If units_per_second exceeds u128::MAX / u64::MAX, a large weight (u64::MAX) can be bought for a small asset amount, allowing a malicious parachain to potentially perform a DoS attack.\n\nThere is currently no validation in set_units_per_second to ensure units_per_second is small enough. Although only the root can call set_units_per_second, there is a risk if the root user mistakenly sets an excessive value or is tricked into doing so.\n\nImpact: If units_per_second is set above u128::MAX / u64::MAX, large amounts of weight can be purchased at low cost, potentially enabling a DoS attack on the chain.\n\nRecommendation: Change the type of units_per_second to map assetId to a u64 value or validate that the amount is sufficiently small.\n\nDeveloper Response: The developers acknowledged the issue and plan to fix the weight calculation to use saturating arithmetic, which should address this vulnerability.", + "description_summary": "Missing validation in set_units_per_second allows potential overflow, enabling weight to be purchased at low cost, risking DoS." 
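The saturating-arithmetic fix the developers describe can be sketched as follows; the WEIGHT_PER_SECOND value below is illustrative rather than the runtime's actual constant:

// Illustrative constant; the real runtime defines its own WEIGHT_PER_SECOND.
const WEIGHT_PER_SECOND: u128 = 1_000_000_000_000;

// Saturating multiply prevents the wrap-around that would let a huge weight
// be bought for a tiny asset amount when units_per_second is set too high.
fn buy_weight_amount(units_per_second: u128, weight: u64) -> u128 {
    units_per_second.saturating_mul(weight as u128) / WEIGHT_PER_SECOND
}

fn main() {
    // A multiply that would wrap now saturates to u128::MAX instead.
    assert_eq!(
        buy_weight_amount(u128::MAX, u64::MAX),
        u128::MAX / WEIGHT_PER_SECOND
    );
}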
+ }, + { + "title": "Collator is a single point of failure for a round", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", + "reported_remediated_commit": null, + "location": [ + { + "file_path": null, + "lines": null + } + ], + "reported_impact": "Low", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Consensus", + "description": "4.1.14 V-MANC-VUL-014: Collator is a single point of failure for a round\n\nSeverity: Low\nType: Consensus\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nThe Manta parachain uses the Aura consensus mechanism to select collators to author blocks. Aura selects a primary collator for a round, and only that collator is allowed to produce blocks in that round. However, if that collator goes down, then no blocks will be produced, making that collator a single point of failure.\n\nOther parachains, like Moonbeam, address this by selecting multiple collators for a given round.\n\nImpact: If a collator goes down, no blocks will be produced for that round, thereby impacting the transaction throughput of Manta.\n\nRecommendation: We recommend that Manta adopt a consensus mechanism that selects multiple collators, ideally geographically separated, so if one collator fails, the likelihood of others failing remains low.\n\nDeveloper Response: The developers are aware of this issue and have plans to move away from the Aura consensus mechanism.", + "description_summary": "Single collator selection creates a point of failure, risking block production halt if the collator goes down." + }, + { + "title": "Unchecked index calculation in spend_all", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/manta-pay/src/lib.rs", + "lines": null + } + ], + "reported_impact": "Warning", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Logic Error", + "description": "4.1.15 V-MANC-VUL-015: Unchecked index calculation in spend_all\n\nSeverity: Warning\nType: Logic Error\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nFile(s): pallets/manta-pay/src/lib.rs\nLocation(s): spend_all\n\nThe spend_all function in the Manta-Pay pallet does the following:\n1. Adds the nullifier commitments in the TransactionPost to the NullifierCommitmentSet.\n2. Inserts each (nullifier, outgoingNote) pair into the NullifierSetInsertionOrder structure.\n3. Updates a global variable, NullifierSetSize, which stores the size of the nullifier commitment set.\n\nThe index where the pair gets inserted, along with the new nullifier size, is calculated as index + i, where i is the index of the corresponding SenderPost, and index is the current size of the set. However, this arithmetic is unchecked and could result in an overflow.\n\nImpact: When the size of the commitment set reaches u64::MAX, the index calculation may overflow, causing the pair to be inserted at the beginning of the list and setting the nullifier set size to 1. However, reaching this value through normal execution is extremely unlikely.\n\nRecommendation: Add an overflow check and return an error.\n\nDeveloper Response: TBD.", + "description_summary": "Unchecked index calculation in spend_all may cause overflow, leading to incorrect data insertion." 
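A minimal sketch of the overflow check recommended for spend_all, with hypothetical names standing in for the pallet's NullifierSetSize and SenderPost index:

// `current_size` stands in for NullifierSetSize, `i` for the SenderPost index.
fn insertion_index(current_size: u64, i: u64) -> Result<u64, &'static str> {
    // checked_add surfaces the wrap-around as an error instead of silently
    // inserting at the start of the list and resetting the set size to 1.
    current_size.checked_add(i).ok_or("arithmetic overflow in nullifier index")
}

fn main() {
    assert_eq!(insertion_index(10, 2), Ok(12));
    assert!(insertion_index(u64::MAX, 1).is_err());
}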
+ }, + { + "title": "Excess fees not refunded", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "parachain-staking/lib.rs", + "lines": null + } + ], + "reported_impact": "Warning", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Bad Extrinsic Weight", + "description": "4.1.16 V-MANC-VUL-016: Excess fees not refunded\n\nSeverity: Warning\nType: Bad Extrinsic Weight\nFile(s): parachain-staking/lib.rs\nLocation(s): (cancel_leave, execute_leave, schedule_leave, join)_candidates\nCommit: 45ba60e1d\nStatus: Intended Behavior\n\nWhen a Substrate extrinsic is created, its weight must be carefully considered to ensure it correctly reflects the computational cost of the operation, as extrinsic weight is directly related to the fees that are charged to the user. This weight should capture the maximum amount of computational resources that the extrinsic will consume, since excess fees can be returned. In several functions, though, the weights are computed based on the value of an argument provided by the user, which might not always reflect the true cost of the computation.\n\nFor example, consider the following:\n\n#[pallet::call_index(11)]\n#[pallet::weight(<T as Config>::WeightInfo::cancel_leave_candidates(*candidate_count))]\n/// Cancel open request to leave candidates\n/// - only callable by collator account\n/// - result upon successful call is the candidate is active in the candidate pool\npub fn cancel_leave_candidates(\norigin: OriginFor<T>,\n#[pallet::compact] candidate_count: u32,\n) -> DispatchResultWithPostInfo {\n...\nlet mut candidates = <CandidatePool<T>>::get();\nensure!(\ncandidates.0.len() as u32 <= candidate_count,\nError::<T>::TooLowCandidateCountWeightHintCancelLeaveCandidates\n);\n...\nOk(().into())\n}\n\nIn this function, the weight is computed using the candidate_count argument, and in order for the function to execute successfully, candidate_count must be greater than or equal to the current size of the candidate pool. A user might need to call this function with a candidate_count that is larger than the size of the pool to prevent a front-running attack where a malicious user would add candidates to prevent the transaction from executing successfully. In such a case, the weight would be larger than necessary, but no fees are returned to the user.\n\nImpact: Such functions can charge unnecessary fees to the user.\n\nRecommendation: Refund the user additional fees that are not consumed.\n\nDeveloper Response: Since these extrinsics are likely to be executed sparingly and since the additional fees are likely to be small, we feel like the additional computational cost of determining the excess does not justify implementing a refund.", + "description_summary": "Extrinsics may charge unnecessary fees without refunding excess."
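The refund pattern the recommendation points at can be sketched as follows; PostInfo is a simplified stand-in for Substrate's PostDispatchInfo, and the per-candidate weight is an invented constant:

// Stand-in for Substrate's PostDispatchInfo; `None` means "charge the hint".
struct PostInfo {
    actual_weight: Option<u64>,
}

const PER_CANDIDATE_WEIGHT: u64 = 1_000; // illustrative per-candidate cost

fn cancel_leave_candidates(hinted_count: u64, actual_count: u64) -> Result<PostInfo, &'static str> {
    // The hint must still bound the real pool size, as in the original code.
    if actual_count > hinted_count {
        return Err("TooLowCandidateCountWeightHintCancelLeaveCandidates");
    }
    // Report the weight for the real pool size; the difference from the
    // hinted (pre-charged) weight would then be refunded to the caller.
    Ok(PostInfo {
        actual_weight: Some(actual_count.saturating_mul(PER_CANDIDATE_WEIGHT)),
    })
}

fn main() {
    let post = cancel_leave_candidates(100, 40).unwrap();
    assert_eq!(post.actual_weight, Some(40_000));
    assert!(cancel_leave_candidates(10, 40).is_err());
}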
+ }, + { + "title": "Assets can be registered at unsupported locations", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/asset-manager/src/lib.rs", + "lines": null + } + ], + "reported_impact": "Warning", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Data Validation", + "description": "4.1.17 V-MANC-VUL-017: Assets can be registered at unsupported locations\n\nSeverity: Warning\nType: Data Validation\nFile(s): pallets/asset-manager/src/lib.rs\nLocation(s): register_asset\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nThe asset-manager pallet allows assets to be registered, managed, and minted. In particular, register_asset takes as input an asset, location, and corresponding asset_metadata and registers the asset. Every asset must be associated with a location; however, Manta only supports assets from specific locations. The current implementation of asset-manager does not perform any validation on the locations passed into register_asset, potentially allowing assets to be registered from untested locations. The pallet also exposes a method called update_asset_location, which is supposed to update the location of an asset. It similarly does not perform any validation on the new location of the asset.\n\nImpact: The current implementation allows assets to be registered from untested locations.\n\nRecommendation: The asset-manager pallet already implements the Contains trait, which exposes a method contains that takes as input a location and returns true if and only if the location is supported. Currently, that method is unused and can be used to validate the locations passed in.\n\nDeveloper Response: The developers acknowledged the issue and plan to add a check in register_asset.", + "description_summary": "Assets can be registered without validating their location." + }, + { + "title": "Minimum delegator funds is not MinDelegatorStk", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "parachain-staking/lib.rs", + "lines": null + } + ], + "reported_impact": "Warning", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Logic Error", + "description": "4.1.18 V-MANC-VUL-018: Minimum delegator funds is not MinDelegatorStk\n\nSeverity: Warning\nType: Logic Error\nFile(s): parachain-staking/lib.rs\nLocation(s): N/A\nCommit: 45ba60e1d\nStatus: Acknowledged\n\nIn the case where MinDelegation < MinDelegatorStk, it is possible for the delegator\u2019s staked funds to be less than MinDelegatorStk. This can occur through the following sequence of calls:\n\n1. delegate amount N from delegator D to candidate C1 where N >= MinDelegatorStk\n2. delegate amount M from delegator D to candidate C2 where M < MinDelegatorStk and M >= MinDelegation\n3. schedule_leave_candidates and execute_leave_candidates for C1\n\nThis results in D having M funds staked, where M < MinDelegatorStk.\n\nImpact: If MinDelegation is less than MinDelegatorStk, a delegator may end up with less than MinDelegatorStk funds actually staked.\n\nNote: This is not currently exploitable because MinDelegation == MinDelegatorStk in all production runtimes. 
However, if these values are adjusted in the future, this bug may become exploitable.\n\nRecommendation: There are two options:\n\n1. When starting a runtime, ensure that MinDelegation >= MinDelegatorStk\n2. Whenever a delegation is removed (such as in execute_leave_candidates), ensure that the remaining locked funds for the delegator are at least MinDelegatorStk.\n\nDeveloper Response: The developers acknowledged the issue and are considering removing MinDelegation as there is no apparent reason for it being different from MinDelegatorStk.", + "description_summary": "Delegators can have staked funds below the required MinDelegatorStk." + }, + { + "title": "Unintended test crashes", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "45ba60e1d940dbf3491ce0f1223e44c84d5b7218", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/manta-pay/src/lib.rs", + "lines": null + } + ], + "reported_impact": "Info", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Maintainability", + "description": "4.1.19 V-MANC-VUL-019: Unintended test crashes\n\nSeverity: Info\nType: Maintainability\nFile(s): pallets/manta-pay/src/lib.rs\nLocation(s): to_private_should_work\nCommit: 45ba60e1d\nStatus: Open\n\nMany of the manta-pay tests randomly generate an asset id, total supply, and an amount to make private. To ensure the total supply of the asset is greater than the minimum balance, the minimum balance is always added to the randomly generated total supply, as seen in this test:\n\nfn to_private_should_work() {\n let mut rng = OsRng;\n for _ in 0..RANDOMIZED_TESTS_ITERATIONS {\n new_test_ext().execute_with(|| {\n let asset_id = rng.gen();\n let total_free_supply = rng.gen();\n initialize_test(asset_id, total_free_supply + TEST_DEFAULT_ASSET_ED);\n mint_private_tokens(\n asset_id,\n &value_distribution(5, total_free_supply, &mut rng),\n &mut rng,\n );\n });\n }\n}\n\nIf the random number generator generates a value for the total_free_supply which is greater than u128::MAX - TEST_DEFAULT_ASSET_ED, then the test will fail even though it is expected to succeed.\n\nImpact: May cause tests to fail when they are expected to succeed.\n\nRecommendation: Change the test to generate a value for total_free_supply between [0, u128::MAX - TEST_DEFAULT_ASSET_ED).\n\nDeveloper Response: TBD", + "description_summary": "Randomized test may fail due to unintended overflow in total supply generation." + } + ] + }, + { + "audited_project_id": 8, + "project_name": "MantaNetwork", + "auditor": "Halborn", + "audit_link": "https://github.com/Manta-Network/Atlantic-Audits/blob/main/Atlantic-Halborn-zkSBT.pdf", + "findings": [ + { + "title": "Loss of Reserved SBT IDs", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "ceb9e46cd53b77eb914ba6c17452fc238bc3a28f", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/manta-sbt/src/lib.rs", + "lines": [376] + } + ], + "reported_impact": "Low", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": null, + "description": "4.1 (HAL-01) LOSS OF RESERVED SBT IDS - LOW (2.5)\\nDescription:\\nThe reserve_sbt function calculates a range of IDs and stores this range in the ReservedIds storage map, using the caller\u2019s address as the key. It was identified that users lose their reserved SBT IDs when they call the reserve_sbt function without first minting their previously reserved SBT IDs.
This occurs because the previous reserved range is overwritten.\\nCode Location:\\nBody of the reserve_sbt function:\\nListing 1: pallets/manta-sbt/src/lib.rs (Line 376)\\n368 let asset_id_range: Vec<StandardAssetId> = (0..T::MintsPerReserve::get())\\n369 .map(|_| Self::next_sbt_id_and_increment())\\n370 .collect::<Result<Vec<StandardAssetId>, _>>()?;\\n\\n372 // The range of `AssetIds` that are reserved as SBTs\\n373 let start_id: StandardAssetId = *asset_id_range.first().ok_or(Error::<T>::ZeroMints)?;\\n374 let stop_id: StandardAssetId = *asset_id_range.last().ok_or(Error::<T>::ZeroMints)?;\\n375 ReservedIds::<T>::insert(&who, (start_id, stop_id));\\nBVSS:\\nAO:A/AC:L/AX:L/C:N/I:N/A:N/D:L/Y:N/R:N/S:U (2.5)\\nProof Of Concept:\\nThis test reserves ids two times and mints one zkSBT. The first zkSBT token will not have the id 1; it will have the id 6 instead.\\nListing 2: pallets/manta-sbt/src/tests.rs\\n1 #[test]\\n2 fn hal01() {\\n3 let mut rng = OsRng;\\n4 new_test_ext().execute_with(|| {\\n5 assert_ok!(Balances::set_balance(\\n6 MockOrigin::root(),\\n7 ALICE,\\n8 1_000_000_000_000_000,\\n9 0\\n10 ));\\n11 // Reserve IDs from 1 to 5\\n12 assert_ok!(MantaSBTPallet::reserve_sbt(MockOrigin::signed(ALICE)));\\n13 // Reserve IDs from 6 to 10\\n14 assert_ok!(MantaSBTPallet::reserve_sbt(MockOrigin::signed(ALICE)));\\n15\\n16 let value = 1;\\n17 let id = field_from_id(ReservedIds::<Test>::get(ALICE).unwrap().0);\\n18 let post = sample_to_private(id, value, &mut rng);\\n19 assert_ok!(MantaSBTPallet::to_private(\\n20 MockOrigin::signed(ALICE),\\n21 Box::new(post),\\n22 bvec![0]\\n23 ));\\n\\n25 // The first zkSBT minted has the id 6.\\n26 assert_eq!(\\n27 SbtMetadata::<Test>::get(6).unwrap().extra,\\n28 Some(bvec![0])\\n29 );\\n30 });\\n31 }\\nRecommendation:\\nTo resolve this issue, it is recommended to restrict users from reserving additional SBT IDs if they have not minted their previously reserved IDs.", + "description_summary": "Users lose previously reserved SBT IDs if they reserve new ones without minting the old." + }, + { + "title": "Last SBT IDs Cannot Be Reserved", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "ceb9e46cd53b77eb914ba6c17452fc238bc3a28f", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/manta-sbt/src/lib.rs", + "lines": [369, 883] + } + ], + "reported_impact": "Low", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": null, + "description": "4.2 (HAL-02) LAST SBT IDS CANNOT BE RESERVED - LOW (2.5)\\nDescription:\\nWhen users invoke the reserve_sbt function, it reserves a specific number of IDs - quantified by MintsPerReserve. The reserve_sbt function achieves this by repeatedly calling the next_sbt_id_and_increment function - as many times as the MintsPerReserve value. This next_sbt_id_and_increment function serves to return the next available ID and concurrently increment the NextSbtId storage value by 1.\\nA potential problem arises if the incrementing process results in an overflow, causing the next_sbt_id_and_increment function to throw an overflow exception, which in turn fails the ongoing transaction. In this scenario, previously identified IDs that did not contribute to the overflow situation remain unreserved.
This issue presents a concern as it could potentially lead to resource allocation inefficiencies and transaction failures.\\nCode Location:\\nBody of the reserve_sbt function, where the next zkSBT id is incremented.\\nListing 3: pallets/manta-sbt/src/lib.rs (Line 369)\\n356 pub fn reserve_sbt(origin: OriginFor<T>) -> DispatchResult {\\nlet who = ensure_signed(origin)?;\\n// Charges fee to reserve AssetIds\\n<T as Config>::Currency::transfer(\\n&who,\\n&Self::account_id(),\\nT::ReservePrice::get(),\\nExistenceRequirement::KeepAlive,\\n)?;\\n// Reserves unique AssetIds to be used later to mint SBTs\\n368 let asset_id_range: Vec<StandardAssetId> = (0..T::MintsPerReserve::get())\\n369 .map(|_| Self::next_sbt_id_and_increment())\\n370 .collect::<Result<Vec<StandardAssetId>, _>>()?;\\nThe next_sbt_id_and_increment function will overflow if the max number for u128 is surpassed:\\nListing 4: pallets/manta-sbt/src/lib.rs (Line 883)\\n875 fn next_sbt_id_and_increment() -> Result<StandardAssetId, DispatchError> {\\n876 NextSbtId::<T>::try_mutate(|maybe_val| {\\n877 match maybe_val {\\n878 Some(current) => {\\n879 let id = *current;\\n880 *maybe_val = Some(\\n881 current\\n882 .checked_add(One::one())\\n883 .ok_or(ArithmeticError::Overflow)?,\\n884 );\\n885 Ok(id)\\n886 }\\n887 // If storage is empty, starts at value of one (Field cannot be zero)\\n888 None => {\\n889 *maybe_val = Some(2);\\n890 Ok(One::one())\\n891 }\\n892 }\\n893 })\\n894 }\\nBVSS:\\nAO:A/AC:L/AX:L/C:N/I:N/A:L/D:N/Y:N/R:N/S:U (2.5)\\nProof Of Concept:\\nNote: For this Proof of Concept (PoC), the codebase was modified such that the zkSBT IDs are now u8 instead of u128. This alteration reduces the time needed to demonstrate that the function fails in this edge-case scenario.\\nListing 5: pallets/manta-sbt/src/tests.rs\\n236 #[test]\\n237 fn hal02() {\\n238 new_test_ext().execute_with(|| {\\n239 assert_ok!(Balances::set_balance(\\n240 MockOrigin::root(),\\n241 ALICE,\\n242 1_000_000_000_000_000,\\n243 0\\n244 ));\\n245 for i in (1..51) {\\n246 assert_ok!(MantaSBTPallet::reserve_sbt_bis(MockOrigin::signed(ALICE)));\\n247 println!(\"First id: {} - Last id: {}\", ReservedIdsBis::<Test>::get(ALICE).unwrap().0, ReservedIdsBis::<Test>::get(ALICE).unwrap().1);\\n248 }\\n250 assert_noop!(MantaSBTPallet::reserve_sbt_bis(MockOrigin::signed(ALICE)), ArithmeticError::Overflow);\\n251 });\\n252 }\\nIn this test, we reserve all available IDs, excluding the last five. Attempting to reserve the last ID will cause the StandardAssetId value to overflow, resulting in a failure.\\nRecommendation:\\nTo address this issue, it is recommended to implement a check that determines whether the value of the StandardAssetId has reached the maximum value for u128; this prevents the overflow and stops the exception from occurring.", + "description_summary": "Overflow in next_sbt_id_and_increment causes unreserved IDs and transaction failure."
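One way to implement the recommended maximum-value check is to make the whole reservation atomic, as in this standalone sketch; the Option-based next_id is a simplification of the pallet's actual storage:

// Reserve `n` consecutive ids (n > 0), or fail without consuming any.
// `next_id` of None marks the id space as exhausted.
fn reserve_range(next_id: &mut Option<u128>, n: u128) -> Result<(u128, u128), &'static str> {
    debug_assert!(n > 0);
    let start = next_id.ok_or("id space exhausted")?;
    let end = start.checked_add(n - 1).ok_or("not enough ids left to reserve")?;
    *next_id = end.checked_add(1); // becomes None once end == u128::MAX
    Ok((start, end))
}

fn main() {
    let mut next = Some(u128::MAX - 4);
    // The very last five ids can still be reserved...
    assert_eq!(reserve_range(&mut next, 5), Ok((u128::MAX - 4, u128::MAX)));
    // ...and further requests fail cleanly instead of overflowing.
    assert!(reserve_range(&mut next, 1).is_err());
}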
+ }, + { + "title": "Downcasting of 64-Bit Integer", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "ceb9e46cd53b77eb914ba6c17452fc238bc3a28f", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/manta-sbt/src/lib.rs", + "lines": [783] + }, + { + "file_path": "pallets/manta-support/src/manta_pay.rs", + "lines": [874, 1095, 1096] + }, + { + "file_path": "runtime/calamari/src/migrations/staking.rs", + "lines": [70] + } + ], + "reported_impact": "Low", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": null, + "description": "4.3 (HAL-03) DOWNCASTING OF 64-BIT INTEGER - LOW (2.5)\\nDescription:\\nIt was observed that in certain circumstances, usize values are cast to types such as u8 and u32. The usize data type in the Rust programming language represents a pointer-sized unsigned integer. The actual size of usize is dependent on the platform: it\u2019s 32 bits on a 32-bit platform and 64 bits on a 64-bit platform. Consequently, depending on the system, there could be a cast from a u64 to a u32. This implies that an attempt could be made to store a value larger than the maximum value that can be held in a u32, leading to unexpected consequences.\\nCode Location:\\nusize is cast to u8:\\nListing 6: pallets/manta-sbt/src/lib.rs (Line 783)\\n768 fn pull_receivers(\\n769 receiver_indices: [usize; MerkleTreeConfiguration::FOREST_WIDTH],\\n770 max_update_request: u64,\\n771 ) -> (bool, ReceiverChunk) {\\n772 let mut more_receivers = false;\\n773 let mut receivers = Vec::new();\\n774 let mut receivers_pulled: u64 = 0;\\n775 let max_update = if max_update_request > Self::PULL_MAX_RECEIVER_UPDATE_SIZE {\\n776 Self::PULL_MAX_RECEIVER_UPDATE_SIZE\\n777 } else {\\n778 max_update_request\\n779 };\\n781 for (shard_index, utxo_index) in receiver_indices.into_iter().enumerate() {\\n782 more_receivers |= Self::pull_receivers_for_shard(\\n783 shard_index as u8,\\n784 utxo_index,\\n785 max_update,\\n786 &mut receivers,\\n787 &mut receivers_pulled,\\n788 );\\n790 if receivers_pulled == max_update && more_receivers {\\n791 break;\\n792 }\\n794 (more_receivers, receivers)\\n795 }\\nusize is cast to u32:\\nListing 7: pallets/manta-support/src/manta_pay.rs (Line 860)\\n867 impl TryFrom<merkle_tree::CurrentPath<MerkleTreeConfiguration>> for CurrentPath {\\n868 type Error = Error;\\n871 fn try_from(path: merkle_tree::CurrentPath<MerkleTreeConfiguration>) -> Result<Self, Error> {\\n872 Ok(Self {\\n873 sibling_digest: fp_encode(path.sibling_digest)?,\\n874 leaf_index: path.inner_path.leaf_index.0 as u32,\\n875 inner_path: path.inner_path.path.into_iter().map(fp_encode).collect::<Result<_, _>>()?,\\n881 })\\n883 }\\nListing 8: pallets/manta-support/src/manta_pay.rs (Lines 1095,1096)\\n1091 impl From<RawCheckpoint> for Checkpoint {\\n1094 Self::new(checkpoint.receiver_index.map(|i| i as usize).into(), checkpoint.sender_index as usize,)\\nListing 9: runtime/calamari/src/migrations/staking.rs (Line 70)\\n70 let n_of_candidates = manta_collator_selection::Pallet::<T>::candidates().len() as u32;\\nRecommendation:\\nTo address this issue, it is recommended to check the value against the maximum value of the target type before casting.", + "description_summary": "Casting usize to smaller types may cause data loss and unexpected behavior."
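The recommended check-before-cast pattern corresponds to Rust's fallible conversions; a minimal sketch with hypothetical helper names:

use core::convert::TryInto;

// Fallible narrowing: errors out instead of silently truncating on 64-bit
// platforms where usize is wider than the target type.
fn shard_index_u8(shard_index: usize) -> Result<u8, &'static str> {
    shard_index.try_into().map_err(|_| "shard index exceeds u8::MAX")
}

fn leaf_index_u32(leaf_index: usize) -> Result<u32, &'static str> {
    leaf_index.try_into().map_err(|_| "leaf index exceeds u32::MAX")
}

fn main() {
    assert_eq!(shard_index_u8(255), Ok(255));
    assert!(shard_index_u8(256).is_err());
    assert_eq!(leaf_index_u32(70_000), Ok(70_000));
}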
+ }, + { + "title": "Unchecked Math Could Impact Weight Calculation", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "ceb9e46cd53b77eb914ba6c17452fc238bc3a28f", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "runtime/calamari/src/fee.rs", + "lines": [76] + }, + { + "file_path": "primitives/manta/src/xcm.rs", + "lines": [183, 251, 252] + }, + { + "file_path": "runtime/manta/src/fee.rs", + "lines": [47, 52] + }, + { + "file_path": "runtime/common/src/lib.rs", + "lines": [115] + }, + { + "file_path": "primitives/manta/src/constants.rs", + "lines": [110] + } + ], + "reported_impact": "Low", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": null, + "description": "4.4 (HAL-04) UNCHECKED MATH COULD IMPACT WEIGHT CALCULATION - LOW (2.5)\\nDescription:\\nIt was identified that several areas in the buy_weight and the refund_weight functions that could potentially benefit from enhanced computational checks. Currently, despite numerous instances of proven arithmetic calculations, the function does not have a mechanism to handle situations where underflow or overflow states might occur.\\nWhile these states haven\u2019t been identified as potential risks for exploitation, implementing additional safeguards to account for them will be beneficial.\\nAnother point of consideration pertains to the WEIGHT_PER_SECOND value. This value serves as a divisor in computing the number of tokens required for payment or refund during the weight purchasing procedure. While it is predetermined as a constant during the system\u2019s compilation, it currently lacks a constraint to assure that it never equals zero. This is a significant potential risk as it could result in a system panic if the value happens to be zero, causing a division by zero error. Moreover, as the WEIGHT_PER_SECOND value is also used in calculations elsewhere in the system, this issue could potentially affect other sections of the codebase as well.\\nCode Location:\\nUnsafe multiplication in the tests multiplier_growth_simulator_and_congestion_budget_test:\\nListing 10: runtime/calamari/src/fee.rs (Line 76)\\n69 #[ test ]\\n#[ ignore ] // This test should not fail CI\\n71 fn multiplier_growth_simulator_and_congestion_budget_test () {\\n72 let target_daily_congestion_cost_usd = 100 _000 ;\\n73 let kma_price = fetch_kma_price () . unwrap () ;\\n74 println! (\" KMA / USD price as read from CoinGecko = { kma_price } \") ;\\n75 let target_daily_congestion_cost_kma =\\n76 ( target_daily_congestion_cost_usd as f32 / kma_price * KMA as f32 ) as u128 ;\\nUnsafe multiplication in buy_weight function\\nListing 11: primitives/manta/src/xcm.rs (Line 183)\\n146 fn buy_weight (& mut self , weight : Weight , payment : Assets ) -> Result < Assets > {\\n153 let first_asset = payment . fungible_assets_iter () . next () . ok_or ({\\n160 XcmError :: TooExpensive }) ?;\\n183 let amount = units_per_second * ( weight as u128 ) / ( WEIGHT_PER_SECOND as u128 ) ;\\nUnsafe subtraction in refund_weight function\\nListing 12: primitives/manta/src/xcm.rs (Line 251)\\n248 fn refund_weight (& mut self , weight : Weight ) -> Option < MultiAsset > {\\n251 self . 
weight -= weight;\\n252 let amount = *units_per_second * (weight as u128) / (WEIGHT_PER_SECOND as u128);\\nPlaces where WEIGHT_PER_SECOND is used as a divisor:\\n\u2022 Function refund_weight:\\nListing 13: primitives/manta/src/xcm.rs (Line 252)\\n\u2022 Function buy_weight:\\nListing 14: primitives/manta/src/xcm.rs (Line 183)\\nThe following snippets show how the q divisor is calculated and how it\u2019s equal to zero if WEIGHT_PER_SECOND is zero too.\\nListing 15: runtime/manta/src/fee.rs (Lines 47,52)\\nRecommendation:\\nWe recommend a review of these identified areas to ensure that adequate arithmetic checks are in place and that a safety constraint is set for WEIGHT_PER_SECOND to prevent it from reaching zero. These improvements will further fortify the system, ensuring stability, reliability, and secure operation.\\n\u2022 It is recommended to add a constraint to ensure that WEIGHT_PER_SECOND is never 0.\\n\u2022 In \u201crelease\u201d mode, Rust does not panic! due to overflows, and overflowed values simply \u201cwrap\u201d without any explicit feedback to the user. It is recommended to use vetted safe math libraries for arithmetic operations consistently throughout the codebase. Consider replacing the multiplication operator with Rust\u2019s checked_mul method, the subtraction operator with Rust\u2019s checked_sub method, and so on.", + "description_summary": "Unchecked math in weight calculations may lead to overflow, underflow, or division by zero errors." + } + ] + }, + { + "audited_project_id": 9, + "project_name": "MantaNetwork", + "auditor": "Veridise", + "audit_link": "https://github.com/Manta-Network/Atlantic-Audits/blob/main/Atlantic-Veridise-zkSBT.pdf", + "findings": [ + { + "title": "SBT reservations can be overwritten", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "ceb9e46cd53b77eb914ba6c17452fc238bc3a28f", + "reported_remediated_commit": "be7f1c7a8b7d35a84f657854032b2bf3b7e01ab8", + "location": [ + { + "file_path": "pallets/manta-sbt/src/lib.rs", + "lines": null + } + ], + "reported_impact": "Medium", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Logic Error", + "description": "4.1 Detailed Description of Issues\n\nV-MSBT-VUL-001: SBT reservations can be overwritten\n\nSeverity: Medium\nType: Logic Error\nCommit: ceb9e46\nStatus: Fixed\nFile(s): pallets/manta-sbt/src/lib.rs\nLocation(s): reserve_sbt\n\nThe manta-sbt pallet exposes a method reserve_sbt whereby callers pay MANTA tokens to reserve the right to mint N SBTs. This reservation is enforced by allocating reservation ids to the caller of the method so that when users mint an SBT, Manta Chain uses one of the reserved ids to track the number reserved. In particular, Manta Chain maintains a map called ReservedIds which maps users to an interval of reservation ids such that the length of the interval indicates the number of SBTs they can mint.\n\nHowever, reserve_sbt does not check whether a user has already reserved SBTs to mint when calling the method and simply updates the mapping to a new interval of length N.
This can be seen in the method implementation below:\n\n/// Reserves AssetIds to be used subsequently in 'to_private' above.\n\n/// Increments AssetManager\u2019s AssetId counter.\n\n#[pallet::call_index(1)]\n#[pallet::weight(<T as pallet::Config>::WeightInfo::reserve_sbt())]\n#[transactional]\npub fn reserve_sbt(origin: OriginFor<T>) -> DispatchResult {\n let who = ensure_signed(origin)?;\n\n // Charges fee to reserve AssetIds\n <T as pallet::Config>::Currency::transfer(\n &who,\n &Self::account_id(),\n T::ReservePrice::get(),\n ExistenceRequirement::KeepAlive,\n )?;\n\n // Reserves unique AssetIds to be used later to mint SBTs\n let asset_id_range: Vec<StandardAssetId> = (0..T::MintsPerReserve::get())\n .map(|_| Self::next_sbt_id_and_increment())\n .collect::<Result<Vec<StandardAssetId>, _>>()?;\n\n // The range of 'AssetIds' that are reserved as SBTs\n let start_id: StandardAssetId = *asset_id_range.first().ok_or(Error::<T>::ZeroMints)?;\n let stop_id: StandardAssetId = *asset_id_range.last().ok_or(Error::<T>::ZeroMints)?;\n\n ReservedIds::<T>::insert(&who, (start_id, stop_id));\n Self::deposit_event(Event::<T>::SBTReserved { who, start_id, stop_id });\n Ok(())\n}\n\nThus, if a user calls the method M times to reserve M*N SBTs, then they will only be able to mint N.\n\nImpact: Users can lose money because they may think they have reserved M*N SBTs when they in fact can only mint N. Furthermore, if a user calls reserve_sbt M times in a row, then M*(N-1) SBTs can no longer be minted. This is due to the fact that reserved ids are always incremented.\n\nFor example, suppose someone sets up a relayer account which users could use to purchase zkSBTs for themselves without being linked to the transaction. That relayer account would only receive SBTs on its last call to reserve_sbt. Implementations in which the relayer reserves and sends the SBTs separately may be error-prone.\n\nRecommendation: If at most N SBTs can be reserved at a time for a user, then reserve_sbt should check whether a user has already reserved SBTs. If the protocol permits more than N SBTs to be reserved at a time for a user, then the mapping should be changed. One option might be to map each user to a set of intervals corresponding to ids reserved for them.\n\nDeveloper Response: The developers have acknowledged the issue and a fix has been proposed in this pull request. The fix changes the function to revert if the user has SBT ids already reserved.", + "description_summary": "SBT reservations can be overwritten, causing users to lose reserved tokens." + }, + { + "title": "Extrinsics charge static fees that do not account for Merkle tree updates", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "ceb9e46cd53b77eb914ba6c17452fc238bc3a28f", + "reported_remediated_commit": "f0ed5957c3bc87b4a4a2631f26aceefb9607f069", + "location": [ + { + "file_path": "pallets/manta-sbt/src/lib.rs", + "lines": null + } + ], + "reported_impact": "Low", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Bad Extrinsic Weight", + "description": "4.1.2 V-MSBT-VUL-002: Extrinsics charge static fees that do not account for Merkle tree updates\n\nSeverity: Low\nType: Bad Extrinsic Weight\nCommit: ceb9e46\nStatus: Fixed\nFile(s): pallets/manta-sbt/src/lib.rs\nLocation(s): to_private, mint_sbt_eth\n\nTransactions to_private and mint_sbt_eth take membership proofs and store UTXOs to a Merkle tree on the ledger. manta-sbt shards this Merkle tree into 256 buckets, where each bucket has its own Merkle tree.
Instead of storing the entire tree at each bucket, the Ledger just stores the last path added to the tree. When adding a UTXO, the Ledger first computes its corresponding bucket, then computes the new path pointing to that UTXO, and finally adds that path to the bucket.\n\nComputing the new path should take time proportional to log(n), where n is the size of the Merkle tree. The current benchmarking scheme only covers cases where the previous path is small, i.e., at most size 1. However, if the number of transactions gets large, i.e., is on the order of hundreds of millions or billions, then the size of the path can get to 24-28 (taking shards into account). If the tree grows to this size, this means each execution of the extrinsic will perform 24-28 hashes, multiplied by the number of UTXOs to be added.\n\nThe benchmarking scheme should take into account the size of the tree to make sure that the existing weights are enough to offset the computation of the new Merkle tree path.\n\nImpact: In general, it is important to set the weights to account for both computation and storage; setting the weight too low can allow users to perform a large number of transactions with little cost. In particular, malicious users may take advantage of the low fee to launch a DoS attack.\n\nRecommendation: We recommend that the weights be computed with a larger database that reflects the state of the chain after a year\u2019s worth of use.\n\nDeveloper Response: In progress: there is an open PR for manta-pay right now that will be extended to pallet SBT. The manta-pay PR can be found here.", + "description_summary": "Static fees for extrinsics do not account for Merkle tree updates, risking underpriced transactions." + }, + { + "title": "Missing validation in pull_ledger_diff", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "ceb9e46cd53b77eb914ba6c17452fc238bc3a28f", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/manta-sbt/src/lib.rs", + "lines": null + } + ], + "reported_impact": "Warning", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Data Validation", + "description": "4.1.3 V-MSBT-VUL-003: Missing validation in pull_ledger_diff\n\nSeverity: Warning\nType: Data Validation\nCommit: ceb9e46\nStatus: Acknowledged\nFile(s): pallets/manta-sbt/src/lib.rs\nLocation(s): pull_ledger_diff\n\npull_ledger_diff takes as input a Checkpoint, which is a struct with two fields, receiver_index and sender_index, and pulls receiver data from the ledger starting at receiver_index up to at most receiver_index + PULL_MAX_RECEIVER_UPDATE_SIZE. However, there is no check that this sum cannot overflow for both the sender and receiver index in pull_receivers and pull_receivers_for_shard.\n\nImpact: If the code is compiled without the --release flag, then a malicious user could crash the node by passing in bad values. If it is built with --release, then the call will be reported as successful and no senders or receivers will be returned. However, if a benign end user is calling the API with incorrect indexes, it might be better to return an error informing them that the index is invalid.\n\nRecommendation: We recommend adding bounds checks to be safe and to return an Error.\n\nDeveloper Response: The developers acknowledged the issue and will fix this prior to release.", + "description_summary": "Missing bounds check in pull_ledger_diff could allow overflow and node crash."
+ }, + { + "title": "Off-by-one error in to_private", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "ceb9e46cd53b77eb914ba6c17452fc238bc3a28f", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/manta-sbt/src/lib.rs", + "lines": null + } + ], + "reported_impact": "Warning", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Logic Error", + "description": "4.1.4 V-MSBT-VUL-004: Off-by-one error in to_private\n\nSeverity: Warning\nType: Logic Error\nCommit: ceb9e46\nStatus: Acknowledged\nFile(s): pallets/manta-sbt/src/lib.rs\nLocation(s): to_private\n\nThe manta-sbt pallet has a user-callable extrinsic, to_private, which allows users to mint reserved SBT tokens. To facilitate minting tokens, the pallet maintains a mapping from users to an interval [l, u] where l \u2264 u and l and u refer to minimum and maximum asset ids that can be minted by the user. The function does two things. First, it mints an asset with id l and then updates the interval to [l + 1, u]. If l + 1 > u, it removes the user from the map since this indicates they don\u2019t have any more user ids to reserve.\n\nThere is an edge case where l = u = 2^128 - 1 where this function\u2019s behavior is incorrect. In this case, the extrinsic will revert because l + 1 would result in an overflow as it is of type u128. However, this prevents the user from minting a token reserved for them.\n\nCurrently this should not be a problem because it is unlikely that all 2^128 ids will get reserved anytime soon.\n\nImpact: A user may not be able to mint an SBT token reserved for them.\n\nRecommendation: We recommend adding an edge case for when l = 2^128-1. In that case, the token should be minted and user removed from the map.\n\nDeveloper Response: Not to be resolved. What the auditors point out is correct that the last id will not get minted; however, the resolution is unnecessary as once we hit this edge case the entire zkSBT protocol is unable to function. Furthermore, this value is extremely large and unlikely to ever be reached.", + "description_summary": "Off-by-one error in to_private may prevent users from minting reserved tokens." + }, + { + "title": "Unnecessary Storage Variable", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "ceb9e46cd53b77eb914ba6c17452fc238bc3a28f", + "reported_remediated_commit": "fa10f39afcd8882d14252f55ce08ffcab6321911", + "location": [ + { + "file_path": "pallets/manta-sbt/src/lib.rs", + "lines": [382] + } + ], + "reported_impact": "Warning", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Gas Optimization", + "description": "4.1.5 V-MSBT-VUL-005: Unnecessary Storage Variable\n\nSeverity: Warning\nType: Gas Optimization\nCommit: ceb9e46\nStatus: Fixed\nFile(s): pallets/manta-sbt/src/lib.rs\nLocation(s): 382\n\nThe storage variable UtxoAccumulatorOutputs keeps track of all Merkle roots generated by manta-sbt; however, this variable is unnecessary. It seems to be taken from the manta-pay pallet which uses it to keep track of previous Merkle roots so that when users provide membership proofs for asset transfers, the ledger can check that the root provided was a legitimate one.\n\nHowever, the tokens in manta-sbt are non-transferable so there isn\u2019t any logic which should use it. In particular, the only two places which use this storage variable are in the functions has_matching_utxo_accumulator_output and register_all. 
The former will never be invoked in manta-sbt because the asset is non-transferable. The latter just adds a newly created Merkle root to UtxoAccumulatorOutputs.\n\nSince this storage variable keeps track of every root generated, it is a non-trivial amount of storage to keep on-chain. Moreover, every minting transaction, namely to_private and mint_sbt_eth, will incur an extra cost of writing to storage, which is unnecessary.\n\nImpact: Unnecessarily high transaction fees for every mint transaction and bloated storage.\n\nRecommendation: We recommend removing this storage variable.\n\nDeveloper Response: This has been fixed in the following commit.", + "description_summary": "Unnecessary storage variable increases transaction fees and storage usage." + }, + { + "title": "Missing validation when setting mint info", + "repository": "https://github.com/Manta-Network/Manta", + "audited_commit": "ceb9e46cd53b77eb914ba6c17452fc238bc3a28f", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "pallets/manta-sbt/src/lib.rs", + "lines": null + } + ], + "reported_impact": "Info", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": "Data Validation", + "description": "4.1.6 V-MSBT-VUL-006: Missing validation when setting mint info\n\nSeverity: Info\nType: Data Validation\nCommit: ceb9e46\nStatus: Acknowledged\nFile(s): pallets/manta-sbt/src/lib.rs\nLocation(s): new_mint_info, update_mint_info\n\nWhen setting metadata for a mint type, the functions new_mint_info and update_mint_info set the start_time and end_time associated with the mint type. The functions validate the start and end times by making sure start_time < end_time. However, they don\u2019t check whether those values make sense with respect to the current time. In particular, it seems like you would want now < end_time.\n\nImpact: Setting end_time < now means that nobody can mint for that mint type until it gets changed again.\n\nRecommendation: We recommend adding the additional validation, which shouldn\u2019t be too expensive.\n\nDeveloper Response: This is intended behavior for now. The only way to pause a mint is to set end_time < now. This, however, could be improved by using an Option<> and removing the need for this invariant. This is low priority, but we could fix it in the future.", + "description_summary": "Missing validation allows minting periods to be set in the past, blocking minting."
+ } + ] + }, + { + "audited_project_id": 10, + "project_name": "Astar", + "auditor": "Security Research Labs", + "audit_link": "https://github.com/polkadot-assurance-legion/pal-docs/blob/main/audits/24h1/astar-srl-2403.pdf", + "findings": [ + { + "title": "Missing benchmarking for the lockdrop precompile dispatch", + "repository": "https://github.com/AstarNetwork/Astar", + "audited_commit": "282485aa2d50f12f42463bba1d393fce4c57c2a3", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "precompiles/dispatch-lockdrop/src/lib.rs", + "lines": [ + { + "from": 88, + "to": 90 + } + ] + } + ], + "reported_impact": "Info", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": null, + "description": "3.2.1 Missing benchmarking for the lockdrop precompile dispatch\nAttack scenario\nLocation\nTracking\nAttack impact\nSeverity\nStatus\nStatic ref_time used for weight calculation leading to underestimation\nof the weights can enable an attacker to perform denial of service\nprecompiles/dispatch-lockdrop\n[2]\nAn attacker may spam and conduct denial of service attacks cheaply in\ncomparison to the actual weight_to_gas price\nInfo\nClosed [4]\nThe precompile dispatch-lockdrop has un-benchmarked weight_to_gas estimation with\nref_time configured to 1_000_000_000 [5].\n// Record a fixed amount of weight to ensure there is no free execution\nhandle.record_cost(Runtime::GasWeightMapping::weight_to_gas(\n Weight::from_parts(1_000_000_000u64, 0),\n))?;\nThis estimation doesn\u2019t reflect the actual runtime environment and can aid an attacker to spam\nthe chain.\nWe suggest to appropriately benchmark the precompile dispatch to reflect the accurate\nweight_to_gas estimation for ref_time and POV_size.\nThe issue was acknowledged by the Astar team and remediation is currently in progress\nthrough collaboration with the Frontier team [6]", + "description_summary": "Underestimated weight_to_gas estimation due to missing benchmarking allows attackers to perform denial of service attacks." + }, + { + "title": "Unbounded call length limit in lockdrop dispatch call", + "repository": "https://github.com/AstarNetwork/Astar", + "audited_commit": "282485aa2d50f12f42463bba1d393fce4c57c2a3", + "reported_remediated_commit": null, + "location": [ + { + "file_path": "precompiles/dispatch-lockdrop/src/lib.rs", + "lines": [74, 104] + } + ], + "reported_impact": "Low", + "reported_likelihood": null, + "cwe_classification": null, + "vulnerability_class_audit": null, + "description": "3.2.2 Unbounded call length limit in lockdrop dispatch call\nAttack scenario\nLocation\nTracking\nAttack impact\nSeverity\nStatus\nAn attacker may create multiple nested calls bloating the call_length\nbefore call decoding\nprecompiles/dispatch-lockdrop\n[3]\nUnbounded call length can aid an attacker to cause heap overflow when\ncall data is moved to the vector\nLow\nClosed [7]\nThe precompile dispatch call uses the UnboundedBytes [8] type without any call_length being\nset for the call object parameter. During runtime-call decoding, a stack overflow is prevented\nthrough usage of DecodeLimit [9] however, a heap overflow might occur even before decoding\nif an unbounded call with large call_length is moved into the u8 vector.\nAn attacker can use this to create multiple nested calls bloating the call_length and cause heap\noverflow even before call decoding.\nWe recommend using BoundedBytes instead of UnboundedBytes. 
A best-practice implementation from Moonbeam may be adopted for setting CallLengthLimit similar to GetProposalLimit [10] and implementing an additional guard condition for call_length validity similar to proposal_length [11] before decoding the call, for additional safety.", + "description_summary": "Unbounded call length in dispatch call allows attackers to cause heap overflow and perform denial of service attacks." + } + ] + }, + { + "audited_project_id": 11, + "project_name": "Astar", + "auditor": "Zellic", + "audit_link": "https://github.com/polkadot-assurance-legion/pal-docs/blob/main/audits/24h1/astar-zellic-2401.pdf", + "findings": [ + { + "title": "Weight calculation", + "repository": "https://github.com/AstarNetwork/Astar", + "audited_commit": "fc14b13401e1fb5e7391715fc76a308204173802", + "reported_remediated_commit": null, + "location": null, + "reported_impact": "Low", + "reported_likelihood": "Low", + "cwe_classification": null, + "vulnerability_class_audit": "Coding Mistakes", + "description": "All the assets pallet functions exposed by the chain extension are weighted with a constant amount. The weight of some operations is charged using the same quantity that the assets pallet benchmarks have computed. However, other operations only charge the weight of one runtime database read operation \u2014 T::DbWeight::get().reads(1_u64).\nTwo functions, MetadataSymbol and MetadataName, operate on a variable amount of data, but they also only account for one runtime database read operation.", + "description_summary": "Constant weight for functions, ignores variable data in MetadataSymbol & MetadataName." + } + ] + } +] From ec81f1e7e2612dc2f5271c83533cb9cb39215174 Mon Sep 17 00:00:00 2001 From: arturoBeccar Date: Fri, 1 Nov 2024 19:56:33 -0300 Subject: [PATCH 3/4] Add missing descriptions --- audited-projects/4-Pendulum/findings-4-Pendulum.json | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/audited-projects/4-Pendulum/findings-4-Pendulum.json b/audited-projects/4-Pendulum/findings-4-Pendulum.json index 4f9ad54..509ffba 100644 --- a/audited-projects/4-Pendulum/findings-4-Pendulum.json +++ b/audited-projects/4-Pendulum/findings-4-Pendulum.json @@ -112,7 +112,9 @@ "reported_impact": null, "reported_likelihood": null, "cwe_classification": null, - "vulnerability_class_audit": "Linters" + "vulnerability_class_audit": "Linters", + "description": "Linter Warnings\nThe codebase has generated several warnings when analyzed using cargo clippy, indicating areas where improvements can be made to enhance the code quality.\n\nID: PDM-002\nScope: Linters\nStatus: Acknowledged\n\nDescription:\nDuring the static analysis process using cargo clippy, the following warnings within the scope of the audit were identified:\n- needless_return (https://rust-lang.github.io/rust-clippy/master/index.html#needless_return)\n- needless_borrow (https://rust-lang.github.io/rust-clippy/master/index.html#needless_borrow)\n\nLocation:\npallets/orml-currencies-allowance-extension/src/lib.rs:245-246:\n&owner,\n&destination,\n\nRecommendation:\nTo maintain a high-quality and easily maintainable codebase, it is advised to address the warnings generated by cargo clippy. Resolving these linter warnings will improve the overall code quality, facilitate troubleshooting, and potentially enhance performance and security within the project.", + "description_summary": "Codebase has linter warnings that impact maintainability and should be resolved to enhance quality and security."
}, { "title": "Logging in Runtime", @@ -186,7 +188,9 @@ "reported_impact": null, "reported_likelihood": null, "cwe_classification": null, - "vulnerability_class_audit": "Code Quality / Testing" + "vulnerability_class_audit": "Code Quality / Testing", + "description": "Test Coverage\nThe current test coverage of the orml-currencies-allowance-extension pallet is insufficient, with only 22.22% coverage. Moreover, the implementation of ChainExtension in the runtime lacks any test coverage.\n\nID: PDM-003\nScope: Code Quality / Testing\nStatus: Acknowledged\n\nDescription:\nTo evaluate the test coverage, we recommend using the cargo tarpaulin command:\ncargo tarpaulin --out Html --output-dir ./tarpaulin-report\n\nRunning this command generates an HTML file that provides detailed coverage information for all packages. Specifically, it highlights the orml-currencies-allowance-extension pallet, which currently has a coverage of only 22.22%. Additionally, it reveals the absence of tests for the methods utilized in the runtime to implement ChainExtension, including is_allowed_currency, allowance, do_approve_transfer, and do_transfer_approved, as well as the lack of testing for the overall ChainExtension implementation in the foucoco runtime.\n\nRecommendation:\nTo address the low test coverage in the orml-currencies-allowance-extension pallet and the lack of test coverage in the runtime implementation of ChainExtension, it is essential to develop a comprehensive test suite. This suite should include thorough testing to ensure the security, stability, and maintainability of the project.\n\nImplementing continuous integration (CI) systems to automate the execution of the test suite is highly recommended. This practice enables the team to identify areas with insufficient test coverage, detect regressions, and uncover areas that require improvement. By incorporating CI into the development workflow, valuable feedback is obtained, ultimately enhancing the overall quality of the codebase.", + "description_summary": "Insufficient test coverage in key areas affects security, stability, and maintainability." 
}, { "title": "Vulnerable and Unmaintained Dependencies", From c15e61edabbd6675bc44a6f068b9012ba2e49edc Mon Sep 17 00:00:00 2001 From: arturoBeccar Date: Fri, 1 Nov 2024 20:11:37 -0300 Subject: [PATCH 4/4] Update findings.json --- dataset/findings.json | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/dataset/findings.json b/dataset/findings.json index d0d3555..fd390fc 100644 --- a/dataset/findings.json +++ b/dataset/findings.json @@ -592,7 +592,9 @@ "reported_impact": null, "reported_likelihood": null, "cwe_classification": null, - "vulnerability_class_audit": "Linters" + "vulnerability_class_audit": "Linters", + "description": "Linter Warnings\nThe codebase has generated several warnings when analyzed using cargo clippy, indicating areas where improvements can be made to enhance the code quality.\n\nID: PDM-002\nScope: Linters\nStatus: Acknowledged\n\nDescription:\nDuring the static analysis process using cargo clippy, the following warnings within the scope of the audit were identified:\n- //rust-lang.github.io/rust-clippy/master/index.html#needless_return\">needless_return\n- //rust-lang.github.io/rust-clippy/master/index.html#needless_borrow\">needless_borrow\n\nLocation:\npallets/orml-currencies-allowance-extension/src/lib.rs:245-246:\n&owner,\n&destination,\n\nRecommendation:\nTo maintain a high-quality and easily maintainable codebase, it is advised to address the warnings generated by cargo clippy. Resolving these linter warnings will improve the overall code quality, facilitate troubleshooting, and potentially enhance performance and security within the project.", + "description_summary": "Codebase has linter warnings that impact maintainability and should be resolved to enhance quality and security." }, { "title": "Logging in Runtime", @@ -666,7 +668,9 @@ "reported_impact": null, "reported_likelihood": null, "cwe_classification": null, - "vulnerability_class_audit": "Code Quality / Testing" + "vulnerability_class_audit": "Code Quality / Testing", + "description": "Test Coverage\nThe current test coverage of the orml-currencies-allowance-extension pallet is insufficient, with only 22.22% coverage. Moreover, the implementation of ChainExtension in the runtime lacks any test coverage.\n\nID: PDM-003\nScope: Code Quality / Testing\nStatus: Acknowledged\n\nDescription:\nTo evaluate the test coverage, we recommend using the cargo tarpaulin command:\ncargo tarpaulin --out Html --output-dir ./tarpaulin-report\n\nRunning this command generates an HTML file that provides detailed coverage information for all packages. Specifically, it highlights the orml-currencies-allowance-extension pallet, which currently has a coverage of only 22.22%. Additionally, it reveals the absence of tests for the methods utilized in the runtime to implement ChainExtension, including is_allowed_currency, allowance, do_approve_transfer, and do_transfer_approved, as well as the lack of testing for the overall ChainExtension implementation in the foucoco runtime.\n\nRecommendation:\nTo address the low test coverage in the orml-currencies-allowance-extension pallet and the lack of test coverage in the runtime implementation of ChainExtension, it is essential to develop a comprehensive test suite. This suite should include thorough testing to ensure the security, stability, and maintainability of the project.\n\nImplementing continuous integration (CI) systems to automate the execution of the test suite is highly recommended. 
This practice enables the team to identify areas with insufficient test coverage, detect regressions, and uncover areas that require improvement. By incorporating CI into the development workflow, valuable feedback is obtained, ultimately enhancing the overall quality of the codebase.", + "description_summary": "Insufficient test coverage in key areas affects security, stability, and maintainability." }, { "title": "Vulnerable and Unmaintained Dependencies",