
Commit

Merge branch 'master' into donal-coretime-assignments-mitigation
seadanda authored Oct 16, 2024
2 parents 1db4e32 + 4cd7e86 commit 4e0babd
Showing 15 changed files with 519 additions and 259 deletions.
1 change: 0 additions & 1 deletion .github/workflows/check-cargo-check-runtimes.yml
@@ -110,7 +110,6 @@ jobs:
- check-runtime-coretime
- check-runtime-bridge-hubs
- check-runtime-contracts
- check-runtime-starters
- check-runtime-testing
if: always() && !cancelled()
steps:
1 change: 1 addition & 0 deletions .github/workflows/check-labels.yml
@@ -12,6 +12,7 @@ on:
jobs:
check-labels:
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Check labels
env:
1 change: 1 addition & 0 deletions .github/workflows/check-links.yml
@@ -20,6 +20,7 @@ permissions:
jobs:
link-checker:
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Restore lychee cache
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v3.3.2 (7. Sep 2023)
1 change: 1 addition & 0 deletions .github/workflows/check-prdoc.yml
@@ -21,6 +21,7 @@ env:
jobs:
check-prdoc:
runs-on: ubuntu-latest
timeout-minutes: 10
if: github.event.pull_request.number != ''
steps:
- name: Checkout repo
1 change: 1 addition & 0 deletions .github/workflows/check-semver.yml
@@ -17,6 +17,7 @@ jobs:
uses: ./.github/workflows/reusable-preflight.yml
check-semver:
runs-on: ubuntu-latest
timeout-minutes: 90
needs: [preflight]
container:
image: ${{ needs.preflight.outputs.IMAGE }}
6 changes: 3 additions & 3 deletions .github/workflows/command-backport.yml
@@ -4,7 +4,7 @@ on:
# This trigger can be problematic, see: https://securitylab.github.com/resources/github-actions-preventing-pwn-requests/
# In our case it is fine since we only run it on merged Pull Requests and do not execute any of the repo code itself.
pull_request_target:
types: [ closed, labeled ]
types: [closed, labeled]

permissions:
contents: write # so it can comment
@@ -66,7 +66,7 @@ jobs:
with:
script: |
const pullNumbers = '${{ steps.backport.outputs.created_pull_numbers }}'.split(' ');
for (const pullNumber of pullNumbers) {
await github.rest.issues.addLabels({
issue_number: parseInt(pullNumber),
@@ -84,7 +84,7 @@ jobs:
script: |
const pullNumbers = '${{ steps.backport.outputs.created_pull_numbers }}'.split(' ');
const reviewer = '${{ github.event.pull_request.user.login }}';
for (const pullNumber of pullNumbers) {
await github.pulls.createReviewRequest({
owner: context.repo.owner,
2 changes: 2 additions & 0 deletions .github/workflows/docs.yml
@@ -18,6 +18,7 @@ jobs:

test-doc:
runs-on: ${{ needs.preflight.outputs.RUNNER }}
timeout-minutes: 60
needs: [preflight]
container:
image: ${{ needs.preflight.outputs.IMAGE }}
@@ -29,6 +30,7 @@

build-rustdoc:
runs-on: ${{ needs.preflight.outputs.RUNNER }}
timeout-minutes: 40
if: ${{ needs.preflight.outputs.changes_rust }}
needs: [preflight]
container:
16 changes: 8 additions & 8 deletions .github/workflows/fork-sync-action.yml
@@ -1,5 +1,5 @@
# This Workflow is not supposed to run in the paritytech/polkadot-sdk repo.
# This Workflow is supposed to run only in the forks of the repo,
# This Workflow is supposed to run only in the forks of the repo,
# paritytech-release/polkadot-sdk specifically,
# to automatically maintain the critical fork synced with the upstream.
# This Workflow should be always disabled in the paritytech/polkadot-sdk repo.
@@ -11,10 +11,10 @@ on:
workflow_dispatch:

jobs:
job_sync_branches:
uses: paritytech-release/sync-workflows/.github/workflows/sync-with-upstream.yml@latest
with:
fork_writer_app_id: ${{ vars.UPSTREAM_CONTENT_SYNC_APP_ID}}
fork_owner: ${{ vars.RELEASE_ORG}}
secrets:
fork_writer_app_key: ${{ secrets.UPSTREAM_CONTENT_SYNC_APP_KEY }}
job_sync_branches:
uses: paritytech-release/sync-workflows/.github/workflows/sync-with-upstream.yml@latest
with:
fork_writer_app_id: ${{ vars.UPSTREAM_CONTENT_SYNC_APP_ID}}
fork_owner: ${{ vars.RELEASE_ORG}}
secrets:
fork_writer_app_key: ${{ secrets.UPSTREAM_CONTENT_SYNC_APP_KEY }}
7 changes: 6 additions & 1 deletion .github/workflows/tests-misc.yml
@@ -178,6 +178,11 @@ jobs:
- name: script
id: compare
run: |
if [ "${{ github.ref_name }}" = "master" ]; then
echo -e "Exiting on master branch"
exit 0
fi
docker run --rm \
-v $PWD/artifacts/master:/artifacts/master \
-v $PWD/artifacts/current:/artifacts/current \
@@ -299,7 +304,7 @@ jobs:
# name: hfuzz-${{ github.sha }}

cargo-check-each-crate:
timeout-minutes: 140
timeout-minutes: 70
needs: [preflight]
runs-on: ${{ needs.preflight.outputs.RUNNER }}
if: ${{ needs.preflight.outputs.changes_rust }}
4 changes: 2 additions & 2 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion Cargo.toml
@@ -1016,7 +1016,7 @@ people-rococo-runtime = { path = "cumulus/parachains/runtimes/people/people-roco
people-westend-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend" }
people-westend-runtime = { path = "cumulus/parachains/runtimes/people/people-westend" }
pin-project = { version = "1.1.3" }
platforms = { version = "3.0" }
platforms = { version = "3.4" }
polkadot-approval-distribution = { path = "polkadot/node/network/approval-distribution", default-features = false }
polkadot-availability-bitfield-distribution = { path = "polkadot/node/network/bitfield-distribution", default-features = false }
polkadot-availability-distribution = { path = "polkadot/node/network/availability-distribution", default-features = false }
13 changes: 13 additions & 0 deletions prdoc/pr_6025.prdoc
@@ -0,0 +1,13 @@
# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json

title: Refactor staking pallet benchmarks to `v2`

doc:
- audience: Runtime Dev
description: |
Update benchmarks in staking pallet to the second version of the `frame_benchmarking` runtime benchmarking framework.

crates:
- name: pallet-staking
bump: patch
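
For context on the "v2" wording above: the second version of the `frame_benchmarking` framework expresses benchmarks as attribute-macro-annotated functions rather than the older declarative macro DSL. The sketch below is a hypothetical, minimal illustration of that style under assumed placeholder names (`do_something`, `Something`, `crate::mock`); it is not code from this PR or from `pallet-staking`.

use frame_benchmarking::v2::*;
use frame_system::RawOrigin;

#[benchmarks]
mod benchmarks {
	use super::*;

	// Hypothetical benchmark; `do_something` and `Something` are placeholder names.
	#[benchmark]
	fn do_something(x: Linear<1, 100>) {
		// Setup code runs outside the measured section.
		let caller: T::AccountId = whitelisted_caller();

		// Only the annotated extrinsic call is measured.
		#[extrinsic_call]
		_(RawOrigin::Signed(caller), x);

		// Post-conditions are checked after the measured call.
		assert!(Something::<T>::get().is_some());
	}

	impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test);
}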
10 changes: 10 additions & 0 deletions prdoc/pr_6045.prdoc
@@ -0,0 +1,10 @@
title: '[pallet-revive] ensure the return data is reset if no frame was instantiated'

doc:
- audience:
- Runtime Dev
description: Failed call frames do not produce new return data but still reset it.

crates:
- name: pallet-revive
bump: patch
82 changes: 80 additions & 2 deletions substrate/frame/revive/src/exec.rs
@@ -1331,14 +1331,18 @@ where
// is caught by it.
self.top_frame_mut().allows_reentry = allows_reentry;

let dest = T::AddressMapper::to_account_id(dest);
let value = value.try_into().map_err(|_| Error::<T>::BalanceConversionFailed)?;
// We reset the return data now, so it is cleared out even if no new frame was executed.
// This is for example the case for balance transfers or when creating the frame fails.
*self.last_frame_output_mut() = Default::default();

let try_call = || {
let dest = T::AddressMapper::to_account_id(dest);
if !self.allows_reentry(&dest) {
return Err(<Error<T>>::ReentranceDenied.into());
}

let value = value.try_into().map_err(|_| Error::<T>::BalanceConversionFailed)?;

// We ignore instantiate frames in our search for a cached contract.
// Otherwise it would be possible to recursively call a contract from its own
// constructor: We disallow calling not fully constructed contracts.
@@ -1378,6 +1382,10 @@ where
}

fn delegate_call(&mut self, code_hash: H256, input_data: Vec<u8>) -> Result<(), ExecError> {
// We reset the return data now, so it is cleared out even if no new frame was executed.
// This is for example the case for unknown code hashes or creating the frame fails.
*self.last_frame_output_mut() = Default::default();

let executable = E::from_storage(code_hash, self.gas_meter_mut())?;
let top_frame = self.top_frame_mut();
let contract_info = top_frame.contract_info().clone();
@@ -1406,6 +1414,10 @@ where
input_data: Vec<u8>,
salt: Option<&[u8; 32]>,
) -> Result<H160, ExecError> {
// We reset the return data now, so it is cleared out even if no new frame was executed.
// This is for example the case when creating the frame fails.
*self.last_frame_output_mut() = Default::default();

let executable = E::from_storage(code_hash, self.gas_meter_mut())?;
let sender = &self.top_frame().account_id;
let executable = self.push_frame(
@@ -4340,6 +4352,72 @@ mod tests {
});
}

#[test]
fn last_frame_output_is_always_reset() {
let code_bob = MockLoader::insert(Call, |ctx, _| {
let invalid_code_hash = H256::from_low_u64_le(u64::MAX);
let output_revert = || ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![1] };

// A value of u256::MAX to fail the call on the first condition.
*ctx.ext.last_frame_output_mut() = output_revert();
assert_eq!(
ctx.ext.call(
Weight::zero(),
U256::zero(),
&H160::zero(),
U256::max_value(),
vec![],
true,
false,
),
Err(Error::<Test>::BalanceConversionFailed.into())
);
assert_eq!(ctx.ext.last_frame_output(), &Default::default());

// An unknown code hash to fail the delegate_call on the first condition.
*ctx.ext.last_frame_output_mut() = output_revert();
assert_eq!(
ctx.ext.delegate_call(invalid_code_hash, Default::default()),
Err(Error::<Test>::CodeNotFound.into())
);
assert_eq!(ctx.ext.last_frame_output(), &Default::default());

// An unknown code hash to fail instantiation on the first condition.
*ctx.ext.last_frame_output_mut() = output_revert();
assert_eq!(
ctx.ext.instantiate(
Weight::zero(),
U256::zero(),
invalid_code_hash,
U256::zero(),
vec![],
None,
),
Err(Error::<Test>::CodeNotFound.into())
);
assert_eq!(ctx.ext.last_frame_output(), &Default::default());

exec_success()
});

ExtBuilder::default().build().execute_with(|| {
place_contract(&BOB, code_bob);
let origin = Origin::from_account_id(ALICE);
let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();

let result = MockStack::run_call(
origin,
BOB_ADDR,
&mut GasMeter::<Test>::new(GAS_LIMIT),
&mut storage_meter,
0,
vec![],
None,
);
assert_matches!(result, Ok(_));
});
}

#[test]
fn immutable_data_access_checks_work() {
let dummy_ch = MockLoader::insert(Constructor, move |ctx, _| {
