// This file is part of Substrate.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// 	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! > Made with *Substrate*, for *Polkadot*.
//!
//! [![github]](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/frame/scheduler) -
//! [![polkadot]](https://polkadot.network)
//!
//! [polkadot]: https://img.shields.io/badge/polkadot-E6007A?style=for-the-badge&logo=polkadot&logoColor=white
//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github
//!
//! # Scheduler Pallet
//!
//! A Pallet for scheduling runtime calls.
//!
//! ## Overview
//!
//! This Pallet exposes capabilities for scheduling runtime calls to occur at a specified block
//! number or at a specified period. These scheduled runtime calls may be named or anonymous and may
//! be canceled.
//!
//! __NOTE:__ Instead of using the filter contained in the origin to call `fn schedule`, scheduled
//! runtime calls will be dispatched with the default filter for the origin: namely
//! `frame_system::Config::BaseCallFilter` for all origin types (except root which will get no
//! filter).
//!
//! If a call is scheduled using proxy or whatever mechanism which adds filter, then those filter
//! will not be used when dispatching the schedule runtime call.
//!
//! ### Examples
//!
//! 1. Scheduling a runtime call at a specific block.
#![doc = docify::embed!("src/tests.rs", basic_scheduling_works)]
//!
//! 2. Scheduling a preimage hash of a runtime call at a specific block
#![doc = docify::embed!("src/tests.rs", scheduling_with_preimages_works)]
//!
//! ## Pallet API
//!
//! See the [`pallet`] module for more information about the interfaces this pallet exposes,
//! including its configuration trait, dispatchables, storage items, events and errors.
//!
//! ## Warning
//!
//! This Pallet executes all scheduled runtime calls in the [`on_initialize`] hook. Do not execute
//! any runtime calls which should not be considered mandatory.
//!
//! Please be aware that any scheduled runtime calls executed in a future block may __fail__ or may
//! result in __undefined behavior__ since the runtime could have upgraded between the time of
//! scheduling and execution. For example, the runtime upgrade could have:
//!
//! * Modified the implementation of the runtime call (runtime specification upgrade).
//!     * Could lead to undefined behavior.
//! * Removed or changed the ordering/index of the runtime call.
//!     * Could fail due to the runtime call index not being part of the `Call`.
//!     * Could lead to undefined behavior, such as executing another runtime call with the same
//!       index.
//!
//! [`on_initialize`]: frame_support::traits::Hooks::on_initialize
75

            
76
// Ensure we're `no_std` when compiling for Wasm.
77
#![cfg_attr(not(feature = "std"), no_std)]
78

            
79
#[cfg(feature = "runtime-benchmarks")]
80
mod benchmarking;
81
pub mod migration;
82
#[cfg(test)]
83
mod mock;
84
#[cfg(test)]
85
mod tests;
86
pub mod weights;
87

            
88
extern crate alloc;
89

            
90
use alloc::{boxed::Box, vec::Vec};
91
use codec::{Decode, Encode, MaxEncodedLen};
92
use core::{borrow::Borrow, cmp::Ordering, marker::PhantomData};
93
use frame_support::{
94
	dispatch::{DispatchResult, GetDispatchInfo, Parameter, RawOrigin},
95
	ensure,
96
	traits::{
97
		schedule::{self, DispatchTime, MaybeHashed},
98
		Bounded, CallerTrait, EnsureOrigin, Get, IsType, OriginTrait, PalletInfoAccess,
99
		PrivilegeCmp, QueryPreimage, StorageVersion, StorePreimage,
100
	},
101
	weights::{Weight, WeightMeter},
102
};
103
use frame_system::{
104
	pallet_prelude::BlockNumberFor,
105
	{self as system},
106
};
107
use scale_info::TypeInfo;
108
use sp_io::hashing::blake2_256;
109
use sp_runtime::{
110
	traits::{BadOrigin, Dispatchable, One, Saturating, Zero},
111
	BoundedVec, DispatchError, RuntimeDebug,
112
};
113

            
114
pub use pallet::*;
115
pub use weights::WeightInfo;
116

            
117
/// Just a simple index for naming period tasks.
118
pub type PeriodicIndex = u32;
119
/// The location of a scheduled task that can be used to remove it.
120
pub type TaskAddress<BlockNumber> = (BlockNumber, u32);
121

            
122
pub type CallOrHashOf<T> =
123
	MaybeHashed<<T as Config>::RuntimeCall, <T as frame_system::Config>::Hash>;
124

            
125
pub type BoundedCallOf<T> =
126
	Bounded<<T as Config>::RuntimeCall, <T as frame_system::Config>::Hashing>;
127

            
128
/// The configuration of the retry mechanism for a given task along with its current state.
129
#[derive(Clone, Copy, RuntimeDebug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)]
130
pub struct RetryConfig<Period> {
131
	/// Initial amount of retries allowed.
132
	total_retries: u8,
133
	/// Amount of retries left.
134
	remaining: u8,
135
	/// Period of time between retry attempts.
136
	period: Period,
137
}
138

            
139
#[cfg_attr(any(feature = "std", test), derive(PartialEq, Eq))]
140
#[derive(Clone, RuntimeDebug, Encode, Decode)]
141
struct ScheduledV1<Call, BlockNumber> {
142
	maybe_id: Option<Vec<u8>>,
143
	priority: schedule::Priority,
144
	call: Call,
145
	maybe_periodic: Option<schedule::Period<BlockNumber>>,
146
}
147

            
148
/// Information regarding an item to be executed in the future.
149
#[cfg_attr(any(feature = "std", test), derive(PartialEq, Eq))]
150
#[derive(Clone, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)]
151
pub struct Scheduled<Name, Call, BlockNumber, PalletsOrigin, AccountId> {
152
	/// The unique identity for this task, if there is one.
153
	maybe_id: Option<Name>,
154
	/// This task's priority.
155
	priority: schedule::Priority,
156
	/// The call to be dispatched.
157
	call: Call,
158
	/// If the call is periodic, then this points to the information concerning that.
159
	maybe_periodic: Option<schedule::Period<BlockNumber>>,
160
	/// The origin with which to dispatch the call.
161
	origin: PalletsOrigin,
162
	_phantom: PhantomData<AccountId>,
163
}
164

            
165
impl<Name, Call, BlockNumber, PalletsOrigin, AccountId>
166
	Scheduled<Name, Call, BlockNumber, PalletsOrigin, AccountId>
167
where
168
	Call: Clone,
169
	PalletsOrigin: Clone,
170
{
171
	/// Create a new task to be used for retry attempts of the original one. The cloned task will
172
	/// have the same `priority`, `call` and `origin`, but will always be non-periodic and unnamed.
173
	pub fn as_retry(&self) -> Self {
174
		Self {
175
			maybe_id: None,
176
			priority: self.priority,
177
			call: self.call.clone(),
178
			maybe_periodic: None,
179
			origin: self.origin.clone(),
180
			_phantom: Default::default(),
181
		}
182
	}
183
}
184

            
185
use crate::{Scheduled as ScheduledV3, Scheduled as ScheduledV2};
186

            
187
pub type ScheduledV2Of<T> = ScheduledV2<
188
	Vec<u8>,
189
	<T as Config>::RuntimeCall,
190
	BlockNumberFor<T>,
191
	<T as Config>::PalletsOrigin,
192
	<T as frame_system::Config>::AccountId,
193
>;
194

            
195
pub type ScheduledV3Of<T> = ScheduledV3<
196
	Vec<u8>,
197
	CallOrHashOf<T>,
198
	BlockNumberFor<T>,
199
	<T as Config>::PalletsOrigin,
200
	<T as frame_system::Config>::AccountId,
201
>;
202

            
203
pub type ScheduledOf<T> = Scheduled<
204
	TaskName,
205
	BoundedCallOf<T>,
206
	BlockNumberFor<T>,
207
	<T as Config>::PalletsOrigin,
208
	<T as frame_system::Config>::AccountId,
209
>;
210

            
211
pub(crate) trait MarginalWeightInfo: WeightInfo {
212
	fn service_task(maybe_lookup_len: Option<usize>, named: bool, periodic: bool) -> Weight {
213
		let base = Self::service_task_base();
214
		let mut total = match maybe_lookup_len {
215
			None => base,
216
			Some(l) => Self::service_task_fetched(l as u32),
217
		};
218
		if named {
219
			total.saturating_accrue(Self::service_task_named().saturating_sub(base));
220
		}
221
		if periodic {
222
			total.saturating_accrue(Self::service_task_periodic().saturating_sub(base));
223
		}
224
		total
225
	}
226
}
227
impl<T: WeightInfo> MarginalWeightInfo for T {}
228

            
229
1176
#[frame_support::pallet]
230
pub mod pallet {
231
	use super::*;
232
	use frame_support::{dispatch::PostDispatchInfo, pallet_prelude::*};
233
	use frame_system::pallet_prelude::*;
234

            
235
	/// The in-code storage version.
236
	const STORAGE_VERSION: StorageVersion = StorageVersion::new(4);
237

            
238
1698
	#[pallet::pallet]
239
	#[pallet::storage_version(STORAGE_VERSION)]
240
	pub struct Pallet<T>(_);
241

            
242
	/// `system::Config` should always be included in our implied traits.
243
	#[pallet::config]
244
	pub trait Config: frame_system::Config {
245
		/// The overarching event type.
246
		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
247

            
248
		/// The aggregated origin which the dispatch will take.
249
		type RuntimeOrigin: OriginTrait<PalletsOrigin = Self::PalletsOrigin>
250
			+ From<Self::PalletsOrigin>
251
			+ IsType<<Self as system::Config>::RuntimeOrigin>;
252

            
253
		/// The caller origin, overarching type of all pallets origins.
254
		type PalletsOrigin: From<system::RawOrigin<Self::AccountId>>
255
			+ CallerTrait<Self::AccountId>
256
			+ MaxEncodedLen;
257

            
258
		/// The aggregated call type.
259
		type RuntimeCall: Parameter
260
			+ Dispatchable<
261
				RuntimeOrigin = <Self as Config>::RuntimeOrigin,
262
				PostInfo = PostDispatchInfo,
263
			> + GetDispatchInfo
264
			+ From<system::Call<Self>>;
265

            
266
		/// The maximum weight that may be scheduled per block for any dispatchables.
267
		#[pallet::constant]
268
		type MaximumWeight: Get<Weight>;
269

            
270
		/// Required origin to schedule or cancel calls.
271
		type ScheduleOrigin: EnsureOrigin<<Self as system::Config>::RuntimeOrigin>;
272

            
273
		/// Compare the privileges of origins.
274
		///
275
		/// This will be used when canceling a task, to ensure that the origin that tries
276
		/// to cancel has greater or equal privileges as the origin that created the scheduled task.
277
		///
278
		/// For simplicity the [`EqualPrivilegeOnly`](frame_support::traits::EqualPrivilegeOnly) can
279
		/// be used. This will only check if two given origins are equal.
280
		type OriginPrivilegeCmp: PrivilegeCmp<Self::PalletsOrigin>;
281

            
282
		/// The maximum number of scheduled calls in the queue for a single block.
283
		///
284
		/// NOTE:
285
		/// + Dependent pallets' benchmarks might require a higher limit for the setting. Set a
286
		/// higher limit under `runtime-benchmarks` feature.
287
		#[pallet::constant]
288
		type MaxScheduledPerBlock: Get<u32>;
289

            
290
		/// Weight information for extrinsics in this pallet.
291
		type WeightInfo: WeightInfo;
292

            
293
		/// The preimage provider with which we look up call hashes to get the call.
294
		type Preimages: QueryPreimage<H = Self::Hashing> + StorePreimage;
295
	}
296

            
297
389526
	#[pallet::storage]
298
	pub type IncompleteSince<T: Config> = StorageValue<_, BlockNumberFor<T>>;
299

            
300
	/// Items to be executed, indexed by the block number that they should be executed on.
301
391662
	#[pallet::storage]
302
	pub type Agenda<T: Config> = StorageMap<
303
		_,
304
		Twox64Concat,
305
		BlockNumberFor<T>,
306
		BoundedVec<Option<ScheduledOf<T>>, T::MaxScheduledPerBlock>,
307
		ValueQuery,
308
	>;
309

            
310
	/// Retry configurations for items to be executed, indexed by task address.
311
54
	#[pallet::storage]
312
	pub type Retries<T: Config> = StorageMap<
313
		_,
314
		Blake2_128Concat,
315
		TaskAddress<BlockNumberFor<T>>,
316
		RetryConfig<BlockNumberFor<T>>,
317
		OptionQuery,
318
	>;
319

            
320
	/// Lookup from a name to the block number and index of the task.
321
	///
322
	/// For v3 -> v4 the previously unbounded identities are Blake2-256 hashed to form the v4
323
	/// identities.
324
	#[pallet::storage]
325
	pub(crate) type Lookup<T: Config> =
326
		StorageMap<_, Twox64Concat, TaskName, TaskAddress<BlockNumberFor<T>>>;
327

            
328
	/// Events type.
329
	#[pallet::event]
330
1044
	#[pallet::generate_deposit(pub(super) fn deposit_event)]
331
	pub enum Event<T: Config> {
332
		/// Scheduled some task.
333
		Scheduled { when: BlockNumberFor<T>, index: u32 },
334
		/// Canceled some task.
335
		Canceled { when: BlockNumberFor<T>, index: u32 },
336
		/// Dispatched some task.
337
		Dispatched {
338
			task: TaskAddress<BlockNumberFor<T>>,
339
			id: Option<TaskName>,
340
			result: DispatchResult,
341
		},
342
		/// Set a retry configuration for some task.
343
		RetrySet {
344
			task: TaskAddress<BlockNumberFor<T>>,
345
			id: Option<TaskName>,
346
			period: BlockNumberFor<T>,
347
			retries: u8,
348
		},
349
		/// Cancel a retry configuration for some task.
350
		RetryCancelled { task: TaskAddress<BlockNumberFor<T>>, id: Option<TaskName> },
351
		/// The call for the provided hash was not found so the task has been aborted.
352
		CallUnavailable { task: TaskAddress<BlockNumberFor<T>>, id: Option<TaskName> },
353
		/// The given task was unable to be renewed since the agenda is full at that block.
354
		PeriodicFailed { task: TaskAddress<BlockNumberFor<T>>, id: Option<TaskName> },
355
		/// The given task was unable to be retried since the agenda is full at that block or there
356
		/// was not enough weight to reschedule it.
357
		RetryFailed { task: TaskAddress<BlockNumberFor<T>>, id: Option<TaskName> },
358
		/// The given task can never be executed since it is overweight.
359
		PermanentlyOverweight { task: TaskAddress<BlockNumberFor<T>>, id: Option<TaskName> },
360
	}
361

            
362
	#[pallet::error]
363
	pub enum Error<T> {
364
		/// Failed to schedule a call
365
		FailedToSchedule,
366
		/// Cannot find the scheduled call.
367
		NotFound,
368
		/// Given target block number is in the past.
369
		TargetBlockNumberInPast,
370
		/// Reschedule failed because it does not change scheduled time.
371
		RescheduleNoChange,
372
		/// Attempt to use a non-named function on a named task.
373
		Named,
374
	}
375

            
376
554367
	#[pallet::hooks]
377
	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
378
		/// Execute the scheduled calls
379
194763
		fn on_initialize(now: BlockNumberFor<T>) -> Weight {
380
194763
			let mut weight_counter = WeightMeter::with_limit(T::MaximumWeight::get());
381
194763
			Self::service_agendas(&mut weight_counter, now, u32::max_value());
382
194763
			weight_counter.consumed()
383
194763
		}
384
	}
385

            
386
8043
	#[pallet::call]
387
	impl<T: Config> Pallet<T> {
388
		/// Anonymously schedule a task.
389
		#[pallet::call_index(0)]
390
		#[pallet::weight(<T as Config>::WeightInfo::schedule(T::MaxScheduledPerBlock::get()))]
391
		pub fn schedule(
392
			origin: OriginFor<T>,
393
			when: BlockNumberFor<T>,
394
			maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
395
			priority: schedule::Priority,
396
			call: Box<<T as Config>::RuntimeCall>,
397
405
		) -> DispatchResult {
398
405
			T::ScheduleOrigin::ensure_origin(origin.clone())?;
399
			let origin = <T as Config>::RuntimeOrigin::from(origin);
400
			Self::do_schedule(
401
				DispatchTime::At(when),
402
				maybe_periodic,
403
				priority,
404
				origin.caller().clone(),
405
				T::Preimages::bound(*call)?,
406
			)?;
407
			Ok(())
408
		}
409

            
410
		/// Cancel an anonymously scheduled task.
411
		#[pallet::call_index(1)]
412
		#[pallet::weight(<T as Config>::WeightInfo::cancel(T::MaxScheduledPerBlock::get()))]
413
90
		pub fn cancel(origin: OriginFor<T>, when: BlockNumberFor<T>, index: u32) -> DispatchResult {
414
90
			T::ScheduleOrigin::ensure_origin(origin.clone())?;
415
			let origin = <T as Config>::RuntimeOrigin::from(origin);
416
			Self::do_cancel(Some(origin.caller().clone()), (when, index))?;
417
			Ok(())
418
		}
419

            
420
		/// Schedule a named task.
421
		#[pallet::call_index(2)]
422
		#[pallet::weight(<T as Config>::WeightInfo::schedule_named(T::MaxScheduledPerBlock::get()))]
423
		pub fn schedule_named(
424
			origin: OriginFor<T>,
425
			id: TaskName,
426
			when: BlockNumberFor<T>,
427
			maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
428
			priority: schedule::Priority,
429
			call: Box<<T as Config>::RuntimeCall>,
430
129
		) -> DispatchResult {
431
129
			T::ScheduleOrigin::ensure_origin(origin.clone())?;
432
			let origin = <T as Config>::RuntimeOrigin::from(origin);
433
			Self::do_schedule_named(
434
				id,
435
				DispatchTime::At(when),
436
				maybe_periodic,
437
				priority,
438
				origin.caller().clone(),
439
				T::Preimages::bound(*call)?,
440
			)?;
441
			Ok(())
442
		}
443

            
444
		/// Cancel a named scheduled task.
445
		#[pallet::call_index(3)]
446
		#[pallet::weight(<T as Config>::WeightInfo::cancel_named(T::MaxScheduledPerBlock::get()))]
447
18
		pub fn cancel_named(origin: OriginFor<T>, id: TaskName) -> DispatchResult {
448
18
			T::ScheduleOrigin::ensure_origin(origin.clone())?;
449
			let origin = <T as Config>::RuntimeOrigin::from(origin);
450
			Self::do_cancel_named(Some(origin.caller().clone()), id)?;
451
			Ok(())
452
		}
453

            
454
		/// Anonymously schedule a task after a delay.
455
		#[pallet::call_index(4)]
456
		#[pallet::weight(<T as Config>::WeightInfo::schedule(T::MaxScheduledPerBlock::get()))]
457
		pub fn schedule_after(
458
			origin: OriginFor<T>,
459
			after: BlockNumberFor<T>,
460
			maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
461
			priority: schedule::Priority,
462
			call: Box<<T as Config>::RuntimeCall>,
463
78
		) -> DispatchResult {
464
78
			T::ScheduleOrigin::ensure_origin(origin.clone())?;
465
			let origin = <T as Config>::RuntimeOrigin::from(origin);
466
			Self::do_schedule(
467
				DispatchTime::After(after),
468
				maybe_periodic,
469
				priority,
470
				origin.caller().clone(),
471
				T::Preimages::bound(*call)?,
472
			)?;
473
			Ok(())
474
		}
475

            
476
		/// Schedule a named task after a delay.
477
		#[pallet::call_index(5)]
478
		#[pallet::weight(<T as Config>::WeightInfo::schedule_named(T::MaxScheduledPerBlock::get()))]
479
		pub fn schedule_named_after(
480
			origin: OriginFor<T>,
481
			id: TaskName,
482
			after: BlockNumberFor<T>,
483
			maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
484
			priority: schedule::Priority,
485
			call: Box<<T as Config>::RuntimeCall>,
486
69
		) -> DispatchResult {
487
69
			T::ScheduleOrigin::ensure_origin(origin.clone())?;
488
			let origin = <T as Config>::RuntimeOrigin::from(origin);
489
			Self::do_schedule_named(
490
				id,
491
				DispatchTime::After(after),
492
				maybe_periodic,
493
				priority,
494
				origin.caller().clone(),
495
				T::Preimages::bound(*call)?,
496
			)?;
497
			Ok(())
498
		}
499

            
500
		/// Set a retry configuration for a task so that, in case its scheduled run fails, it will
501
		/// be retried after `period` blocks, for a total amount of `retries` retries or until it
502
		/// succeeds.
503
		///
504
		/// Tasks which need to be scheduled for a retry are still subject to weight metering and
505
		/// agenda space, same as a regular task. If a periodic task fails, it will be scheduled
506
		/// normally while the task is retrying.
507
		///
508
		/// Tasks scheduled as a result of a retry for a periodic task are unnamed, non-periodic
509
		/// clones of the original task. Their retry configuration will be derived from the
510
		/// original task's configuration, but will have a lower value for `remaining` than the
511
		/// original `total_retries`.
512
		#[pallet::call_index(6)]
513
		#[pallet::weight(<T as Config>::WeightInfo::set_retry())]
514
		pub fn set_retry(
515
			origin: OriginFor<T>,
516
			task: TaskAddress<BlockNumberFor<T>>,
517
			retries: u8,
518
			period: BlockNumberFor<T>,
519
201
		) -> DispatchResult {
520
201
			T::ScheduleOrigin::ensure_origin(origin.clone())?;
521
			let origin = <T as Config>::RuntimeOrigin::from(origin);
522
			let (when, index) = task;
523
			let agenda = Agenda::<T>::get(when);
524
			let scheduled = agenda
525
				.get(index as usize)
526
				.and_then(Option::as_ref)
527
				.ok_or(Error::<T>::NotFound)?;
528
			Self::ensure_privilege(origin.caller(), &scheduled.origin)?;
529
			Retries::<T>::insert(
530
				(when, index),
531
				RetryConfig { total_retries: retries, remaining: retries, period },
532
			);
533
			Self::deposit_event(Event::RetrySet { task, id: None, period, retries });
534
			Ok(())
535
		}
536

            
537
		/// Set a retry configuration for a named task so that, in case its scheduled run fails, it
538
		/// will be retried after `period` blocks, for a total amount of `retries` retries or until
539
		/// it succeeds.
540
		///
541
		/// Tasks which need to be scheduled for a retry are still subject to weight metering and
542
		/// agenda space, same as a regular task. If a periodic task fails, it will be scheduled
543
		/// normally while the task is retrying.
544
		///
545
		/// Tasks scheduled as a result of a retry for a periodic task are unnamed, non-periodic
546
		/// clones of the original task. Their retry configuration will be derived from the
547
		/// original task's configuration, but will have a lower value for `remaining` than the
548
		/// original `total_retries`.
549
		#[pallet::call_index(7)]
550
		#[pallet::weight(<T as Config>::WeightInfo::set_retry_named())]
551
		pub fn set_retry_named(
552
			origin: OriginFor<T>,
553
			id: TaskName,
554
			retries: u8,
555
			period: BlockNumberFor<T>,
556
105
		) -> DispatchResult {
557
105
			T::ScheduleOrigin::ensure_origin(origin.clone())?;
558
			let origin = <T as Config>::RuntimeOrigin::from(origin);
559
			let (when, agenda_index) = Lookup::<T>::get(&id).ok_or(Error::<T>::NotFound)?;
560
			let agenda = Agenda::<T>::get(when);
561
			let scheduled = agenda
562
				.get(agenda_index as usize)
563
				.and_then(Option::as_ref)
564
				.ok_or(Error::<T>::NotFound)?;
565
			Self::ensure_privilege(origin.caller(), &scheduled.origin)?;
566
			Retries::<T>::insert(
567
				(when, agenda_index),
568
				RetryConfig { total_retries: retries, remaining: retries, period },
569
			);
570
			Self::deposit_event(Event::RetrySet {
571
				task: (when, agenda_index),
572
				id: Some(id),
573
				period,
574
				retries,
575
			});
576
			Ok(())
577
		}
578

            
579
		/// Removes the retry configuration of a task.
580
		#[pallet::call_index(8)]
581
		#[pallet::weight(<T as Config>::WeightInfo::cancel_retry())]
582
		pub fn cancel_retry(
583
			origin: OriginFor<T>,
584
			task: TaskAddress<BlockNumberFor<T>>,
585
54
		) -> DispatchResult {
586
54
			T::ScheduleOrigin::ensure_origin(origin.clone())?;
587
			let origin = <T as Config>::RuntimeOrigin::from(origin);
588
			Self::do_cancel_retry(origin.caller(), task)?;
589
			Self::deposit_event(Event::RetryCancelled { task, id: None });
590
			Ok(())
591
		}
592

            
593
		/// Cancel the retry configuration of a named task.
594
		#[pallet::call_index(9)]
595
		#[pallet::weight(<T as Config>::WeightInfo::cancel_retry_named())]
596
21
		pub fn cancel_retry_named(origin: OriginFor<T>, id: TaskName) -> DispatchResult {
597
21
			T::ScheduleOrigin::ensure_origin(origin.clone())?;
598
			let origin = <T as Config>::RuntimeOrigin::from(origin);
599
			let task = Lookup::<T>::get(&id).ok_or(Error::<T>::NotFound)?;
600
			Self::do_cancel_retry(origin.caller(), task)?;
601
			Self::deposit_event(Event::RetryCancelled { task, id: Some(id) });
602
			Ok(())
603
		}
604
	}
605
}
606

            
607
impl<T: Config> Pallet<T> {
608
	/// Migrate storage format from V1 to V4.
609
	///
610
	/// Returns the weight consumed by this migration.
611
	pub fn migrate_v1_to_v4() -> Weight {
612
		use migration::v1 as old;
613
		let mut weight = T::DbWeight::get().reads_writes(1, 1);
614

            
615
		// Delete all undecodable values.
616
		// `StorageMap::translate` is not enough since it just skips them and leaves the keys in.
617
		let keys = old::Agenda::<T>::iter_keys().collect::<Vec<_>>();
618
		for key in keys {
619
			weight.saturating_accrue(T::DbWeight::get().reads(1));
620
			if let Err(_) = old::Agenda::<T>::try_get(&key) {
621
				weight.saturating_accrue(T::DbWeight::get().writes(1));
622
				old::Agenda::<T>::remove(&key);
623
				log::warn!("Deleted undecodable agenda");
624
			}
625
		}
626

            
627
		Agenda::<T>::translate::<
628
			Vec<Option<ScheduledV1<<T as Config>::RuntimeCall, BlockNumberFor<T>>>>,
629
			_,
630
		>(|_, agenda| {
631
			Some(BoundedVec::truncate_from(
632
				agenda
633
					.into_iter()
634
					.map(|schedule| {
635
						weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1));
636

            
637
						schedule.and_then(|schedule| {
638
							if let Some(id) = schedule.maybe_id.as_ref() {
639
								let name = blake2_256(id);
640
								if let Some(item) = old::Lookup::<T>::take(id) {
641
									Lookup::<T>::insert(name, item);
642
								}
643
								weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2));
644
							}
645

            
646
							let call = T::Preimages::bound(schedule.call).ok()?;
647

            
648
							if call.lookup_needed() {
649
								weight.saturating_accrue(T::DbWeight::get().reads_writes(0, 1));
650
							}
651

            
652
							Some(Scheduled {
653
								maybe_id: schedule.maybe_id.map(|x| blake2_256(&x[..])),
654
								priority: schedule.priority,
655
								call,
656
								maybe_periodic: schedule.maybe_periodic,
657
								origin: system::RawOrigin::Root.into(),
658
								_phantom: Default::default(),
659
							})
660
						})
661
					})
662
					.collect::<Vec<_>>(),
663
			))
664
		});
665

            
666
		#[allow(deprecated)]
667
		frame_support::storage::migration::remove_storage_prefix(
668
			Self::name().as_bytes(),
669
			b"StorageVersion",
670
			&[],
671
		);
672

            
673
		StorageVersion::new(4).put::<Self>();
674

            
675
		weight + T::DbWeight::get().writes(2)
676
	}
677

            
678
	/// Migrate storage format from V2 to V4.
679
	///
680
	/// Returns the weight consumed by this migration.
681
	pub fn migrate_v2_to_v4() -> Weight {
682
		use migration::v2 as old;
683
		let mut weight = T::DbWeight::get().reads_writes(1, 1);
684

            
685
		// Delete all undecodable values.
686
		// `StorageMap::translate` is not enough since it just skips them and leaves the keys in.
687
		let keys = old::Agenda::<T>::iter_keys().collect::<Vec<_>>();
688
		for key in keys {
689
			weight.saturating_accrue(T::DbWeight::get().reads(1));
690
			if let Err(_) = old::Agenda::<T>::try_get(&key) {
691
				weight.saturating_accrue(T::DbWeight::get().writes(1));
692
				old::Agenda::<T>::remove(&key);
693
				log::warn!("Deleted undecodable agenda");
694
			}
695
		}
696

            
697
		Agenda::<T>::translate::<Vec<Option<ScheduledV2Of<T>>>, _>(|_, agenda| {
698
			Some(BoundedVec::truncate_from(
699
				agenda
700
					.into_iter()
701
					.map(|schedule| {
702
						weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1));
703
						schedule.and_then(|schedule| {
704
							if let Some(id) = schedule.maybe_id.as_ref() {
705
								let name = blake2_256(id);
706
								if let Some(item) = old::Lookup::<T>::take(id) {
707
									Lookup::<T>::insert(name, item);
708
								}
709
								weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2));
710
							}
711

            
712
							let call = T::Preimages::bound(schedule.call).ok()?;
713
							if call.lookup_needed() {
714
								weight.saturating_accrue(T::DbWeight::get().reads_writes(0, 1));
715
							}
716

            
717
							Some(Scheduled {
718
								maybe_id: schedule.maybe_id.map(|x| blake2_256(&x[..])),
719
								priority: schedule.priority,
720
								call,
721
								maybe_periodic: schedule.maybe_periodic,
722
								origin: schedule.origin,
723
								_phantom: Default::default(),
724
							})
725
						})
726
					})
727
					.collect::<Vec<_>>(),
728
			))
729
		});
730

            
731
		#[allow(deprecated)]
732
		frame_support::storage::migration::remove_storage_prefix(
733
			Self::name().as_bytes(),
734
			b"StorageVersion",
735
			&[],
736
		);
737

            
738
		StorageVersion::new(4).put::<Self>();
739

            
740
		weight + T::DbWeight::get().writes(2)
741
	}
742

            
743
	/// Migrate storage format from V3 to V4.
744
	///
745
	/// Returns the weight consumed by this migration.
746
	#[allow(deprecated)]
747
	pub fn migrate_v3_to_v4() -> Weight {
748
		use migration::v3 as old;
749
		let mut weight = T::DbWeight::get().reads_writes(2, 1);
750

            
751
		// Delete all undecodable values.
752
		// `StorageMap::translate` is not enough since it just skips them and leaves the keys in.
753
		let blocks = old::Agenda::<T>::iter_keys().collect::<Vec<_>>();
754
		for block in blocks {
755
			weight.saturating_accrue(T::DbWeight::get().reads(1));
756
			if let Err(_) = old::Agenda::<T>::try_get(&block) {
757
				weight.saturating_accrue(T::DbWeight::get().writes(1));
758
				old::Agenda::<T>::remove(&block);
759
				log::warn!("Deleted undecodable agenda of block: {:?}", block);
760
			}
761
		}
762

            
763
		Agenda::<T>::translate::<Vec<Option<ScheduledV3Of<T>>>, _>(|block, agenda| {
764
			log::info!("Migrating agenda of block: {:?}", &block);
765
			Some(BoundedVec::truncate_from(
766
				agenda
767
					.into_iter()
768
					.map(|schedule| {
769
						weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1));
770
						schedule
771
							.and_then(|schedule| {
772
								if let Some(id) = schedule.maybe_id.as_ref() {
773
									let name = blake2_256(id);
774
									if let Some(item) = old::Lookup::<T>::take(id) {
775
										Lookup::<T>::insert(name, item);
776
										log::info!("Migrated name for id: {:?}", id);
777
									} else {
778
										log::error!("No name in Lookup for id: {:?}", &id);
779
									}
780
									weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2));
781
								} else {
782
									log::info!("Schedule is unnamed");
783
								}
784

            
785
								let call = match schedule.call {
786
									MaybeHashed::Hash(h) => {
787
										let bounded = Bounded::from_legacy_hash(h);
788
										// Check that the call can be decoded in the new runtime.
789
										if let Err(err) = T::Preimages::peek::<
790
											<T as Config>::RuntimeCall,
791
										>(&bounded)
792
										{
793
											log::error!(
794
												"Dropping undecodable call {:?}: {:?}",
795
												&h,
796
												&err
797
											);
798
											return None
799
										}
800
										weight.saturating_accrue(T::DbWeight::get().reads(1));
801
										log::info!("Migrated call by hash, hash: {:?}", h);
802
										bounded
803
									},
804
									MaybeHashed::Value(v) => {
805
										let call = T::Preimages::bound(v)
806
											.map_err(|e| {
807
												log::error!("Could not bound Call: {:?}", e)
808
											})
809
											.ok()?;
810
										if call.lookup_needed() {
811
											weight.saturating_accrue(
812
												T::DbWeight::get().reads_writes(0, 1),
813
											);
814
										}
815
										log::info!(
816
											"Migrated call by value, hash: {:?}",
817
											call.hash()
818
										);
819
										call
820
									},
821
								};
822

            
823
								Some(Scheduled {
824
									maybe_id: schedule.maybe_id.map(|x| blake2_256(&x[..])),
825
									priority: schedule.priority,
826
									call,
827
									maybe_periodic: schedule.maybe_periodic,
828
									origin: schedule.origin,
829
									_phantom: Default::default(),
830
								})
831
							})
832
							.or_else(|| {
833
								log::info!("Schedule in agenda for block {:?} is empty - nothing to do here.", &block);
834
								None
835
							})
836
					})
837
					.collect::<Vec<_>>(),
838
			))
839
		});
840

            
841
		#[allow(deprecated)]
842
		frame_support::storage::migration::remove_storage_prefix(
843
			Self::name().as_bytes(),
844
			b"StorageVersion",
845
			&[],
846
		);
847

            
848
		StorageVersion::new(4).put::<Self>();
849

            
850
		weight + T::DbWeight::get().writes(2)
851
	}
852
}
853

            
854
impl<T: Config> Pallet<T> {
855
	/// Helper to migrate scheduler when the pallet origin type has changed.
856
	pub fn migrate_origin<OldOrigin: Into<T::PalletsOrigin> + codec::Decode>() {
857
		Agenda::<T>::translate::<
858
			Vec<
859
				Option<
860
					Scheduled<
861
						TaskName,
862
						BoundedCallOf<T>,
863
						BlockNumberFor<T>,
864
						OldOrigin,
865
						T::AccountId,
866
					>,
867
				>,
868
			>,
869
			_,
870
		>(|_, agenda| {
871
			Some(BoundedVec::truncate_from(
872
				agenda
873
					.into_iter()
874
					.map(|schedule| {
875
						schedule.map(|schedule| Scheduled {
876
							maybe_id: schedule.maybe_id,
877
							priority: schedule.priority,
878
							call: schedule.call,
879
							maybe_periodic: schedule.maybe_periodic,
880
							origin: schedule.origin.into(),
881
							_phantom: Default::default(),
882
						})
883
					})
884
					.collect::<Vec<_>>(),
885
			))
886
		});
887
	}
888

            
889
990
	fn resolve_time(
890
990
		when: DispatchTime<BlockNumberFor<T>>,
891
990
	) -> Result<BlockNumberFor<T>, DispatchError> {
892
990
		let now = frame_system::Pallet::<T>::block_number();
893

            
894
990
		let when = match when {
895
990
			DispatchTime::At(x) => x,
896
			// The current block has already completed it's scheduled tasks, so
897
			// Schedule the task at lest one block after this current block.
898
			DispatchTime::After(x) => now.saturating_add(x).saturating_add(One::one()),
899
		};
900

            
901
990
		if when <= now {
902
			return Err(Error::<T>::TargetBlockNumberInPast.into())
903
990
		}
904
990

            
905
990
		Ok(when)
906
990
	}
907

            
908
990
	fn place_task(
909
990
		when: BlockNumberFor<T>,
910
990
		what: ScheduledOf<T>,
911
990
	) -> Result<TaskAddress<BlockNumberFor<T>>, (DispatchError, ScheduledOf<T>)> {
912
990
		let maybe_name = what.maybe_id;
913
990
		let index = Self::push_to_agenda(when, what)?;
914
990
		let address = (when, index);
915
990
		if let Some(name) = maybe_name {
916
			Lookup::<T>::insert(name, address)
917
990
		}
918
990
		Self::deposit_event(Event::Scheduled { when: address.0, index: address.1 });
919
990
		Ok(address)
920
990
	}
921

            
922
990
	fn push_to_agenda(
923
990
		when: BlockNumberFor<T>,
924
990
		what: ScheduledOf<T>,
925
990
	) -> Result<u32, (DispatchError, ScheduledOf<T>)> {
926
990
		let mut agenda = Agenda::<T>::get(when);
927
990
		let index = if (agenda.len() as u32) < T::MaxScheduledPerBlock::get() {
928
			// will always succeed due to the above check.
929
990
			let _ = agenda.try_push(Some(what));
930
990
			agenda.len() as u32 - 1
931
		} else {
932
			if let Some(hole_index) = agenda.iter().position(|i| i.is_none()) {
933
				agenda[hole_index] = Some(what);
934
				hole_index as u32
935
			} else {
936
				return Err((DispatchError::Exhausted, what))
937
			}
938
		};
939
990
		Agenda::<T>::insert(when, agenda);
940
990
		Ok(index)
941
990
	}
942

            
943
	/// Remove trailing `None` items of an agenda at `when`. If all items are `None` remove the
944
	/// agenda record entirely.
945
54
	fn cleanup_agenda(when: BlockNumberFor<T>) {
946
54
		let mut agenda = Agenda::<T>::get(when);
947
54
		match agenda.iter().rposition(|i| i.is_some()) {
948
6
			Some(i) if agenda.len() > i + 1 => {
949
				agenda.truncate(i + 1);
950
				Agenda::<T>::insert(when, agenda);
951
			},
952
6
			Some(_) => {},
953
48
			None => {
954
48
				Agenda::<T>::remove(when);
955
48
			},
956
		}
957
54
	}
958

            
959
990
	fn do_schedule(
960
990
		when: DispatchTime<BlockNumberFor<T>>,
961
990
		maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
962
990
		priority: schedule::Priority,
963
990
		origin: T::PalletsOrigin,
964
990
		call: BoundedCallOf<T>,
965
990
	) -> Result<TaskAddress<BlockNumberFor<T>>, DispatchError> {
966
990
		let when = Self::resolve_time(when)?;
967

            
968
990
		let lookup_hash = call.lookup_hash();
969
990

            
970
990
		// sanitize maybe_periodic
971
990
		let maybe_periodic = maybe_periodic
972
990
			.filter(|p| p.1 > 1 && !p.0.is_zero())
973
990
			// Remove one from the number of repetitions since we will schedule one now.
974
990
			.map(|(p, c)| (p, c - 1));
975
990
		let task = Scheduled {
976
990
			maybe_id: None,
977
990
			priority,
978
990
			call,
979
990
			maybe_periodic,
980
990
			origin,
981
990
			_phantom: PhantomData,
982
990
		};
983
990
		let res = Self::place_task(when, task).map_err(|x| x.0)?;
984

            
985
990
		if let Some(hash) = lookup_hash {
986
			// Request the call to be made available.
987
			T::Preimages::request(&hash);
988
990
		}
989

            
990
990
		Ok(res)
991
990
	}
992

            
993
54
	fn do_cancel(
994
54
		origin: Option<T::PalletsOrigin>,
995
54
		(when, index): TaskAddress<BlockNumberFor<T>>,
996
54
	) -> Result<(), DispatchError> {
997
72
		let scheduled = Agenda::<T>::try_mutate(when, |agenda| {
998
54
			agenda.get_mut(index as usize).map_or(
999
54
				Ok(None),
54
				|s| -> Result<Option<Scheduled<_, _, _, _, _>>, DispatchError> {
54
					if let (Some(ref o), Some(ref s)) = (origin, s.borrow()) {
						Self::ensure_privilege(o, &s.origin)?;
54
					};
54
					Ok(s.take())
54
				},
54
			)
72
		})?;
54
		if let Some(s) = scheduled {
54
			T::Preimages::drop(&s.call);
54
			if let Some(id) = s.maybe_id {
				Lookup::<T>::remove(id);
54
			}
54
			Retries::<T>::remove((when, index));
54
			Self::cleanup_agenda(when);
54
			Self::deposit_event(Event::Canceled { when, index });
54
			Ok(())
		} else {
			return Err(Error::<T>::NotFound.into())
		}
54
	}
	fn do_reschedule(
		(when, index): TaskAddress<BlockNumberFor<T>>,
		new_time: DispatchTime<BlockNumberFor<T>>,
	) -> Result<TaskAddress<BlockNumberFor<T>>, DispatchError> {
		let new_time = Self::resolve_time(new_time)?;
		if new_time == when {
			return Err(Error::<T>::RescheduleNoChange.into())
		}
		let task = Agenda::<T>::try_mutate(when, |agenda| {
			let task = agenda.get_mut(index as usize).ok_or(Error::<T>::NotFound)?;
			ensure!(!matches!(task, Some(Scheduled { maybe_id: Some(_), .. })), Error::<T>::Named);
			task.take().ok_or(Error::<T>::NotFound)
		})?;
		Self::cleanup_agenda(when);
		Self::deposit_event(Event::Canceled { when, index });
		Self::place_task(new_time, task).map_err(|x| x.0)
	}
	fn do_schedule_named(
		id: TaskName,
		when: DispatchTime<BlockNumberFor<T>>,
		maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
		priority: schedule::Priority,
		origin: T::PalletsOrigin,
		call: BoundedCallOf<T>,
	) -> Result<TaskAddress<BlockNumberFor<T>>, DispatchError> {
		// ensure id it is unique
		if Lookup::<T>::contains_key(&id) {
			return Err(Error::<T>::FailedToSchedule.into())
		}
		let when = Self::resolve_time(when)?;
		let lookup_hash = call.lookup_hash();
		// sanitize maybe_periodic
		let maybe_periodic = maybe_periodic
			.filter(|p| p.1 > 1 && !p.0.is_zero())
			// Remove one from the number of repetitions since we will schedule one now.
			.map(|(p, c)| (p, c - 1));
		let task = Scheduled {
			maybe_id: Some(id),
			priority,
			call,
			maybe_periodic,
			origin,
			_phantom: Default::default(),
		};
		let res = Self::place_task(when, task).map_err(|x| x.0)?;
		if let Some(hash) = lookup_hash {
			// Request the call to be made available.
			T::Preimages::request(&hash);
		}
		Ok(res)
	}
	fn do_cancel_named(origin: Option<T::PalletsOrigin>, id: TaskName) -> DispatchResult {
		Lookup::<T>::try_mutate_exists(id, |lookup| -> DispatchResult {
			if let Some((when, index)) = lookup.take() {
				let i = index as usize;
				Agenda::<T>::try_mutate(when, |agenda| -> DispatchResult {
					if let Some(s) = agenda.get_mut(i) {
						if let (Some(ref o), Some(ref s)) = (origin, s.borrow()) {
							Self::ensure_privilege(o, &s.origin)?;
							Retries::<T>::remove((when, index));
							T::Preimages::drop(&s.call);
						}
						*s = None;
					}
					Ok(())
				})?;
				Self::cleanup_agenda(when);
				Self::deposit_event(Event::Canceled { when, index });
				Ok(())
			} else {
				return Err(Error::<T>::NotFound.into())
			}
		})
	}
	fn do_reschedule_named(
		id: TaskName,
		new_time: DispatchTime<BlockNumberFor<T>>,
	) -> Result<TaskAddress<BlockNumberFor<T>>, DispatchError> {
		let new_time = Self::resolve_time(new_time)?;
		let lookup = Lookup::<T>::get(id);
		let (when, index) = lookup.ok_or(Error::<T>::NotFound)?;
		if new_time == when {
			return Err(Error::<T>::RescheduleNoChange.into())
		}
		let task = Agenda::<T>::try_mutate(when, |agenda| {
			let task = agenda.get_mut(index as usize).ok_or(Error::<T>::NotFound)?;
			task.take().ok_or(Error::<T>::NotFound)
		})?;
		Self::cleanup_agenda(when);
		Self::deposit_event(Event::Canceled { when, index });
		Self::place_task(new_time, task).map_err(|x| x.0)
	}
	/// Remove the retry configuration of the task at `(when, index)`, after
	/// checking that `origin` is privileged enough to do so.
	fn do_cancel_retry(
		origin: &T::PalletsOrigin,
		(when, index): TaskAddress<BlockNumberFor<T>>,
	) -> Result<(), DispatchError> {
		let agenda = Agenda::<T>::get(when);
		// The slot must both exist and be occupied.
		let task = agenda
			.get(index as usize)
			.and_then(Option::as_ref)
			.ok_or(Error::<T>::NotFound)?;
		Self::ensure_privilege(origin, &task.origin)?;
		Retries::<T>::remove((when, index));
		Ok(())
	}
}
/// Why `service_task` could not dispatch a scheduled task.
enum ServiceTaskError {
	/// Could not be executed due to missing preimage.
	Unavailable,
	/// Could not be executed due to weight limitations.
	Overweight,
}
use ServiceTaskError::*;
impl<T: Config> Pallet<T> {
	/// Service up to `max` agendas queue starting from earliest incompletely executed agenda.
194763
	fn service_agendas(weight: &mut WeightMeter, now: BlockNumberFor<T>, max: u32) {
194763
		if weight.try_consume(T::WeightInfo::service_agendas_base()).is_err() {
			return
194763
		}
194763

            
194763
		let mut incomplete_since = now + One::one();
194763
		let mut when = IncompleteSince::<T>::take().unwrap_or(now);
194763
		let mut executed = 0;
194763

            
194763
		let max_items = T::MaxScheduledPerBlock::get();
194763
		let mut count_down = max;
194763
		let service_agenda_base_weight = T::WeightInfo::service_agenda_base(max_items);
389526
		while count_down > 0 && when <= now && weight.can_consume(service_agenda_base_weight) {
194763
			if !Self::service_agenda(weight, &mut executed, now, when, u32::max_value()) {
				incomplete_since = incomplete_since.min(when);
194763
			}
194763
			when.saturating_inc();
194763
			count_down.saturating_dec();
		}
194763
		incomplete_since = incomplete_since.min(when);
194763
		if incomplete_since <= now {
			IncompleteSince::<T>::put(incomplete_since);
194763
		}
194763
	}
	/// Returns `true` if the agenda was fully completed, `false` if it should be revisited at a
	/// later block.
194763
	fn service_agenda(
194763
		weight: &mut WeightMeter,
194763
		executed: &mut u32,
194763
		now: BlockNumberFor<T>,
194763
		when: BlockNumberFor<T>,
194763
		max: u32,
194763
	) -> bool {
194763
		let mut agenda = Agenda::<T>::get(when);
194763
		let mut ordered = agenda
194763
			.iter()
194763
			.enumerate()
194763
			.filter_map(|(index, maybe_item)| {
				maybe_item.as_ref().map(|item| (index as u32, item.priority))
194763
			})
194763
			.collect::<Vec<_>>();
194763
		ordered.sort_by_key(|k| k.1);
194763
		let within_limit = weight
194763
			.try_consume(T::WeightInfo::service_agenda_base(ordered.len() as u32))
194763
			.is_ok();
194763
		debug_assert!(within_limit, "weight limit should have been checked in advance");
		// Items which we know can be executed and have postponed for execution in a later block.
194763
		let mut postponed = (ordered.len() as u32).saturating_sub(max);
194763
		// Items which we don't know can ever be executed.
194763
		let mut dropped = 0;
194763
		for (agenda_index, _) in ordered.into_iter().take(max as usize) {
			let task = match agenda[agenda_index as usize].take() {
				None => continue,
				Some(t) => t,
			};
			let base_weight = T::WeightInfo::service_task(
				task.call.lookup_len().map(|x| x as usize),
				task.maybe_id.is_some(),
				task.maybe_periodic.is_some(),
			);
			if !weight.can_consume(base_weight) {
				postponed += 1;
				break
			}
			let result = Self::service_task(weight, now, when, agenda_index, *executed == 0, task);
			agenda[agenda_index as usize] = match result {
				Err((Unavailable, slot)) => {
					dropped += 1;
					slot
				},
				Err((Overweight, slot)) => {
					postponed += 1;
					slot
				},
				Ok(()) => {
					*executed += 1;
					None
				},
			};
		}
194763
		if postponed > 0 || dropped > 0 {
			Agenda::<T>::insert(when, agenda);
194763
		} else {
194763
			Agenda::<T>::remove(when);
194763
		}
194763
		postponed == 0
194763
	}
	/// Service (i.e. execute) the given task, being careful not to overflow the `weight` counter.
	///
	/// This involves:
	/// - removing and potentially replacing the `Lookup` entry for the task.
	/// - realizing the task's call which can include a preimage lookup.
	/// - Rescheduling the task for execution in a later agenda if periodic.
	fn service_task(
		weight: &mut WeightMeter,
		now: BlockNumberFor<T>,
		when: BlockNumberFor<T>,
		agenda_index: u32,
		is_first: bool,
		mut task: ScheduledOf<T>,
	) -> Result<(), (ServiceTaskError, Option<ScheduledOf<T>>)> {
		// Drop the name mapping now; if the task is periodic, `place_task` below
		// re-inserts it under the new address.
		if let Some(ref id) = task.maybe_id {
			Lookup::<T>::remove(id);
		}
		// Realize the call; for by-hash calls this performs a preimage lookup.
		let (call, lookup_len) = match T::Preimages::peek(&task.call) {
			Ok(c) => c,
			Err(_) => {
				Self::deposit_event(Event::CallUnavailable {
					task: (when, agenda_index),
					id: task.maybe_id,
				});
				// It was not available when we needed it, so we don't need to have requested it
				// anymore.
				T::Preimages::drop(&task.call);
				// We don't know why `peek` failed, thus we must account here for the "full weight".
				let _ = weight.try_consume(T::WeightInfo::service_task(
					task.call.lookup_len().map(|x| x as usize),
					task.maybe_id.is_some(),
					task.maybe_periodic.is_some(),
				));
				return Err((Unavailable, Some(task)))
			},
		};
		let _ = weight.try_consume(T::WeightInfo::service_task(
			lookup_len.map(|x| x as usize),
			task.maybe_id.is_some(),
			task.maybe_periodic.is_some(),
		));
		match Self::execute_dispatch(weight, task.origin.clone(), call) {
			// `is_first` means nothing else has executed in this pass, so a task
			// that still does not fit is permanently overweight: drop it.
			Err(()) if is_first => {
				T::Preimages::drop(&task.call);
				Self::deposit_event(Event::PermanentlyOverweight {
					task: (when, agenda_index),
					id: task.maybe_id,
				});
				Err((Unavailable, Some(task)))
			},
			// Otherwise it may fit into a later, emptier block: postpone it.
			Err(()) => Err((Overweight, Some(task))),
			Ok(result) => {
				let failed = result.is_err();
				let maybe_retry_config = Retries::<T>::take((when, agenda_index));
				Self::deposit_event(Event::Dispatched {
					task: (when, agenda_index),
					id: task.maybe_id,
					result,
				});
				// A failed dispatch with a retry configuration gets another attempt.
				match maybe_retry_config {
					Some(retry_config) if failed => {
						Self::schedule_retry(weight, now, when, agenda_index, &task, retry_config);
					},
					_ => {},
				}
				if let &Some((period, count)) = &task.maybe_periodic {
					// Decrement the remaining repetitions; `None` means this was the last run.
					if count > 1 {
						task.maybe_periodic = Some((period, count - 1));
					} else {
						task.maybe_periodic = None;
					}
					let wake = now.saturating_add(period);
					match Self::place_task(wake, task) {
						Ok(new_address) =>
							// Carry the retry configuration over to the new address.
							if let Some(retry_config) = maybe_retry_config {
								Retries::<T>::insert(new_address, retry_config);
							},
						Err((_, task)) => {
							// TODO: Leave task in storage somewhere for it to be rescheduled
							// manually.
							T::Preimages::drop(&task.call);
							Self::deposit_event(Event::PeriodicFailed {
								task: (when, agenda_index),
								id: task.maybe_id,
							});
						},
					}
				} else {
					T::Preimages::drop(&task.call);
				}
				Ok(())
			},
		}
	}
	/// Make a dispatch to the given `call` from the given `origin`, ensuring that the `weight`
	/// counter does not exceed its limit and that it is counted accurately (e.g. accounted using
	/// post info if available).
	///
	/// NOTE: Only the weight for this function will be counted (origin lookup, dispatch and the
	/// call itself).
	///
	/// Returns an error if the call is overweight.
	fn execute_dispatch(
		weight: &mut WeightMeter,
		origin: T::PalletsOrigin,
		call: <T as Config>::RuntimeCall,
	) -> Result<DispatchResult, ()> {
		// Signed origins are benchmarked separately from unsigned/root ones.
		let base_weight = if matches!(origin.as_system_ref(), Some(&RawOrigin::Signed(_))) {
			T::WeightInfo::execute_dispatch_signed()
		} else {
			T::WeightInfo::execute_dispatch_unsigned()
		};
		let declared_weight = call.get_dispatch_info().weight;
		// We only allow a scheduled call if it cannot push the weight past the limit.
		if !weight.can_consume(base_weight.saturating_add(declared_weight)) {
			return Err(())
		}

		let (maybe_actual_weight, result) = match call.dispatch(origin.into()) {
			Ok(post_info) => (post_info.actual_weight, Ok(())),
			Err(error_and_info) =>
				(error_and_info.post_info.actual_weight, Err(error_and_info.error)),
		};
		// Prefer the post-dispatch actual weight when the call reports one.
		let consumed = maybe_actual_weight.unwrap_or(declared_weight);
		let _ = weight.try_consume(base_weight);
		let _ = weight.try_consume(consumed);
		Ok(result)
	}
	/// Check if a task has a retry configuration in place and, if so, try to reschedule it.
	///
	/// Possible causes for failure to schedule a retry for a task:
	/// - there wasn't enough weight to run the task reschedule logic
	/// - there was no retry configuration in place
	/// - there were no more retry attempts left
	/// - the agenda was full.
	fn schedule_retry(
		weight: &mut WeightMeter,
		now: BlockNumberFor<T>,
		when: BlockNumberFor<T>,
		agenda_index: u32,
		task: &ScheduledOf<T>,
		retry_config: RetryConfig<BlockNumberFor<T>>,
	) {
		// Not enough weight left to even attempt the reschedule: report and give up.
		if weight
			.try_consume(T::WeightInfo::schedule_retry(T::MaxScheduledPerBlock::get()))
			.is_err()
		{
			Self::deposit_event(Event::RetryFailed {
				task: (when, agenda_index),
				id: task.maybe_id,
			});
			return;
		}
		let RetryConfig { total_retries, mut remaining, period } = retry_config;
		// Consume one attempt; if none are left, stop retrying (no event).
		remaining = match remaining.checked_sub(1) {
			Some(n) => n,
			None => return,
		};
		let wake = now.saturating_add(period);
		match Self::place_task(wake, task.as_retry()) {
			Ok(address) => {
				// Reinsert the retry config to the new address of the task after it was
				// placed.
				Retries::<T>::insert(address, RetryConfig { total_retries, remaining, period });
			},
			Err((_, task)) => {
				// TODO: Leave task in storage somewhere for it to be
				// rescheduled manually.
				T::Preimages::drop(&task.call);
				Self::deposit_event(Event::RetryFailed {
					task: (when, agenda_index),
					id: task.maybe_id,
				});
			},
		}
	}
	/// Ensure that `left` has at least the same level of privilege or higher than `right`.
	///
	/// Returns an error if `left` has a lower level of privilege or the two cannot be compared.
	fn ensure_privilege(
		left: &<T as Config>::PalletsOrigin,
		right: &<T as Config>::PalletsOrigin,
	) -> Result<(), DispatchError> {
		match T::OriginPrivilegeCmp::cmp_privilege(left, right) {
			// Equal or greater privilege passes; lesser or incomparable is rejected.
			Some(Ordering::Equal) | Some(Ordering::Greater) => Ok(()),
			Some(Ordering::Less) | None => Err(BadOrigin.into()),
		}
	}
}
#[allow(deprecated)]
impl<T: Config> schedule::v2::Anon<BlockNumberFor<T>, <T as Config>::RuntimeCall, T::PalletsOrigin>
	for Pallet<T>
{
	type Address = TaskAddress<BlockNumberFor<T>>;
	type Hash = T::Hash;

	fn schedule(
		when: DispatchTime<BlockNumberFor<T>>,
		maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
		priority: schedule::Priority,
		origin: T::PalletsOrigin,
		call: CallOrHashOf<T>,
	) -> Result<Self::Address, DispatchError> {
		// The legacy interface only supports by-value calls here.
		let inner = call.as_value().ok_or(DispatchError::CannotLookup)?;
		let bounded = T::Preimages::bound(inner)?.transmute();
		Self::do_schedule(when, maybe_periodic, priority, origin, bounded)
	}

	fn cancel((when, index): Self::Address) -> Result<(), ()> {
		Self::do_cancel(None, (when, index)).map_err(|_| ())
	}

	fn reschedule(
		address: Self::Address,
		when: DispatchTime<BlockNumberFor<T>>,
	) -> Result<Self::Address, DispatchError> {
		Self::do_reschedule(address, when)
	}

	fn next_dispatch_time((when, index): Self::Address) -> Result<BlockNumberFor<T>, ()> {
		// The task exists iff its agenda slot exists; its dispatch time is the agenda block.
		Agenda::<T>::get(when).get(index as usize).map(|_| when).ok_or(())
	}
}
// TODO: migrate `schedule::v2::Anon` to `v3`
#[allow(deprecated)]
impl<T: Config> schedule::v2::Named<BlockNumberFor<T>, <T as Config>::RuntimeCall, T::PalletsOrigin>
	for Pallet<T>
{
	type Address = TaskAddress<BlockNumberFor<T>>;
	type Hash = T::Hash;

	fn schedule_named(
		id: Vec<u8>,
		when: DispatchTime<BlockNumberFor<T>>,
		maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
		priority: schedule::Priority,
		origin: T::PalletsOrigin,
		call: CallOrHashOf<T>,
	) -> Result<Self::Address, ()> {
		// The legacy interface only supports by-value calls and opaque byte ids;
		// the id is hashed into the fixed-size `TaskName` used internally.
		let inner = call.as_value().ok_or(())?;
		let bounded = T::Preimages::bound(inner).map_err(|_| ())?.transmute();
		let name = blake2_256(&id[..]);
		Self::do_schedule_named(name, when, maybe_periodic, priority, origin, bounded)
			.map_err(|_| ())
	}

	fn cancel_named(id: Vec<u8>) -> Result<(), ()> {
		Self::do_cancel_named(None, blake2_256(&id[..])).map_err(|_| ())
	}

	fn reschedule_named(
		id: Vec<u8>,
		when: DispatchTime<BlockNumberFor<T>>,
	) -> Result<Self::Address, DispatchError> {
		Self::do_reschedule_named(blake2_256(&id[..]), when)
	}

	fn next_dispatch_time(id: Vec<u8>) -> Result<BlockNumberFor<T>, ()> {
		let name = blake2_256(&id[..]);
		let (when, index) = Lookup::<T>::get(name).ok_or(())?;
		// Confirm the task is still present in its agenda before reporting it.
		Agenda::<T>::get(when).get(index as usize).map(|_| when).ok_or(())
	}
}
impl<T: Config> schedule::v3::Anon<BlockNumberFor<T>, <T as Config>::RuntimeCall, T::PalletsOrigin>
	for Pallet<T>
{
	type Address = TaskAddress<BlockNumberFor<T>>;
	type Hasher = T::Hashing;
990
	fn schedule(
990
		when: DispatchTime<BlockNumberFor<T>>,
990
		maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
990
		priority: schedule::Priority,
990
		origin: T::PalletsOrigin,
990
		call: BoundedCallOf<T>,
990
	) -> Result<Self::Address, DispatchError> {
990
		Self::do_schedule(when, maybe_periodic, priority, origin, call)
990
	}
54
	fn cancel((when, index): Self::Address) -> Result<(), DispatchError> {
54
		Self::do_cancel(None, (when, index)).map_err(map_err_to_v3_err::<T>)
54
	}
	fn reschedule(
		address: Self::Address,
		when: DispatchTime<BlockNumberFor<T>>,
	) -> Result<Self::Address, DispatchError> {
		Self::do_reschedule(address, when).map_err(map_err_to_v3_err::<T>)
	}
	fn next_dispatch_time(
		(when, index): Self::Address,
	) -> Result<BlockNumberFor<T>, DispatchError> {
		Agenda::<T>::get(when)
			.get(index as usize)
			.ok_or(DispatchError::Unavailable)
			.map(|_| when)
	}
}
use schedule::v3::TaskName;
impl<T: Config> schedule::v3::Named<BlockNumberFor<T>, <T as Config>::RuntimeCall, T::PalletsOrigin>
	for Pallet<T>
{
	type Address = TaskAddress<BlockNumberFor<T>>;
	type Hasher = T::Hashing;
	fn schedule_named(
		id: TaskName,
		when: DispatchTime<BlockNumberFor<T>>,
		maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>,
		priority: schedule::Priority,
		origin: T::PalletsOrigin,
		call: BoundedCallOf<T>,
	) -> Result<Self::Address, DispatchError> {
		Self::do_schedule_named(id, when, maybe_periodic, priority, origin, call)
	}
	fn cancel_named(id: TaskName) -> Result<(), DispatchError> {
		Self::do_cancel_named(None, id).map_err(map_err_to_v3_err::<T>)
	}
	fn reschedule_named(
		id: TaskName,
		when: DispatchTime<BlockNumberFor<T>>,
	) -> Result<Self::Address, DispatchError> {
		Self::do_reschedule_named(id, when).map_err(map_err_to_v3_err::<T>)
	}
	fn next_dispatch_time(id: TaskName) -> Result<BlockNumberFor<T>, DispatchError> {
		Lookup::<T>::get(id)
			.and_then(|(when, index)| Agenda::<T>::get(when).get(index as usize).map(|_| when))
			.ok_or(DispatchError::Unavailable)
	}
}
/// Maps a pallet error to an `schedule::v3` error.
fn map_err_to_v3_err<T: Config>(err: DispatchError) -> DispatchError {
	if err == DispatchError::from(Error::<T>::NotFound) {
		DispatchError::Unavailable
	} else {
		err
	}
}