// This file is part of Substrate.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
19
use crate::{ExecutionLimit, HwBench};
20

            
21
use sc_telemetry::SysInfo;
22
use sp_core::{sr25519, Pair};
23
use sp_io::crypto::sr25519_verify;
24

            
25
use derive_more::From;
26
use rand::{seq::SliceRandom, Rng, RngCore};
27
use serde::{de::Visitor, Deserialize, Deserializer, Serialize, Serializer};
28
use std::{
29
	fmt,
30
	fmt::{Display, Formatter},
31
	fs::File,
32
	io::{Seek, SeekFrom, Write},
33
	ops::{Deref, DerefMut},
34
	path::{Path, PathBuf},
35
	time::{Duration, Instant},
36
};
37

            
38
/// A single hardware metric.
39
#[derive(Deserialize, Serialize, Debug, Clone, Copy, PartialEq)]
40
pub enum Metric {
41
	/// SR25519 signature verification.
42
	Sr25519Verify,
43
	/// Blake2-256 hashing algorithm.
44
	Blake2256,
45
	/// Copying data in RAM.
46
	MemCopy,
47
	/// Disk sequential write.
48
	DiskSeqWrite,
49
	/// Disk random write.
50
	DiskRndWrite,
51
}
52

            
53
/// Describes a checking failure for the hardware requirements.
54
#[derive(Debug, Clone, Copy, PartialEq)]
55
pub struct CheckFailure {
56
	/// The metric that failed the check.
57
	pub metric: Metric,
58
	/// The expected minimum value.
59
	pub expected: Throughput,
60
	/// The measured value.
61
	pub found: Throughput,
62
}
63

            
64
/// A list of metrics that failed to meet the minimum hardware requirements.
65
#[derive(Debug, Clone, PartialEq, From)]
66
pub struct CheckFailures(pub Vec<CheckFailure>);
67

            
68
impl Display for CheckFailures {
69
	fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {
70
		write!(formatter, "Failed checks: ")?;
71
		for failure in &self.0 {
72
			write!(
73
				formatter,
74
				"{}(expected: {}, found: {}), ",
75
				failure.metric.name(),
76
				failure.expected,
77
				failure.found
78
			)?
79
		}
80
		Ok(())
81
	}
82
}
83

            
84
impl Metric {
85
	/// The category of the metric.
86
	pub fn category(&self) -> &'static str {
87
		match self {
88
			Self::Sr25519Verify | Self::Blake2256 => "CPU",
89
			Self::MemCopy => "Memory",
90
			Self::DiskSeqWrite | Self::DiskRndWrite => "Disk",
91
		}
92
	}
93

            
94
	/// The name of the metric. It is always prefixed by the [`self.category()`].
95
	pub fn name(&self) -> &'static str {
96
		match self {
97
			Self::Sr25519Verify => "SR25519-Verify",
98
			Self::Blake2256 => "BLAKE2-256",
99
			Self::MemCopy => "Copy",
100
			Self::DiskSeqWrite => "Seq Write",
101
			Self::DiskRndWrite => "Rnd Write",
102
		}
103
	}
104
}
105

            
106
/// The unit in which the [`Throughput`] (bytes per second) is denoted.
pub enum Unit {
	/// Gibibytes per second.
	GiBs,
	/// Mebibytes per second.
	MiBs,
	/// Kibibytes per second.
	KiBs,
}
112

            
113
impl fmt::Display for Unit {
114
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
115
		f.write_str(match self {
116
			Unit::GiBs => "GiBs",
117
			Unit::MiBs => "MiBs",
118
			Unit::KiBs => "KiBs",
119
		})
120
	}
121
}
122

            
123
/// Throughput as measured in bytes per second.
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd)]
pub struct Throughput(f64);
126

            
127
// Binary (IEC) size multipliers used to convert to and from raw bytes.
const KIBIBYTE: f64 = 1024.0;
const MEBIBYTE: f64 = 1024.0 * 1024.0;
const GIBIBYTE: f64 = 1024.0 * 1024.0 * 1024.0;
130

            
131
impl Throughput {
132
	/// Construct [`Self`] from kibibyte/s.
133
	pub fn from_kibs(kibs: f64) -> Throughput {
134
		Throughput(kibs * KIBIBYTE)
135
	}
136

            
137
	/// Construct [`Self`] from mebibyte/s.
138
	pub fn from_mibs(mibs: f64) -> Throughput {
139
		Throughput(mibs * MEBIBYTE)
140
	}
141

            
142
	/// Construct [`Self`] from gibibyte/s.
143
	pub fn from_gibs(gibs: f64) -> Throughput {
144
		Throughput(gibs * GIBIBYTE)
145
	}
146

            
147
	/// [`Self`] as number of byte/s.
148
	pub fn as_bytes(&self) -> f64 {
149
		self.0
150
	}
151

            
152
	/// [`Self`] as number of kibibyte/s.
153
	pub fn as_kibs(&self) -> f64 {
154
		self.0 / KIBIBYTE
155
	}
156

            
157
	/// [`Self`] as number of mebibyte/s.
158
	pub fn as_mibs(&self) -> f64 {
159
		self.0 / MEBIBYTE
160
	}
161

            
162
	/// [`Self`] as number of gibibyte/s.
163
	pub fn as_gibs(&self) -> f64 {
164
		self.0 / GIBIBYTE
165
	}
166

            
167
	/// Normalizes [`Self`] to use the largest unit possible.
168
	pub fn normalize(&self) -> (f64, Unit) {
169
		let bs = self.0;
170

            
171
		if bs >= GIBIBYTE {
172
			(self.as_gibs(), Unit::GiBs)
173
		} else if bs >= MEBIBYTE {
174
			(self.as_mibs(), Unit::MiBs)
175
		} else {
176
			(self.as_kibs(), Unit::KiBs)
177
		}
178
	}
179
}
180

            
181
impl fmt::Display for Throughput {
182
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
183
		let (value, unit) = self.normalize();
184
		write!(f, "{:.2?} {}", value, unit)
185
	}
186
}
187

            
188
/// Serializes `Throughput` and uses MiBs as the unit.
189
pub fn serialize_throughput<S>(throughput: &Throughput, serializer: S) -> Result<S::Ok, S::Error>
190
where
191
	S: Serializer,
192
{
193
	serializer.serialize_u64(throughput.as_mibs() as u64)
194
}
195

            
196
/// Serializes `Option<Throughput>` and uses MiBs as the unit.
197
pub fn serialize_throughput_option<S>(
198
	maybe_throughput: &Option<Throughput>,
199
	serializer: S,
200
) -> Result<S::Ok, S::Error>
201
where
202
	S: Serializer,
203
{
204
	if let Some(throughput) = maybe_throughput {
205
		return serializer.serialize_some(&(throughput.as_mibs() as u64))
206
	}
207
	serializer.serialize_none()
208
}
209

            
210
/// Serializes throughput into MiBs and represents it as `f64`.
211
fn serialize_throughput_as_f64<S>(throughput: &Throughput, serializer: S) -> Result<S::Ok, S::Error>
212
where
213
	S: Serializer,
214
{
215
	serializer.serialize_f64(throughput.as_mibs())
216
}
217

            
218
struct ThroughputVisitor;
219
impl<'de> Visitor<'de> for ThroughputVisitor {
220
	type Value = Throughput;
221

            
222
	fn expecting(&self, formatter: &mut Formatter) -> fmt::Result {
223
		formatter.write_str("A value that is a f64.")
224
	}
225

            
226
	fn visit_f64<E>(self, value: f64) -> Result<Self::Value, E>
227
	where
228
		E: serde::de::Error,
229
	{
230
		Ok(Throughput::from_mibs(value))
231
	}
232
}
233

            
234
fn deserialize_throughput<'de, D>(deserializer: D) -> Result<Throughput, D::Error>
235
where
236
	D: Deserializer<'de>,
237
{
238
	Ok(deserializer.deserialize_f64(ThroughputVisitor))?
239
}
240

            
241
/// Multiple requirements for the hardware.
242
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
243
pub struct Requirements(pub Vec<Requirement>);
244

            
245
/// A single requirement for the hardware.
246
#[derive(Deserialize, Serialize, Debug, Clone, Copy, PartialEq)]
247
pub struct Requirement {
248
	/// The metric to measure.
249
	pub metric: Metric,
250
	/// The minimal throughput that needs to be archived for this requirement.
251
	#[serde(
252
		serialize_with = "serialize_throughput_as_f64",
253
		deserialize_with = "deserialize_throughput"
254
	)]
255
	pub minimum: Throughput,
256
}
257

            
258
#[inline(always)]
259
pub(crate) fn benchmark<E>(
260
	name: &str,
261
	size: usize,
262
	max_iterations: usize,
263
	max_duration: Duration,
264
	mut run: impl FnMut() -> Result<(), E>,
265
) -> Result<Throughput, E> {
266
	// Run the benchmark once as a warmup to get the code into the L1 cache.
267
	run()?;
268

            
269
	// Then run it multiple times and average the result.
270
	let timestamp = Instant::now();
271
	let mut elapsed = Duration::default();
272
	let mut count = 0;
273
	for _ in 0..max_iterations {
274
		run()?;
275

            
276
		count += 1;
277
		elapsed = timestamp.elapsed();
278

            
279
		if elapsed >= max_duration {
280
			break
281
		}
282
	}
283

            
284
	let score = Throughput::from_kibs((size * count) as f64 / (elapsed.as_secs_f64() * 1024.0));
285
	log::trace!(
286
		"Calculated {} of {} in {} iterations in {}ms",
287
		name,
288
		score,
289
		count,
290
		elapsed.as_millis()
291
	);
292
	Ok(score)
293
}
294

            
295
/// Gathers information about node's hardware and software.
296
pub fn gather_sysinfo() -> SysInfo {
297
	#[allow(unused_mut)]
298
	let mut sysinfo = SysInfo {
299
		cpu: None,
300
		memory: None,
301
		core_count: None,
302
		linux_kernel: None,
303
		linux_distro: None,
304
		is_virtual_machine: None,
305
	};
306

            
307
	#[cfg(target_os = "linux")]
308
	crate::sysinfo_linux::gather_linux_sysinfo(&mut sysinfo);
309

            
310
	sysinfo
311
}
312

            
313
/// Discourages the compiler from optimizing out benchmark code touching `slice`.
///
/// Volatile reads and writes are guaranteed to not be elided nor reordered,
/// so they effectively clobber a piece of memory and keep technically
/// unnecessary benchmark work alive. This is not totally bulletproof in
/// theory, but should work in practice.
///
/// # Panics
///
/// Panics if `slice` is empty.
#[inline(never)]
fn clobber_slice<T>(slice: &mut [T]) {
	assert!(!slice.is_empty());

	// SAFETY: The slice is non-empty (asserted above), so reading and
	//         writing back its first element is always safe.
	unsafe {
		let first = std::ptr::read_volatile(slice.as_ptr());
		std::ptr::write_volatile(slice.as_mut_ptr(), first);
	}
}
332

            
333
/// Same as [`clobber_slice`], but for a single value; see there for the
/// rationale behind the volatile read/write round-trip.
#[inline(never)]
fn clobber_value<T>(input: &mut T) {
	// SAFETY: `input` is a valid, exclusive reference, so a volatile read
	//         followed by a volatile write of the same value is sound.
	unsafe {
		let current = std::ptr::read_volatile(input);
		std::ptr::write_volatile(input, current);
	}
}
341

            
342
/// A default [`ExecutionLimit`] that can be used to call [`benchmark_cpu`].
343
pub const DEFAULT_CPU_EXECUTION_LIMIT: ExecutionLimit =
344
	ExecutionLimit::Both { max_iterations: 4 * 1024, max_duration: Duration::from_millis(100) };
345

            
346
// This benchmarks the CPU speed as measured by calculating BLAKE2b-256 hashes, in bytes per second.
347
pub fn benchmark_cpu(limit: ExecutionLimit) -> Throughput {
348
	// In general the results of this benchmark are somewhat sensitive to how much
349
	// data we hash at the time. The smaller this is the *less* B/s we can hash,
350
	// the bigger this is the *more* B/s we can hash, up until a certain point
351
	// where we can achieve roughly ~100% of what the hasher can do. If we'd plot
352
	// this on a graph with the number of bytes we want to hash on the X axis
353
	// and the speed in B/s on the Y axis then we'd essentially see it grow
354
	// logarithmically.
355
	//
356
	// In practice however we might not always have enough data to hit the maximum
357
	// possible speed that the hasher can achieve, so the size set here should be
358
	// picked in such a way as to still measure how fast the hasher is at hashing,
359
	// but without hitting its theoretical maximum speed.
360
	const SIZE: usize = 32 * 1024;
361

            
362
	let mut buffer = Vec::new();
363
	buffer.resize(SIZE, 0x66);
364
	let mut hash = Default::default();
365

            
366
	let run = || -> Result<(), ()> {
367
		clobber_slice(&mut buffer);
368
		hash = sp_crypto_hashing::blake2_256(&buffer);
369
		clobber_slice(&mut hash);
370

            
371
		Ok(())
372
	};
373

            
374
	benchmark("CPU score", SIZE, limit.max_iterations(), limit.max_duration(), run)
375
		.expect("benchmark cannot fail; qed")
376
}
377

            
378
/// A default [`ExecutionLimit`] that can be used to call [`benchmark_memory`].
379
pub const DEFAULT_MEMORY_EXECUTION_LIMIT: ExecutionLimit =
380
	ExecutionLimit::Both { max_iterations: 32, max_duration: Duration::from_millis(100) };
381

            
382
// This benchmarks the effective `memcpy` memory bandwidth available in bytes per second.
383
//
384
// It doesn't technically measure the absolute maximum memory bandwidth available,
385
// but that's fine, because real code most of the time isn't optimized to take
386
// advantage of the full memory bandwidth either.
387
pub fn benchmark_memory(limit: ExecutionLimit) -> Throughput {
388
	// Ideally this should be at least as big as the CPU's L3 cache,
389
	// and it should be big enough so that the `memcpy` takes enough
390
	// time to be actually measurable.
391
	//
392
	// As long as it's big enough increasing it further won't change
393
	// the benchmark's results.
394
	const SIZE: usize = 64 * 1024 * 1024;
395

            
396
	let mut src = Vec::new();
397
	let mut dst = Vec::new();
398

            
399
	// Prefault the pages; we want to measure the memory bandwidth,
400
	// not how fast the kernel can supply us with fresh memory pages.
401
	src.resize(SIZE, 0x66);
402
	dst.resize(SIZE, 0x77);
403

            
404
	let run = || -> Result<(), ()> {
405
		clobber_slice(&mut src);
406
		clobber_slice(&mut dst);
407

            
408
		// SAFETY: Both vectors are of the same type and of the same size,
409
		//         so copying data between them is safe.
410
		unsafe {
411
			// We use `memcpy` directly here since `copy_from_slice` isn't actually
412
			// guaranteed to be turned into a `memcpy`.
413
			libc::memcpy(dst.as_mut_ptr().cast(), src.as_ptr().cast(), SIZE);
414
		}
415

            
416
		clobber_slice(&mut dst);
417
		clobber_slice(&mut src);
418

            
419
		Ok(())
420
	};
421

            
422
	benchmark("memory score", SIZE, limit.max_iterations(), limit.max_duration(), run)
423
		.expect("benchmark cannot fail; qed")
424
}
425

            
426
/// A scratch file that is removed from disk when dropped.
struct TemporaryFile {
	// `None` only while `drop` is running; otherwise always `Some`.
	fp: Option<File>,
	// Location of the file, used to remove it on drop.
	path: PathBuf,
}
430

            
431
impl Drop for TemporaryFile {
432
	fn drop(&mut self) {
433
		let _ = self.fp.take();
434

            
435
		// Remove the file.
436
		//
437
		// This has to be done *after* the benchmark,
438
		// otherwise it changes the results as the data
439
		// doesn't actually get properly flushed to the disk,
440
		// since the file's not there anymore.
441
		if let Err(error) = std::fs::remove_file(&self.path) {
442
			log::warn!("Failed to remove the file used for the disk benchmark: {}", error);
443
		}
444
	}
445
}
446

            
447
impl Deref for TemporaryFile {
448
	type Target = File;
449
	fn deref(&self) -> &Self::Target {
450
		self.fp.as_ref().expect("`fp` is None only during `drop`")
451
	}
452
}
453

            
454
impl DerefMut for TemporaryFile {
455
	fn deref_mut(&mut self) -> &mut Self::Target {
456
		self.fp.as_mut().expect("`fp` is None only during `drop`")
457
	}
458
}
459

            
460
/// A PRNG with a fixed seed, so every benchmark run works on identical data.
fn rng() -> rand_pcg::Pcg64 {
	rand_pcg::Pcg64::new(0xcafef00dd15ea5e5, 0xa02bdbf7bb3c0a7ac28fa16a64abf96)
}
463

            
464
fn random_data(size: usize) -> Vec<u8> {
465
	let mut buffer = Vec::new();
466
	buffer.resize(size, 0);
467
	rng().fill(&mut buffer[..]);
468
	buffer
469
}
470

            
471
/// A default [`ExecutionLimit`] that can be used to call [`benchmark_disk_sequential_writes`]
472
/// and [`benchmark_disk_random_writes`].
473
pub const DEFAULT_DISK_EXECUTION_LIMIT: ExecutionLimit =
474
	ExecutionLimit::Both { max_iterations: 32, max_duration: Duration::from_millis(300) };
475

            
476
pub fn benchmark_disk_sequential_writes(
477
	limit: ExecutionLimit,
478
	directory: &Path,
479
) -> Result<Throughput, String> {
480
	const SIZE: usize = 64 * 1024 * 1024;
481

            
482
	let buffer = random_data(SIZE);
483
	let path = directory.join(".disk_bench_seq_wr.tmp");
484

            
485
	let fp =
486
		File::create(&path).map_err(|error| format!("failed to create a test file: {}", error))?;
487

            
488
	let mut fp = TemporaryFile { fp: Some(fp), path };
489

            
490
	fp.sync_all()
491
		.map_err(|error| format!("failed to fsync the test file: {}", error))?;
492

            
493
	let run = || {
494
		// Just dump everything to the disk in one go.
495
		fp.write_all(&buffer)
496
			.map_err(|error| format!("failed to write to the test file: {}", error))?;
497

            
498
		// And then make sure it was actually written to disk.
499
		fp.sync_all()
500
			.map_err(|error| format!("failed to fsync the test file: {}", error))?;
501

            
502
		// Rewind to the beginning for the next iteration of the benchmark.
503
		fp.seek(SeekFrom::Start(0))
504
			.map_err(|error| format!("failed to seek to the start of the test file: {}", error))?;
505

            
506
		Ok(())
507
	};
508

            
509
	benchmark(
510
		"disk sequential write score",
511
		SIZE,
512
		limit.max_iterations(),
513
		limit.max_duration(),
514
		run,
515
	)
516
}
517

            
518
pub fn benchmark_disk_random_writes(
519
	limit: ExecutionLimit,
520
	directory: &Path,
521
) -> Result<Throughput, String> {
522
	const SIZE: usize = 64 * 1024 * 1024;
523

            
524
	let buffer = random_data(SIZE);
525
	let path = directory.join(".disk_bench_rand_wr.tmp");
526

            
527
	let fp =
528
		File::create(&path).map_err(|error| format!("failed to create a test file: {}", error))?;
529

            
530
	let mut fp = TemporaryFile { fp: Some(fp), path };
531

            
532
	// Since we want to test random writes we need an existing file
533
	// through which we can seek, so here we just populate it with some data.
534
	fp.write_all(&buffer)
535
		.map_err(|error| format!("failed to write to the test file: {}", error))?;
536

            
537
	fp.sync_all()
538
		.map_err(|error| format!("failed to fsync the test file: {}", error))?;
539

            
540
	// Generate a list of random positions at which we'll issue writes.
541
	let mut positions = Vec::with_capacity(SIZE / 4096);
542
	{
543
		let mut position = 0;
544
		while position < SIZE {
545
			positions.push(position);
546
			position += 4096;
547
		}
548
	}
549

            
550
	positions.shuffle(&mut rng());
551

            
552
	let run = || {
553
		for &position in &positions {
554
			fp.seek(SeekFrom::Start(position as u64))
555
				.map_err(|error| format!("failed to seek in the test file: {}", error))?;
556

            
557
			// Here we deliberately only write half of the chunk since we don't
558
			// want the OS' disk scheduler to coalesce our writes into one single
559
			// sequential write.
560
			//
561
			// Also the chunk's size is deliberately exactly half of a modern disk's
562
			// sector size to trigger an RMW cycle.
563
			let chunk = &buffer[position..position + 2048];
564
			fp.write_all(&chunk)
565
				.map_err(|error| format!("failed to write to the test file: {}", error))?;
566
		}
567

            
568
		fp.sync_all()
569
			.map_err(|error| format!("failed to fsync the test file: {}", error))?;
570

            
571
		Ok(())
572
	};
573

            
574
	// We only wrote half of the bytes hence `SIZE / 2`.
575
	benchmark(
576
		"disk random write score",
577
		SIZE / 2,
578
		limit.max_iterations(),
579
		limit.max_duration(),
580
		run,
581
	)
582
}
583

            
584
/// Benchmarks the verification speed of sr25519 signatures.
585
///
586
/// Returns the throughput in B/s by convention.
587
/// The values are rather small (0.4-0.8) so it is advised to convert them into KB/s.
588
pub fn benchmark_sr25519_verify(limit: ExecutionLimit) -> Throughput {
589
	const INPUT_SIZE: usize = 32;
590
	const ITERATION_SIZE: usize = 2048;
591
	let pair = sr25519::Pair::from_string("//Alice", None).unwrap();
592

            
593
	let mut rng = rng();
594
	let mut msgs = Vec::new();
595
	let mut sigs = Vec::new();
596

            
597
	for _ in 0..ITERATION_SIZE {
598
		let mut msg = vec![0u8; INPUT_SIZE];
599
		rng.fill_bytes(&mut msg[..]);
600

            
601
		sigs.push(pair.sign(&msg));
602
		msgs.push(msg);
603
	}
604

            
605
	let run = || -> Result<(), String> {
606
		for (sig, msg) in sigs.iter().zip(msgs.iter()) {
607
			let mut ok = sr25519_verify(&sig, &msg[..], &pair.public());
608
			clobber_value(&mut ok);
609
		}
610
		Ok(())
611
	};
612
	benchmark(
613
		"sr25519 verification score",
614
		INPUT_SIZE * ITERATION_SIZE,
615
		limit.max_iterations(),
616
		limit.max_duration(),
617
		run,
618
	)
619
	.expect("sr25519 verification cannot fail; qed")
620
}
621

            
622
/// Benchmarks the hardware and returns the results of those benchmarks.
623
///
624
/// Optionally accepts a path to a `scratch_directory` to use to benchmark the
625
/// disk. Also accepts the `requirements` for the hardware benchmark and a
626
/// boolean to specify if the node is an authority.
627
pub fn gather_hwbench(scratch_directory: Option<&Path>) -> HwBench {
628
	#[allow(unused_mut)]
629
	let mut hwbench = HwBench {
630
		cpu_hashrate_score: benchmark_cpu(DEFAULT_CPU_EXECUTION_LIMIT),
631
		memory_memcpy_score: benchmark_memory(DEFAULT_MEMORY_EXECUTION_LIMIT),
632
		disk_sequential_write_score: None,
633
		disk_random_write_score: None,
634
	};
635

            
636
	if let Some(scratch_directory) = scratch_directory {
637
		hwbench.disk_sequential_write_score =
638
			match benchmark_disk_sequential_writes(DEFAULT_DISK_EXECUTION_LIMIT, scratch_directory)
639
			{
640
				Ok(score) => Some(score),
641
				Err(error) => {
642
					log::warn!("Failed to run the sequential write disk benchmark: {}", error);
643
					None
644
				},
645
			};
646

            
647
		hwbench.disk_random_write_score =
648
			match benchmark_disk_random_writes(DEFAULT_DISK_EXECUTION_LIMIT, scratch_directory) {
649
				Ok(score) => Some(score),
650
				Err(error) => {
651
					log::warn!("Failed to run the random write disk benchmark: {}", error);
652
					None
653
				},
654
			};
655
	}
656

            
657
	hwbench
658
}
659

            
660
impl Requirements {
661
	/// Whether the hardware requirements are met by the provided benchmark results.
662
	pub fn check_hardware(&self, hwbench: &HwBench) -> Result<(), CheckFailures> {
663
		let mut failures = Vec::new();
664
		for requirement in self.0.iter() {
665
			match requirement.metric {
666
				Metric::Blake2256 =>
667
					if requirement.minimum > hwbench.cpu_hashrate_score {
668
						failures.push(CheckFailure {
669
							metric: requirement.metric,
670
							expected: requirement.minimum,
671
							found: hwbench.cpu_hashrate_score,
672
						});
673
					},
674
				Metric::MemCopy =>
675
					if requirement.minimum > hwbench.memory_memcpy_score {
676
						failures.push(CheckFailure {
677
							metric: requirement.metric,
678
							expected: requirement.minimum,
679
							found: hwbench.memory_memcpy_score,
680
						});
681
					},
682
				Metric::DiskSeqWrite =>
683
					if let Some(score) = hwbench.disk_sequential_write_score {
684
						if requirement.minimum > score {
685
							failures.push(CheckFailure {
686
								metric: requirement.metric,
687
								expected: requirement.minimum,
688
								found: score,
689
							});
690
						}
691
					},
692
				Metric::DiskRndWrite =>
693
					if let Some(score) = hwbench.disk_random_write_score {
694
						if requirement.minimum > score {
695
							failures.push(CheckFailure {
696
								metric: requirement.metric,
697
								expected: requirement.minimum,
698
								found: score,
699
							});
700
						}
701
					},
702
				Metric::Sr25519Verify => {},
703
			}
704
		}
705
		if failures.is_empty() {
706
			Ok(())
707
		} else {
708
			Err(failures.into())
709
		}
710
	}
711
}
712

            
713
#[cfg(test)]
mod tests {
	use super::*;
	use sp_runtime::assert_eq_error_rate_float;

	#[cfg(target_os = "linux")]
	#[test]
	fn test_gather_sysinfo_linux() {
		let sysinfo = gather_sysinfo();
		// On Linux every field should have been populated.
		assert!(!sysinfo.cpu.unwrap().is_empty());
		assert!(sysinfo.core_count.unwrap() > 0);
		assert!(sysinfo.memory.unwrap() > 0);
		assert_ne!(sysinfo.is_virtual_machine, None);
		assert_ne!(sysinfo.linux_kernel, None);
		assert_ne!(sysinfo.linux_distro, None);
	}

	#[test]
	fn test_benchmark_cpu() {
		assert!(benchmark_cpu(DEFAULT_CPU_EXECUTION_LIMIT) > Throughput::from_mibs(0.0));
	}

	#[test]
	fn test_benchmark_memory() {
		assert!(benchmark_memory(DEFAULT_MEMORY_EXECUTION_LIMIT) > Throughput::from_mibs(0.0));
	}

	#[test]
	fn test_benchmark_disk_sequential_writes() {
		assert!(
			benchmark_disk_sequential_writes(DEFAULT_DISK_EXECUTION_LIMIT, "./".as_ref()).unwrap() >
				Throughput::from_mibs(0.0)
		);
	}

	#[test]
	fn test_benchmark_disk_random_writes() {
		assert!(
			benchmark_disk_random_writes(DEFAULT_DISK_EXECUTION_LIMIT, "./".as_ref()).unwrap() >
				Throughput::from_mibs(0.0)
		);
	}

	#[test]
	fn test_benchmark_sr25519_verify() {
		assert!(
			benchmark_sr25519_verify(ExecutionLimit::MaxIterations(1)) > Throughput::from_mibs(0.0)
		);
	}

	/// Test the [`Throughput`].
	#[test]
	fn throughput_works() {
		/// Float precision.
		const EPS: f64 = 0.1;

		// Conversions between units and back again.
		let gib = Throughput::from_gibs(14.324);
		assert_eq_error_rate_float!(14.324, gib.as_gibs(), EPS);
		assert_eq_error_rate_float!(14667.776, gib.as_mibs(), EPS);
		assert_eq_error_rate_float!(14667.776 * 1024.0, gib.as_kibs(), EPS);
		assert_eq!("14.32 GiBs", gib.to_string());

		// Display normalizes to the largest fitting unit.
		let mib = Throughput::from_mibs(1029.0);
		assert_eq!("1.00 GiBs", mib.to_string());
	}

	/// Test the [`HwBench`] serialization.
	#[test]
	fn hwbench_serialize_works() {
		let hwbench = HwBench {
			cpu_hashrate_score: Throughput::from_gibs(1.32),
			memory_memcpy_score: Throughput::from_kibs(9342.432),
			disk_sequential_write_score: Some(Throughput::from_kibs(4332.12)),
			disk_random_write_score: None,
		};

		let serialized = serde_json::to_string(&hwbench).unwrap();
		// Throughput from all of the benchmarks should be converted to MiBs.
		assert_eq!(serialized, "{\"cpu_hashrate_score\":1351,\"memory_memcpy_score\":9,\"disk_sequential_write_score\":4}");
	}
}