diff --git a/Cargo.lock b/Cargo.lock index bfe0d95443..b7ab884a4f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1869,6 +1869,8 @@ version = "0.0.0" dependencies = [ "num-traits", "rustc-hash", + "serde", + "thiserror 1.0.63", ] [[package]] diff --git a/source/pip/qsharp/magnets/geometry/hypergraph.py b/source/pip/qsharp/magnets/geometry/hypergraph.py index fc5cd38447..dd55ebf408 100644 --- a/source/pip/qsharp/magnets/geometry/hypergraph.py +++ b/source/pip/qsharp/magnets/geometry/hypergraph.py @@ -28,7 +28,7 @@ class Hyperedge: vertices: Sorted list of vertex indices connected by this hyperedge. Example: - + .. code-block:: python >>> edge = Hyperedge([2, 0, 1]) >>> edge.vertices @@ -60,7 +60,7 @@ class Hypergraph: _edge_list: Set of hyperedges for efficient membership testing. Example: - + .. code-block:: python >>> edges = [Hyperedge([0, 1]), Hyperedge([1, 2]), Hyperedge([0, 2])] >>> graph = Hypergraph(edges) @@ -113,7 +113,7 @@ def edges(self, part: int = 0) -> Iterator[Hyperedge]: return iter(self._edge_list) def __str__(self) -> str: - return f"Hypergraph with {self.nvertices()} vertices and {self.nedges()} edges." + return f"Hypergraph with {self.nvertices} vertices and {self.nedges} edges." def __repr__(self) -> str: return f"Hypergraph({list(self._edges)})" diff --git a/source/pip/tests/magnets/test_hypergraph.py b/source/pip/tests/magnets/test_hypergraph.py index 79f071f47b..5a050993c9 100755 --- a/source/pip/tests/magnets/test_hypergraph.py +++ b/source/pip/tests/magnets/test_hypergraph.py @@ -55,36 +55,36 @@ def test_hypergraph_init_basic(): """Test basic Hypergraph initialization.""" edges = [Hyperedge([0, 1]), Hyperedge([1, 2])] graph = Hypergraph(edges) - assert graph.nedges() == 2 - assert graph.nvertices() == 3 + assert graph.nedges == 2 + assert graph.nvertices == 3 def test_hypergraph_empty_graph(): """Test hypergraph with no edges.""" graph = Hypergraph([]) - assert graph.nedges() == 0 - assert graph.nvertices() == 0 + assert graph.nedges == 0 + assert graph.nvertices == 0 def test_hypergraph_nedges(): """Test edge count.""" edges = [Hyperedge([0, 1]), Hyperedge([1, 2]), Hyperedge([2, 3])] graph = Hypergraph(edges) - assert graph.nedges() == 3 + assert graph.nedges == 3 def test_hypergraph_nvertices(): """Test vertex count with unique vertices.""" edges = [Hyperedge([0, 1]), Hyperedge([2, 3])] graph = Hypergraph(edges) - assert graph.nvertices() == 4 + assert graph.nvertices == 4 def test_hypergraph_nvertices_with_shared_vertices(): """Test vertex count when edges share vertices.""" edges = [Hyperedge([0, 1]), Hyperedge([1, 2]), Hyperedge([0, 2])] graph = Hypergraph(edges) - assert graph.nvertices() == 3 + assert graph.nvertices == 3 def test_hypergraph_vertices_iterator(): @@ -135,8 +135,8 @@ def test_hypergraph_single_vertex_edges(): """Test hypergraph with self-loop edges.""" edges = [Hyperedge([0]), Hyperedge([1]), Hyperedge([2])] graph = Hypergraph(edges) - assert graph.nedges() == 3 - assert graph.nvertices() == 3 + assert graph.nedges == 3 + assert graph.nvertices == 3 def test_hypergraph_mixed_edge_sizes(): @@ -147,14 +147,14 @@ def test_hypergraph_mixed_edge_sizes(): Hyperedge([3, 4, 5]), # 3 vertices (triple) ] graph = Hypergraph(edges) - assert graph.nedges() == 3 - assert graph.nvertices() == 6 + assert graph.nedges == 3 + assert graph.nvertices == 6 def test_hypergraph_non_contiguous_vertices(): """Test hypergraph with non-contiguous vertex indices.""" edges = [Hyperedge([0, 10]), Hyperedge([5, 20])] graph = Hypergraph(edges) - assert graph.nvertices() == 4 + 
assert graph.nvertices == 4
 
     vertices = list(graph.vertices())
     assert vertices == [0, 5, 10, 20]
diff --git a/source/qre/Cargo.toml b/source/qre/Cargo.toml
index e7e7c80783..88148dca7c 100644
--- a/source/qre/Cargo.toml
+++ b/source/qre/Cargo.toml
@@ -9,8 +9,11 @@ edition.workspace = true
 license.workspace = true
 
 [dependencies]
-rustc-hash = { workspace = true }
 num-traits = { workspace = true }
+rustc-hash = { workspace = true }
+serde = { workspace = true }
+thiserror = { workspace = true }
+
 
 [dev-dependencies]
diff --git a/source/qre/src/isa.rs b/source/qre/src/isa.rs
index c608b91084..310f375c56 100644
--- a/source/qre/src/isa.rs
+++ b/source/qre/src/isa.rs
@@ -8,6 +8,7 @@ use std::{
 
 use num_traits::FromPrimitive;
 use rustc_hash::FxHashMap;
+use serde::{Deserialize, Serialize};
 
 #[cfg(test)]
 mod tests;
@@ -158,7 +159,7 @@ impl FromIterator<Instruction> for ISARequirements {
     }
 }
 
-#[derive(Clone)]
+#[derive(Clone, Serialize, Deserialize)]
 pub struct Instruction {
     id: u64,
     encoding: Encoding,
@@ -328,13 +329,14 @@ impl InstructionConstraint {
     }
 }
 
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Hash, Serialize, Deserialize)]
 pub enum Encoding {
+    #[default]
     Physical,
     Logical,
 }
 
-#[derive(Clone)]
+#[derive(Clone, Serialize, Deserialize)]
 pub enum Metrics {
     FixedArity {
         arity: u64,
@@ -351,7 +353,7 @@ pub enum Metrics {
     },
 }
 
-#[derive(Clone)]
+#[derive(Clone, Serialize, Deserialize)]
 pub enum VariableArityFunction<T> {
     Constant { value: T },
     Linear { slope: T },
diff --git a/source/qre/src/lib.rs b/source/qre/src/lib.rs
index 00d70a2a53..4baa1f9e13 100644
--- a/source/qre/src/lib.rs
+++ b/source/qre/src/lib.rs
@@ -1,8 +1,51 @@
 // Copyright (c) Microsoft Corporation.
 // Licensed under the MIT License.
 
+use thiserror::Error;
+
 mod isa;
+mod pareto;
+pub use pareto::{
+    ParetoFrontier as ParetoFrontier2D, ParetoFrontier3D, ParetoItem2D, ParetoItem3D,
+};
+mod result;
+pub use result::{EstimationCollection, EstimationResult, FactoryResult};
+mod trace;
 pub use isa::{
     ConstraintBound, Encoding, ISA, ISARequirements, Instruction, InstructionConstraint,
     VariableArityFunction,
 };
+pub use trace::instruction_ids;
+pub use trace::{Block, LatticeSurgery, PSSPC, Property, Trace, TraceTransform, estimate_parallel};
+
+/// A resource estimation error.
+#[derive(Clone, Debug, Error, PartialEq)]
+pub enum Error {
+    /// The resource estimation exceeded the maximum allowed error.
+    #[error("resource estimation exceeded the maximum allowed error: {actual_error} > {max_error}")]
+    MaximumErrorExceeded { actual_error: f64, max_error: f64 },
+    /// Missing instruction in the ISA.
+    #[error("requested instruction {0} not present in ISA")]
+    InstructionNotFound(u64),
+    /// Cannot extract space from instruction.
+    #[error("cannot extract space from instruction {0} for fixed arity")]
+    CannotExtractSpace(u64),
+    /// Cannot extract time from instruction.
+    #[error("cannot extract time from instruction {0} for fixed arity")]
+    CannotExtractTime(u64),
+    /// Cannot extract error rate from instruction.
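+    /// (Returned when the instruction defines no error-rate metric for the
+    /// requested fixed arity.)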
+    #[error("cannot extract error rate from instruction {0} for fixed arity")]
+    CannotExtractErrorRate(u64),
+    /// Factory time exceeds algorithm runtime.
+    #[error(
+        "factory instruction {id} time {factory_time} exceeds algorithm runtime {algorithm_runtime}"
+    )]
+    FactoryTimeExceedsAlgorithmRuntime {
+        id: u64,
+        factory_time: u64,
+        algorithm_runtime: u64,
+    },
+    /// Unsupported instruction in trace transformation.
+    #[error("unsupported instruction {id} in trace transformation '{name}'")]
+    UnsupportedInstruction { id: u64, name: &'static str },
+}
diff --git a/source/qre/src/pareto.rs b/source/qre/src/pareto.rs
new file mode 100644
index 0000000000..96f842a0a4
--- /dev/null
+++ b/source/qre/src/pareto.rs
@@ -0,0 +1,259 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+use serde::{Deserialize, Serialize};
+
+#[cfg(test)]
+mod tests;
+
+pub trait ParetoItem2D {
+    type Objective1: PartialOrd + Copy;
+    type Objective2: PartialOrd + Copy;
+
+    fn objective1(&self) -> Self::Objective1;
+    fn objective2(&self) -> Self::Objective2;
+}
+
+pub trait ParetoItem3D {
+    type Objective1: PartialOrd + Copy;
+    type Objective2: PartialOrd + Copy;
+    type Objective3: PartialOrd + Copy;
+
+    fn objective1(&self) -> Self::Objective1;
+    fn objective2(&self) -> Self::Objective2;
+    fn objective3(&self) -> Self::Objective3;
+}
+
+/// A Pareto frontier for 2-dimensional objectives.
+///
+/// The implementation maintains the frontier sorted by the first objective.
+/// This allows for efficient updates based on the geometric property that
+/// a point is dominated if and only if it is dominated by its immediate
+/// predecessor in the sorted list (when sorted by the first objective).
+///
+/// This approach is related to the algorithms described in:
+/// H. T. Kung, F. Luccio, and F. P. Preparata, "On Finding the Maxima of a Set of Vectors,"
+/// Journal of the ACM, vol. 22, no. 4, pp. 469-476, 1975.
+#[derive(Default, Debug, Clone)]
+pub struct ParetoFrontier<I>(pub Vec<I>);
+
+impl<I: ParetoItem2D> ParetoFrontier<I> {
+    #[must_use]
+    pub fn new() -> Self {
+        Self(Vec::new())
+    }
+
+    pub fn insert(&mut self, p: I) {
+        // If any objective is incomparable (e.g. NaN), we silently ignore the item
+        // to maintain the frontier's sorting invariant.
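+        // (Note: x.partial_cmp(&x) is None exactly when x is incomparable with
+        // itself, as for f64::NAN, so the self-comparison below acts as the
+        // incomparability check.)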
+        if p.objective1().partial_cmp(&p.objective1()).is_none()
+            || p.objective2().partial_cmp(&p.objective2()).is_none()
+        {
+            return;
+        }
+
+        let frontier = &mut self.0;
+        let search = frontier.binary_search_by(|q| {
+            q.objective1()
+                .partial_cmp(&p.objective1())
+                .expect("objectives must be comparable")
+        });
+
+        let pos = match search {
+            Ok(i) => {
+                if frontier[i].objective2() <= p.objective2() {
+                    return;
+                }
+                i
+            }
+            Err(i) => {
+                if i > 0 {
+                    let left = &frontier[i - 1];
+                    if left.objective2() <= p.objective2() {
+                        return;
+                    }
+                }
+                i
+            }
+        };
+        while pos < frontier.len() && frontier[pos].objective2() >= p.objective2() {
+            frontier.remove(pos);
+        }
+        frontier.insert(pos, p);
+    }
+
+    pub fn iter(&self) -> std::slice::Iter<'_, I> {
+        self.0.iter()
+    }
+
+    #[must_use]
+    pub fn len(&self) -> usize {
+        self.0.len()
+    }
+
+    #[must_use]
+    pub fn is_empty(&self) -> bool {
+        self.0.is_empty()
+    }
+}
+
+impl<I: ParetoItem2D> Extend<I> for ParetoFrontier<I> {
+    fn extend<T: IntoIterator<Item = I>>(&mut self, iter: T) {
+        for p in iter {
+            self.insert(p);
+        }
+    }
+}
+
+impl<I: ParetoItem2D> FromIterator<I> for ParetoFrontier<I> {
+    fn from_iter<T: IntoIterator<Item = I>>(iter: T) -> Self {
+        let mut frontier = Self::new();
+        frontier.extend(iter);
+        frontier
+    }
+}
+
+impl<I: ParetoItem2D> IntoIterator for ParetoFrontier<I> {
+    type Item = I;
+    type IntoIter = std::vec::IntoIter<I>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.0.into_iter()
+    }
+}
+
+impl<'a, I: ParetoItem2D> IntoIterator for &'a ParetoFrontier<I> {
+    type Item = &'a I;
+    type IntoIter = std::slice::Iter<'a, I>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.0.iter()
+    }
+}
+
+/// A Pareto frontier for 3-dimensional objectives.
+///
+/// The implementation maintains the frontier sorted lexicographically.
+/// Unlike the 2D case where dominance checks are O(1) given the sorted order,
+/// the 3D case requires checking the prefix or suffix to establish dominance,
+/// though maintaining sorted order significantly reduces the search space.
+///
+/// The theoretical O(N log N) bound for constructing the 3D frontier is established in:
+/// H. T. Kung, F. Luccio, and F. P. Preparata, "On Finding the Maxima of a Set of Vectors,"
+/// Journal of the ACM, vol. 22, no. 4, pp. 469-476, 1975.
+#[derive(Default, Debug, Clone, Serialize, Deserialize)]
+#[serde(transparent)]
+pub struct ParetoFrontier3D<I>(pub Vec<I>);
+
+impl<I: ParetoItem3D> ParetoFrontier3D<I> {
+    #[must_use]
+    pub fn new() -> Self {
+        Self(Vec::new())
+    }
+
+    pub fn insert(&mut self, p: I) {
+        // If any objective is incomparable (e.g. NaN), we silently ignore the item.
+        if p.objective1().partial_cmp(&p.objective1()).is_none()
+            || p.objective2().partial_cmp(&p.objective2()).is_none()
+            || p.objective3().partial_cmp(&p.objective3()).is_none()
+        {
+            return;
+        }
+
+        let frontier = &mut self.0;
+
+        // Use lexicographical sort covering all objectives.
+        // This makes the binary search deterministic and ensures specific properties for prefix/suffix.
+        let Err(pos) = frontier.binary_search_by(|q| {
+            q.objective1()
+                .partial_cmp(&p.objective1())
+                .expect("objectives must be comparable")
+                .then_with(|| {
+                    q.objective2()
+                        .partial_cmp(&p.objective2())
+                        .expect("objectives must be comparable")
+                })
+                .then_with(|| {
+                    q.objective3()
+                        .partial_cmp(&p.objective3())
+                        .expect("objectives must be comparable")
+                })
+        }) else {
+            return;
+        };
+
+        // 1. Check if dominated by any existing point in the prefix [0..pos].
+        //    Because the list is sorted lexicographically, any point `q` before `pos`
+        //    satisfies `q.obj1 <= p.obj1` (often strictly less).
+        //    Therefore, we only need to check if `q` is also better in obj2 and obj3.
+        for q in &frontier[..pos] {
+            if q.objective2() <= p.objective2() && q.objective3() <= p.objective3() {
+                return;
+            }
+        }
+
+        // 2. Remove points dominated by the new point in the suffix [pos..].
+        //    Any point `q` at or after `pos` satisfies `p.obj1 <= q.obj1`.
+        //    So `p` can only dominate `q` if `p` is better in obj2 and obj3.
+        let mut i = pos;
+        while i < frontier.len() {
+            let q = &frontier[i];
+            if p.objective2() <= q.objective2() && p.objective3() <= q.objective3() {
+                frontier.remove(i);
+            } else {
+                i += 1;
+            }
+        }
+
+        frontier.insert(pos, p);
+    }
+
+    pub fn iter(&self) -> std::slice::Iter<'_, I> {
+        self.0.iter()
+    }
+
+    #[must_use]
+    pub fn len(&self) -> usize {
+        self.0.len()
+    }
+
+    #[must_use]
+    pub fn is_empty(&self) -> bool {
+        self.0.is_empty()
+    }
+}
+
+impl<I: ParetoItem3D> Extend<I> for ParetoFrontier3D<I> {
+    fn extend<T: IntoIterator<Item = I>>(&mut self, iter: T) {
+        for p in iter {
+            self.insert(p);
+        }
+    }
+}
+
+impl<I: ParetoItem3D> FromIterator<I> for ParetoFrontier3D<I> {
+    fn from_iter<T: IntoIterator<Item = I>>(iter: T) -> Self {
+        let mut frontier = Self::new();
+        frontier.extend(iter);
+        frontier
+    }
+}
+
+impl<I: ParetoItem3D> IntoIterator for ParetoFrontier3D<I> {
+    type Item = I;
+    type IntoIter = std::vec::IntoIter<I>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.0.into_iter()
+    }
+}
+
+impl<'a, I: ParetoItem3D> IntoIterator for &'a ParetoFrontier3D<I> {
+    type Item = &'a I;
+    type IntoIter = std::slice::Iter<'a, I>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.0.iter()
+    }
+}
diff --git a/source/qre/src/pareto/tests.rs b/source/qre/src/pareto/tests.rs
new file mode 100644
index 0000000000..eaf2539c33
--- /dev/null
+++ b/source/qre/src/pareto/tests.rs
@@ -0,0 +1,243 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+use crate::{
+    EstimationCollection, EstimationResult,
+    pareto::{ParetoFrontier, ParetoFrontier3D, ParetoItem2D, ParetoItem3D},
+};
+
+struct Point2D {
+    x: f64,
+    y: f64,
+}
+
+impl ParetoItem2D for Point2D {
+    type Objective1 = f64;
+    type Objective2 = f64;
+
+    fn objective1(&self) -> Self::Objective1 {
+        self.x
+    }
+
+    fn objective2(&self) -> Self::Objective2 {
+        self.y
+    }
+}
+
+#[test]
+fn test_update_frontier() {
+    let mut frontier: ParetoFrontier<Point2D> = ParetoFrontier::new();
+    let p1 = Point2D { x: 1.0, y: 5.0 };
+    frontier.insert(p1);
+    assert_eq!(frontier.0.len(), 1);
+    let p2 = Point2D { x: 2.0, y: 4.0 };
+    frontier.insert(p2);
+    assert_eq!(frontier.0.len(), 2);
+    let p3 = Point2D { x: 1.5, y: 6.0 };
+    frontier.insert(p3);
+    assert_eq!(frontier.0.len(), 2);
+    let p4 = Point2D { x: 3.0, y: 3.0 };
+    frontier.insert(p4);
+    assert_eq!(frontier.0.len(), 3);
+    let p5 = Point2D { x: 2.5, y: 2.0 };
+    frontier.insert(p5);
+    assert_eq!(frontier.0.len(), 3);
+}
+
+#[test]
+fn test_iter_frontier() {
+    let mut frontier: ParetoFrontier<Point2D> = ParetoFrontier::new();
+    frontier.insert(Point2D { x: 1.0, y: 5.0 });
+    frontier.insert(Point2D { x: 2.0, y: 4.0 });
+
+    let mut iter = frontier.iter();
+    let p = iter.next().expect("Has element");
+    assert!((p.x - 1.0).abs() <= f64::EPSILON);
+    assert!((p.y - 5.0).abs() <= f64::EPSILON);
+
+    let p = iter.next().expect("Has element");
+    assert!((p.x - 2.0).abs() <= f64::EPSILON);
+    assert!((p.y - 4.0).abs() <= f64::EPSILON);
+
+    assert!(iter.next().is_none());
+
+    // Test IntoIterator for &ParetoFrontier
+    for p in &frontier {
+        assert!(p.x > 0.0);
+    }
+}
+
+#[derive(Clone, Copy, Debug)]
+struct Point3D {
+    x: f64,
+    y: f64,
+    z: f64,
+}
+
+impl ParetoItem3D for Point3D {
+    type Objective1 = f64;
+    type Objective2 = f64;
+    type Objective3 = f64;
+
+    fn objective1(&self) -> Self::Objective1 {
+        self.x
+    }
+
+    fn objective2(&self) -> Self::Objective2 {
+        self.y
+    }
+
+    fn objective3(&self) -> Self::Objective3 {
+        self.z
+    }
+}
+
+#[test]
+fn test_update_frontier_3d() {
+    let mut frontier: ParetoFrontier3D<Point3D> = ParetoFrontier3D::new();
+
+    // p1: 1, 5, 5
+    let p1 = Point3D {
+        x: 1.0,
+        y: 5.0,
+        z: 5.0,
+    };
+    frontier.insert(p1);
+    assert_eq!(frontier.0.len(), 1);
+
+    // p2: 2, 6, 6 (dominated by p1)
+    let p2 = Point3D {
+        x: 2.0,
+        y: 6.0,
+        z: 6.0,
+    };
+    frontier.insert(p2);
+    assert_eq!(frontier.0.len(), 1);
+
+    // p3: 0.5, 6, 6 (not dominated, x makes it unique)
+    let p3 = Point3D {
+        x: 0.5,
+        y: 6.0,
+        z: 6.0,
+    };
+    frontier.insert(p3);
+    assert_eq!(frontier.0.len(), 2);
+
+    // p4: 1, 4, 4 (dominates p1, should remove p1 and add p4)
+    // p1 (1,5,5). p4 (1,4,4). p4 <= p1? 1<=1, 4<=5, 4<=5. Yes.
+    // p3 (0.5,6,6). p4 (1,4,4). p4 <= p3? 1<=0.5 False.
+    // Result: p1 removed, p4 added. p3 remains.
+    let p4 = Point3D {
+        x: 1.0,
+        y: 4.0,
+        z: 4.0,
+    };
+    frontier.insert(p4);
+    assert_eq!(frontier.0.len(), 2);
+
+    // Verify content (generic check, not order specific)
+    let points: Vec<(f64, f64, f64)> = frontier.iter().map(|p| (p.x, p.y, p.z)).collect();
+
+    // Should contain p3 and p4
+    assert!(
+        points
+            .iter()
+            .any(|p| (p.0 - 0.5).abs() < 1e-9 && (p.1 - 6.0).abs() < 1e-9)
+    );
+    assert!(
+        points
+            .iter()
+            .any(|p| (p.0 - 1.0).abs() < 1e-9 && (p.1 - 4.0).abs() < 1e-9)
+    );
+}
+
+#[test]
+fn test_estimation_results() {
+    let mut result_worst = EstimationResult::new();
+    result_worst.add_qubits(994_570);
+    result_worst.add_runtime(346_196_523_750);
+
+    let mut result_mid = EstimationResult::new();
+    result_mid.add_qubits(994_570);
+    result_mid.add_runtime(346_191_476_400);
+
+    let mut result_best = EstimationResult::new();
+    result_best.add_qubits(994_570);
+    result_best.add_runtime(346_181_381_700);
+
+    let results = [result_worst, result_mid, result_best];
+    let permutations = [
+        [0, 1, 2],
+        [0, 2, 1],
+        [1, 0, 2],
+        [1, 2, 0],
+        [2, 0, 1],
+        [2, 1, 0],
+    ];
+
+    for p in permutations {
+        let mut frontier = EstimationCollection::new();
+        frontier.insert(results[p[0]].clone());
+        frontier.insert(results[p[1]].clone());
+        frontier.insert(results[p[2]].clone());
+        assert_eq!(frontier.len(), 1, "Failed for permutation {p:?}");
+
+        // Verify the retained item is the best one (index 2)
+        let item = frontier.iter().next().expect("has item");
+        assert_eq!(
+            item.runtime(),
+            346_181_381_700,
+            "Wrong item retained for permutation {p:?}",
+        );
+    }
+}
+
+#[test]
+fn test_estimation_results_3d_permutations() {
+    // Check that 3D frontier handles strictly dominating points correctly
+    // even when first dimension is equal.
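+    // Every insertion order below must leave exactly one point, the
+    // dominating p_best, on the frontier.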
+
+    // p_worst: (10, 100, 1000)
+    let p_worst = Point3D {
+        x: 10.0,
+        y: 100.0,
+        z: 1000.0,
+    };
+    // p_mid: (10, 90, 1000) -> Dominates p_worst
+    let p_mid = Point3D {
+        x: 10.0,
+        y: 90.0,
+        z: 1000.0,
+    };
+    // p_best: (10, 80, 1000) -> Dominates p_mid and p_worst
+    let p_best = Point3D {
+        x: 10.0,
+        y: 80.0,
+        z: 1000.0,
+    };
+
+    let results = [p_worst, p_mid, p_best];
+
+    let permutations = [
+        [0, 1, 2],
+        [0, 2, 1],
+        [1, 0, 2],
+        [1, 2, 0],
+        [2, 0, 1],
+        [2, 1, 0],
+    ];
+
+    for p in permutations {
+        let mut frontier = ParetoFrontier3D::new();
+        frontier.insert(results[p[0]]);
+        frontier.insert(results[p[1]]);
+        frontier.insert(results[p[2]]);
+        assert_eq!(frontier.len(), 1, "Failed for 3D permutation {p:?}");
+
+        let item = frontier.iter().next().expect("has item");
+        assert!(
+            (item.y - 80.0).abs() < f64::EPSILON,
+            "Wrong item retained for 3D permutation {p:?}",
+        );
+    }
+}
diff --git a/source/qre/src/result.rs b/source/qre/src/result.rs
new file mode 100644
index 0000000000..fec8bf2135
--- /dev/null
+++ b/source/qre/src/result.rs
@@ -0,0 +1,180 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+use std::{
+    fmt::Display,
+    ops::{Deref, DerefMut},
+};
+
+use rustc_hash::FxHashMap;
+
+use crate::{ParetoFrontier2D, ParetoItem2D};
+
+#[derive(Clone, Default)]
+pub struct EstimationResult {
+    qubits: u64,
+    runtime: u64,
+    error: f64,
+    factories: FxHashMap<u64, FactoryResult>,
+}
+
+impl EstimationResult {
+    #[must_use]
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    #[must_use]
+    pub fn qubits(&self) -> u64 {
+        self.qubits
+    }
+
+    #[must_use]
+    pub fn runtime(&self) -> u64 {
+        self.runtime
+    }
+
+    #[must_use]
+    pub fn error(&self) -> f64 {
+        self.error
+    }
+
+    #[must_use]
+    pub fn factories(&self) -> &FxHashMap<u64, FactoryResult> {
+        &self.factories
+    }
+
+    pub fn set_qubits(&mut self, qubits: u64) {
+        self.qubits = qubits;
+    }
+
+    pub fn set_runtime(&mut self, runtime: u64) {
+        self.runtime = runtime;
+    }
+
+    pub fn set_error(&mut self, error: f64) {
+        self.error = error;
+    }
+
+    /// Adds to the current qubit count and returns the new value.
+    pub fn add_qubits(&mut self, qubits: u64) -> u64 {
+        self.qubits += qubits;
+        self.qubits
+    }
+
+    /// Adds to the current runtime and returns the new value.
+    pub fn add_runtime(&mut self, runtime: u64) -> u64 {
+        self.runtime += runtime;
+        self.runtime
+    }
+
+    /// Adds to the current error and returns the new value.
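+    /// (Error contributions are accumulated additively, i.e. as a first-order
+    /// union bound on the total failure probability.)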
+    pub fn add_error(&mut self, error: f64) -> f64 {
+        self.error += error;
+        self.error
+    }
+
+    pub fn add_factory_result(&mut self, id: u64, result: FactoryResult) {
+        self.factories.insert(id, result);
+    }
+}
+
+impl Display for EstimationResult {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "Qubits: {}, Runtime: {}, Error: {}",
+            self.qubits, self.runtime, self.error
+        )?;
+
+        if !self.factories.is_empty() {
+            for (id, factory) in &self.factories {
+                write!(
+                    f,
+                    ", {id}: {} runs x {} copies",
+                    factory.runs(),
+                    factory.copies()
+                )?;
+            }
+        }
+
+        Ok(())
+    }
+}
+
+impl ParetoItem2D for EstimationResult {
+    type Objective1 = u64; // qubits
+    type Objective2 = u64; // runtime
+
+    fn objective1(&self) -> Self::Objective1 {
+        self.qubits
+    }
+
+    fn objective2(&self) -> Self::Objective2 {
+        self.runtime
+    }
+}
+
+#[derive(Default)]
+pub struct EstimationCollection(ParetoFrontier2D<EstimationResult>);
+
+impl EstimationCollection {
+    #[must_use]
+    pub fn new() -> Self {
+        Self::default()
+    }
+}
+
+impl Deref for EstimationCollection {
+    type Target = ParetoFrontier2D<EstimationResult>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+impl DerefMut for EstimationCollection {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.0
+    }
+}
+
+#[derive(Clone)]
+pub struct FactoryResult {
+    copies: u64,
+    runs: u64,
+    states: u64,
+    error_rate: f64,
+}
+
+impl FactoryResult {
+    #[must_use]
+    pub fn new(copies: u64, runs: u64, states: u64, error_rate: f64) -> Self {
+        Self {
+            copies,
+            runs,
+            states,
+            error_rate,
+        }
+    }
+
+    #[must_use]
+    pub fn copies(&self) -> u64 {
+        self.copies
+    }
+
+    #[must_use]
+    pub fn runs(&self) -> u64 {
+        self.runs
+    }
+
+    #[must_use]
+    pub fn states(&self) -> u64 {
+        self.states
+    }
+
+    #[must_use]
+    pub fn error_rate(&self) -> f64 {
+        self.error_rate
+    }
+}
diff --git a/source/qre/src/trace.rs b/source/qre/src/trace.rs
new file mode 100644
index 0000000000..0193b3c9db
--- /dev/null
+++ b/source/qre/src/trace.rs
@@ -0,0 +1,590 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
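+
+//! Execution traces: gate operations arranged in nested, repeatable blocks,
+//! together with resource-state bookkeeping and estimation of qubits,
+//! runtime, and error against an `ISA`.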
+
+use std::fmt::{Display, Formatter};
+
+use rustc_hash::{FxHashMap, FxHashSet};
+
+use crate::{Error, EstimationCollection, EstimationResult, FactoryResult, ISA, Instruction};
+
+pub mod instruction_ids;
+#[cfg(test)]
+mod tests;
+
+mod transforms;
+pub use transforms::{LatticeSurgery, PSSPC, TraceTransform};
+
+#[derive(Clone, Default)]
+pub struct Trace {
+    block: Block,
+    base_error: f64,
+    compute_qubits: u64,
+    memory_qubits: Option<u64>,
+    resource_states: Option<FxHashMap<u64, u64>>,
+    properties: FxHashMap<String, Property>,
+}
+
+impl Trace {
+    #[must_use]
+    pub fn new(compute_qubits: u64) -> Self {
+        Self {
+            compute_qubits,
+            ..Default::default()
+        }
+    }
+
+    #[must_use]
+    pub fn clone_empty(&self, compute_qubits: Option<u64>) -> Self {
+        Self {
+            block: Block::default(),
+            base_error: self.base_error,
+            compute_qubits: compute_qubits.unwrap_or(self.compute_qubits),
+            memory_qubits: self.memory_qubits,
+            resource_states: self.resource_states.clone(),
+            properties: self.properties.clone(),
+        }
+    }
+
+    #[must_use]
+    pub fn compute_qubits(&self) -> u64 {
+        self.compute_qubits
+    }
+
+    pub fn add_operation(&mut self, id: u64, qubits: Vec<u64>, params: Vec<f64>) {
+        self.block.add_operation(id, qubits, params);
+    }
+
+    pub fn add_block(&mut self, repetitions: u64) -> &mut Block {
+        self.block.add_block(repetitions)
+    }
+
+    #[must_use]
+    pub fn base_error(&self) -> f64 {
+        self.base_error
+    }
+
+    pub fn increment_base_error(&mut self, amount: f64) {
+        self.base_error += amount;
+    }
+
+    pub fn increment_resource_state(&mut self, resource_id: u64, amount: u64) {
+        if amount == 0 {
+            return;
+        }
+        let states = self.resource_states.get_or_insert_with(FxHashMap::default);
+        *states.entry(resource_id).or_default() += amount;
+    }
+
+    #[must_use]
+    pub fn get_resource_states(&self) -> Option<&FxHashMap<u64, u64>> {
+        self.resource_states.as_ref()
+    }
+
+    #[must_use]
+    pub fn get_resource_state_count(&self, resource_id: u64) -> u64 {
+        if let Some(states) = &self.resource_states
+            && let Some(count) = states.get(&resource_id)
+        {
+            return *count;
+        }
+        0
+    }
+
+    pub fn set_property(&mut self, key: String, value: Property) {
+        self.properties.insert(key, value);
+    }
+
+    #[must_use]
+    pub fn get_property(&self, key: &str) -> Option<&Property> {
+        self.properties.get(key)
+    }
+
+    #[must_use]
+    pub fn deep_iter(&self) -> TraceIterator<'_> {
+        TraceIterator::new(&self.block)
+    }
+
+    #[must_use]
+    pub fn depth(&self) -> u64 {
+        self.block.depth()
+    }
+
+    #[allow(
+        clippy::cast_precision_loss,
+        clippy::cast_possible_truncation,
+        clippy::cast_sign_loss
+    )]
+    pub fn estimate(&self, isa: &ISA, max_error: Option<f64>) -> Result<EstimationResult, Error> {
+        let max_error = max_error.unwrap_or(1.0);
+
+        if self.base_error > max_error {
+            return Err(Error::MaximumErrorExceeded {
+                actual_error: self.base_error,
+                max_error,
+            });
+        }
+
+        let mut result = EstimationResult::new();
+
+        // base error starts with the error already present in the trace
+        result.add_error(self.base_error);
+
+        // Counts how many magic state factories are needed per resource state ID
+        let mut factories: FxHashMap<u64, u64> = FxHashMap::default();
+
+        // This will track the number of physical qubits per logical qubit while
+        // processing all the instructions. Normally, we assume that the number
+        // is always the same.
+        let mut qubit_counts: Vec<f64> = vec![];
+
+        // ------------------------------------------------------------------
+        // Add errors from resource states. Allow callable error rates.
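+        // Each resource state id contributes (error rate) x (count) to the
+        // accumulated error budget; e.g. 1000 states at rate 1e-4 add 0.1.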
+ // ------------------------------------------------------------------ + if let Some(resource_states) = &self.resource_states { + for (state_id, count) in resource_states { + let rate = get_error_rate_by_id(isa, *state_id)?; + let actual_error = result.add_error(rate * (*count as f64)); + if actual_error > max_error { + return Err(Error::MaximumErrorExceeded { + actual_error, + max_error, + }); + } + factories.insert(*state_id, *count); + } + } + + // ------------------------------------------------------------------ + // Gate error accumulation using recursion over block structure. + // Each block contributes repetitions * internal_gate_errors. + // Missing instructions raise an error. Callable rates use arity. + // ------------------------------------------------------------------ + for (gate, mult) in self.deep_iter() { + let instr = get_instruction(isa, gate.id)?; + + let arity = gate.qubits.len() as u64; + + let rate = instr.expect_error_rate(Some(arity)); + + let qubit_count = instr.expect_space(Some(arity)) as f64 / arity as f64; + + if let Err(i) = qubit_counts.binary_search_by(|qc| qc.total_cmp(&qubit_count)) { + qubit_counts.insert(i, qubit_count); + } + + let actual_error = result.add_error(rate * (mult as f64)); + if actual_error > max_error { + return Err(Error::MaximumErrorExceeded { + actual_error, + max_error, + }); + } + } + + let total_compute_qubits = (self.compute_qubits() as f64 + * qubit_counts.last().copied().unwrap_or(1.0)) + .ceil() as u64; + result.add_qubits(total_compute_qubits); + + result.add_runtime( + self.block + .depth_and_used(Some(&|op: &Gate| { + let instr = get_instruction(isa, op.id)?; + Ok(instr.expect_time(Some(op.qubits.len() as u64))) + }))? + .0, + ); + + // ------------------------------------------------------------------ + // Factory overhead estimation. Each factory produces states at + // a certain rate, so we need enough copies to meet the demand. 
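+        // With runs = runtime / factory_time sequential runs per copy, the
+        // required copies are ceil(states_needed / runs); each copy adds the
+        // factory's space to the qubit total.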
+        // ------------------------------------------------------------------
+        for (factory, count) in &factories {
+            let instr = get_instruction(isa, *factory)?;
+            let factory_time = get_time(instr)?;
+            let factory_space = get_space(instr)?;
+            let factory_error_rate = get_error_rate(instr)?;
+            let runs = result.runtime() / factory_time;
+
+            if runs == 0 {
+                return Err(Error::FactoryTimeExceedsAlgorithmRuntime {
+                    id: *factory,
+                    factory_time,
+                    algorithm_runtime: result.runtime(),
+                });
+            }
+
+            let copies = count.div_ceil(runs);
+
+            result.add_qubits(copies * factory_space);
+            result.add_factory_result(
+                *factory,
+                FactoryResult::new(copies, runs, *count, factory_error_rate),
+            );
+        }
+
+        Ok(result)
+    }
+}
+
+impl Display for Trace {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        writeln!(f, "@compute_qubits({})", self.compute_qubits())?;
+
+        if let Some(memory_qubits) = self.memory_qubits {
+            writeln!(f, "@memory_qubits({memory_qubits})")?;
+        }
+        if self.base_error > 0.0 {
+            writeln!(f, "@base_error({})", self.base_error)?;
+        }
+        if let Some(resource_states) = &self.resource_states {
+            for (res_id, amount) in resource_states {
+                writeln!(f, "@resource_state({res_id}, {amount})")?;
+            }
+        }
+        write!(f, "{}", self.block)
+    }
+}
+
+#[derive(Clone, Debug)]
+pub enum Operation {
+    GateOperation(Gate),
+    BlockOperation(Block),
+}
+
+#[derive(Clone, Debug)]
+pub struct Gate {
+    id: u64,
+    qubits: Vec<u64>,
+    params: Vec<f64>,
+}
+
+#[derive(Clone, Debug)]
+pub struct Block {
+    operations: Vec<Operation>,
+    repetitions: u64,
+}
+
+impl Default for Block {
+    fn default() -> Self {
+        Self {
+            operations: Vec::new(),
+            repetitions: 1,
+        }
+    }
+}
+
+impl Block {
+    pub fn add_operation(&mut self, id: u64, qubits: Vec<u64>, params: Vec<f64>) {
+        self.operations
+            .push(Operation::gate_operation(id, qubits, params));
+    }
+
+    pub fn add_block(&mut self, repetitions: u64) -> &mut Block {
+        self.operations
+            .push(Operation::block_operation(repetitions));
+
+        match self.operations.last_mut() {
+            Some(Operation::BlockOperation(b)) => b,
+            _ => unreachable!("Last operation must be a block operation"),
+        }
+    }
+
+    pub fn write(&self, f: &mut Formatter<'_>, indent: usize) -> std::fmt::Result {
+        let indent_str = " ".repeat(indent);
+        if self.repetitions == 1 {
+            writeln!(f, "{indent_str}{{")?;
+        } else {
+            writeln!(f, "{indent_str}repeat {} {{", self.repetitions)?;
+        }
+
+        for op in &self.operations {
+            match op {
+                Operation::GateOperation(Gate { id, qubits, params }) => {
+                    writeln!(f, "{indent_str}  {id}({params:?})({qubits:?})")?;
+                }
+                Operation::BlockOperation(b) => {
+                    b.write(f, indent + 2)?;
+                }
+            }
+        }
+        writeln!(f, "{indent_str}}}")
+    }
+
+    fn depth_and_used<FnDuration: Fn(&Gate) -> Result<u64, Error>>(
+        &self,
+        duration_fn: Option<&FnDuration>,
+    ) -> Result<(u64, FxHashSet<u64>), Error> {
+        let mut qubit_depths: FxHashMap<u64, u64> = FxHashMap::default();
+        let mut all_used = FxHashSet::default();
+
+        for op in &self.operations {
+            match op {
+                Operation::GateOperation(gate) => {
+                    let start_time = gate
+                        .qubits
+                        .iter()
+                        .filter_map(|q| qubit_depths.get(q))
+                        .max()
+                        .copied()
+                        .unwrap_or(0);
+
+                    let duration = match duration_fn {
+                        Some(f) => f(gate)?,
+                        None => 1,
+                    };
+
+                    let end_time = start_time + duration;
+                    for q in &gate.qubits {
+                        qubit_depths.insert(*q, end_time);
+                        all_used.insert(*q);
+                    }
+                }
+                Operation::BlockOperation(block) => {
+                    let (duration, used) = block.depth_and_used(duration_fn)?;
+                    if used.is_empty() {
+                        continue;
+                    }
+
+                    let start_time = used
+                        .iter()
+                        .filter_map(|q| qubit_depths.get(q))
+                        .max()
+                        .copied()
+                        .unwrap_or(0);
+
+                    let end_time = start_time + duration;
+                    for q in &used {
+                        qubit_depths.insert(*q, end_time);
+                    }
+                    all_used.extend(used);
+                }
+            }
+        }
+
+        let max_depth = qubit_depths.values().max().copied().unwrap_or(0);
+        Ok((max_depth * self.repetitions, all_used))
+    }
+
+    #[must_use]
+    pub fn depth(&self) -> u64 {
+        self.depth_and_used::<fn(&Gate) -> Result<u64, Error>>(None)
+            .expect("Duration function is None")
+            .0
+    }
+}
+
+impl Display for Block {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        self.write(f, 0)
+    }
+}
+
+impl Operation {
+    fn gate_operation(id: u64, qubits: Vec<u64>, params: Vec<f64>) -> Self {
+        Operation::GateOperation(Gate { id, qubits, params })
+    }
+
+    fn block_operation(repetitions: u64) -> Self {
+        Operation::BlockOperation(Block {
+            operations: Vec::new(),
+            repetitions,
+        })
+    }
+}
+
+pub struct TraceIterator<'a> {
+    stack: Vec<(std::slice::Iter<'a, Operation>, u64)>,
+}
+
+impl<'a> TraceIterator<'a> {
+    fn new(block: &'a Block) -> Self {
+        Self {
+            stack: vec![(block.operations.iter(), 1)],
+        }
+    }
+}
+
+impl<'a> Iterator for TraceIterator<'a> {
+    type Item = (&'a Gate, u64);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        loop {
+            let (iter, multiplier) = self.stack.last_mut()?;
+            match iter.next() {
+                Some(op) => match op {
+                    Operation::GateOperation(g) => return Some((g, *multiplier)),
+                    Operation::BlockOperation(block) => {
+                        let new_multiplier = *multiplier * block.repetitions;
+                        self.stack.push((block.operations.iter(), new_multiplier));
+                    }
+                },
+                None => {
+                    self.stack.pop();
+                }
+            }
+        }
+    }
+}
+
+#[derive(Clone)]
+pub enum Property {
+    Bool(bool),
+    Int(i64),
+    Float(f64),
+    Str(String),
+}
+
+impl Property {
+    #[must_use]
+    pub fn new_bool(b: bool) -> Self {
+        Property::Bool(b)
+    }
+
+    #[must_use]
+    pub fn new_int(i: i64) -> Self {
+        Property::Int(i)
+    }
+
+    #[must_use]
+    pub fn new_float(f: f64) -> Self {
+        Property::Float(f)
+    }
+
+    #[must_use]
+    pub fn new_str(s: String) -> Self {
+        Property::Str(s)
+    }
+
+    #[must_use]
+    pub fn as_bool(&self) -> Option<bool> {
+        match self {
+            Property::Bool(b) => Some(*b),
+            _ => None,
+        }
+    }
+
+    #[must_use]
+    pub fn as_int(&self) -> Option<i64> {
+        match self {
+            Property::Int(i) => Some(*i),
+            _ => None,
+        }
+    }
+
+    #[must_use]
+    pub fn as_float(&self) -> Option<f64> {
+        match self {
+            Property::Float(f) => Some(*f),
+            _ => None,
+        }
+    }
+
+    #[must_use]
+    pub fn as_str(&self) -> Option<&str> {
+        match self {
+            Property::Str(s) => Some(s),
+            _ => None,
+        }
+    }
+
+    #[must_use]
+    pub fn is_bool(&self) -> bool {
+        matches!(self, Property::Bool(_))
+    }
+
+    #[must_use]
+    pub fn is_int(&self) -> bool {
+        matches!(self, Property::Int(_))
+    }
+
+    #[must_use]
+    pub fn is_float(&self) -> bool {
+        matches!(self, Property::Float(_))
+    }
+
+    #[must_use]
+    pub fn is_str(&self) -> bool {
+        matches!(self, Property::Str(_))
+    }
+}
+
+impl Display for Property {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        match self {
+            Property::Bool(b) => write!(f, "{b}"),
+            Property::Int(i) => write!(f, "{i}"),
+            Property::Float(fl) => write!(f, "{fl}"),
+            Property::Str(s) => write!(f, "{s}"),
+        }
+    }
+}
+
+// Some helper functions to extract instructions and their metrics together with
+// error handling
+
+fn get_instruction(isa: &ISA, id: u64) -> Result<&Instruction, Error> {
+    isa.get(&id).ok_or(Error::InstructionNotFound(id))
+}
+
+fn get_space(instruction: &Instruction) -> Result<u64, Error> {
+    instruction
+        .space(None)
+        .ok_or(Error::CannotExtractSpace(instruction.id()))
+}
+
+fn get_time(instruction: &Instruction) -> Result<u64, Error> {
+    instruction
+        .time(None)
+        .ok_or(Error::CannotExtractTime(instruction.id()))
+}
+
+fn get_error_rate(instruction: &Instruction) -> Result<f64, Error> {
+    instruction
+        .error_rate(None)
+        .ok_or(Error::CannotExtractErrorRate(instruction.id()))
+}
+
+fn get_error_rate_by_id(isa: &ISA, id: u64) -> Result<f64, Error> {
+    let instr = get_instruction(isa, id)?;
+    instr
+        .error_rate(None)
+        .ok_or(Error::CannotExtractErrorRate(id))
+}
+
+fn estimate_chunks<'a>(traces: &[&'a Trace], isas: &[&'a ISA]) -> Vec<EstimationResult> {
+    let mut local_collection = Vec::new();
+    for trace in traces {
+        for isa in isas {
+            if let Ok(estimation) = trace.estimate(isa, None) {
+                local_collection.push(estimation);
+            }
+        }
+    }
+    local_collection
+}
+
+#[must_use]
+pub fn estimate_parallel<'a>(traces: &[&'a Trace], isas: &[&'a ISA]) -> EstimationCollection {
+    let mut collection = EstimationCollection::new();
+    std::thread::scope(|scope| {
+        let num_threads = std::thread::available_parallelism()
+            .map(std::num::NonZero::get)
+            .unwrap_or(1);
+        // Guard against an empty `traces` slice: `chunks(0)` would panic.
+        let chunk_size = traces.len().div_ceil(num_threads).max(1);
+
+        let (tx, rx) = std::sync::mpsc::sync_channel(num_threads);
+
+        for chunk in traces.chunks(chunk_size) {
+            let tx = tx.clone();
+            scope.spawn(move || tx.send(estimate_chunks(chunk, isas)));
+        }
+        drop(tx);
+
+        for local_collection in rx.iter().take(num_threads) {
+            collection.extend(local_collection);
+        }
+    });
+
+    collection
+}
diff --git a/source/qre/src/trace/instruction_ids.rs b/source/qre/src/trace/instruction_ids.rs
new file mode 100644
index 0000000000..f8f78bc958
--- /dev/null
+++ b/source/qre/src/trace/instruction_ids.rs
@@ -0,0 +1,145 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+// NOTE: Define new instruction ids here. Then:
+// - add them to `add_instruction_ids` in qre.rs
+// - add them to instruction_ids.pyi
+
+pub const PAULI_I: u64 = 0x0;
+pub const PAULI_X: u64 = 0x1;
+pub const PAULI_Y: u64 = 0x2;
+pub const PAULI_Z: u64 = 0x3;
+pub const H: u64 = 0x10;
+pub const H_XZ: u64 = 0x10;
+pub const H_XY: u64 = 0x11;
+pub const H_YZ: u64 = 0x12;
+pub const SQRT_X: u64 = 0x13;
+pub const SQRT_X_DAG: u64 = 0x14;
+pub const SQRT_Y: u64 = 0x15;
+pub const SQRT_Y_DAG: u64 = 0x16;
+pub const S: u64 = 0x17;
+pub const SQRT_Z: u64 = 0x17;
+pub const S_DAG: u64 = 0x18;
+pub const SQRT_Z_DAG: u64 = 0x18;
+pub const CNOT: u64 = 0x19;
+pub const CX: u64 = 0x19;
+pub const CY: u64 = 0x1A;
+pub const CZ: u64 = 0x1B;
+pub const SWAP: u64 = 0x1C;
+pub const PREP_X: u64 = 0x30;
+pub const PREP_Y: u64 = 0x31;
+pub const PREP_Z: u64 = 0x32;
+pub const ONE_QUBIT_CLIFFORD: u64 = 0x50;
+pub const TWO_QUBIT_CLIFFORD: u64 = 0x51;
+pub const N_QUBIT_CLIFFORD: u64 = 0x52;
+pub const MEAS_X: u64 = 0x100;
+pub const MEAS_Y: u64 = 0x101;
+pub const MEAS_Z: u64 = 0x102;
+pub const MEAS_RESET_X: u64 = 0x103;
+pub const MEAS_RESET_Y: u64 = 0x104;
+pub const MEAS_RESET_Z: u64 = 0x105;
+pub const MEAS_XX: u64 = 0x106;
+pub const MEAS_YY: u64 = 0x107;
+pub const MEAS_ZZ: u64 = 0x108;
+pub const MEAS_XZ: u64 = 0x109;
+pub const MEAS_XY: u64 = 0x10A;
+pub const MEAS_YZ: u64 = 0x10B;
+pub const SQRT_SQRT_X: u64 = 0x400;
+pub const SQRT_SQRT_X_DAG: u64 = 0x401;
+pub const SQRT_SQRT_Y: u64 = 0x402;
+pub const SQRT_SQRT_Y_DAG: u64 = 0x403;
+pub const SQRT_SQRT_Z: u64 = 0x404;
+pub const T: u64 = 0x404;
+pub const SQRT_SQRT_Z_DAG: u64 = 0x405;
+pub const T_DAG: u64 = 0x405;
+pub const CCX: u64 = 0x406;
+pub const CCY: u64 = 0x407;
+pub const CCZ: u64 = 0x408;
+pub const CSWAP: u64 = 0x409;
+pub const AND: u64 = 0x40A;
+pub const AND_DAG: u64 = 0x40B;
+pub const RX: u64 = 0x40C;
+pub const RY: u64 = 0x40D;
+pub const RZ: u64 = 0x40E;
+pub const CRX: u64 = 0x40F;
+pub const CRY: u64 = 0x410;
+pub const CRZ: u64 = 0x411;
+pub const RXX: u64 = 0x412;
+pub const RYY: u64 = 0x413;
+pub const RZZ: u64 = 0x414;
+pub const MULTI_PAULI_MEAS: u64 = 0x1000;
+pub const LATTICE_SURGERY: u64 = 0x1100;
+pub const READ_FROM_MEMORY: u64 = 0x1200;
+pub const WRITE_TO_MEMORY: u64 = 0x1201;
+pub const CYCLIC_SHIFT: u64 = 0x1300;
+pub const GENERIC: u64 = 0xFFFF;
+
+#[must_use]
+pub fn is_pauli_measurement(id: u64) -> bool {
+    matches!(
+        id,
+        MEAS_X
+            | MEAS_Y
+            | MEAS_Z
+            | MEAS_XX
+            | MEAS_YY
+            | MEAS_ZZ
+            | MEAS_XZ
+            | MEAS_XY
+            | MEAS_YZ
+            | MULTI_PAULI_MEAS
+    )
+}
+
+#[must_use]
+pub fn is_t_like(id: u64) -> bool {
+    matches!(
+        id,
+        SQRT_SQRT_X
+            | SQRT_SQRT_X_DAG
+            | SQRT_SQRT_Y
+            | SQRT_SQRT_Y_DAG
+            | SQRT_SQRT_Z
+            | SQRT_SQRT_Z_DAG
+    )
+}
+
+#[must_use]
+pub fn is_ccx_like(id: u64) -> bool {
+    matches!(id, CCX | CCY | CCZ | CSWAP | AND | AND_DAG)
+}
+
+#[must_use]
+pub fn is_rotation_like(id: u64) -> bool {
+    matches!(id, RX | RY | RZ | RXX | RYY | RZZ)
+}
+
+#[must_use]
+pub fn is_clifford(id: u64) -> bool {
+    matches!(
+        id,
+        PAULI_I
+            | PAULI_X
+            | PAULI_Y
+            | PAULI_Z
+            | H_XZ
+            | H_XY
+            | H_YZ
+            | SQRT_X
+            | SQRT_X_DAG
+            | SQRT_Y
+            | SQRT_Y_DAG
+            | SQRT_Z
+            | SQRT_Z_DAG
+            | CX
+            | CY
+            | CZ
+            | SWAP
+            | PREP_X
+            | PREP_Y
+            | PREP_Z
+            | ONE_QUBIT_CLIFFORD
+            | TWO_QUBIT_CLIFFORD
+            | N_QUBIT_CLIFFORD
+    )
+}
diff --git a/source/qre/src/trace/tests.rs b/source/qre/src/trace/tests.rs
new file mode 100644
index 0000000000..6509b30048
--- /dev/null
+++ b/source/qre/src/trace/tests.rs
@@ -0,0 +1,240 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+#[test]
+fn test_trace_iteration() {
+    use crate::trace::Trace;
+
+    let mut trace = Trace::new(2);
+    trace.add_operation(1, vec![0], vec![]);
+    trace.add_operation(2, vec![1], vec![]);
+
+    assert_eq!(trace.deep_iter().count(), 2);
+}
+
+#[test]
+fn test_nested_blocks() {
+    use crate::trace::Trace;
+
+    let mut trace = Trace::new(3);
+    trace.add_operation(1, vec![0], vec![]);
+    let block = trace.add_block(2);
+    block.add_operation(2, vec![1], vec![]);
+    let block = block.add_block(3);
+    block.add_operation(3, vec![2], vec![]);
+    trace.add_operation(1, vec![0], vec![]);
+
+    let repetitions = trace.deep_iter().map(|(_, rep)| rep).collect::<Vec<_>>();
+    assert_eq!(repetitions.len(), 4);
+    assert_eq!(repetitions, vec![1, 2, 6, 1]);
+}
+
+#[test]
+fn test_depth_simple() {
+    use crate::trace::Trace;
+
+    let mut trace = Trace::new(2);
+    trace.add_operation(1, vec![0], vec![]);
+    trace.add_operation(2, vec![1], vec![]);
+
+    // Operations are parallel
+    assert_eq!(trace.depth(), 1);
+
+    trace.add_operation(3, vec![0], vec![]);
+    // Operation on qubit 0 is sequential to first one
+    assert_eq!(trace.depth(), 2);
+}
+
+#[test]
+fn test_depth_with_blocks() {
+    use crate::trace::Trace;
+
+    let mut trace = Trace::new(2);
+    trace.add_operation(1, vec![0], vec![]); // Depth 1 on q0
+
+    let block = trace.add_block(2);
+    block.add_operation(2, vec![1], vec![]); // Depth 1 on q1 * 2 reps = 2
+
+    // Block acts as barrier *only on qubits it touches*.
+    // q1 is touched. q0 is not.
+    // q0 stays at depth 1.
+    // q1 ends at depth 2.
+
+    trace.add_operation(3, vec![0], vec![]);
+    // Next op starts at depth 1 (after op 1). Ends at 2.
+
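+    // Final per-qubit depths: q0 = 2 (ops 1 and 3), q1 = 2 (the repeated
+    // block), so the overall trace depth asserted below is 2.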
+ + assert_eq!(trace.depth(), 2); +} + +#[test] +fn test_depth_parallel_blocks() { + use crate::trace::Trace; + + let mut trace = Trace::new(4); + + let block1 = trace.add_block(1); + block1.add_operation(1, vec![0], vec![]); // q0: 1 + + let block2 = trace.add_block(1); + block2.add_operation(2, vec![1], vec![]); // q1: 1 + + // Blocks are parallel + assert_eq!(trace.depth(), 1); + + trace.add_operation(3, vec![0, 1], vec![]); + // Dependent on q0 (1) and q1 (1). Start at 1. End at 2. + + assert_eq!(trace.depth(), 2); +} + +#[test] +fn test_depth_entangled() { + use crate::trace::Trace; + + let mut trace = Trace::new(2); + trace.add_operation(1, vec![0], vec![]); // q0: 1 + trace.add_operation(2, vec![1], vec![]); // q1: 1 + + trace.add_operation(3, vec![0, 1], vec![]); // q0, q1 synced at 1 -> end at 2 + + assert_eq!(trace.depth(), 2); + + trace.add_operation(4, vec![0], vec![]); // q0: 3 + assert_eq!(trace.depth(), 3); +} + +#[test] +fn test_psspc_transform() { + use crate::trace::{PSSPC, Trace, TraceTransform, instruction_ids::*}; + + let mut trace = Trace::new(3); + + trace.add_operation(T, vec![0], vec![]); + trace.add_operation(CCX, vec![0, 1, 2], vec![]); + trace.add_operation(RZ, vec![0], vec![0.1]); + trace.add_operation(CX, vec![0, 1], vec![]); + trace.add_operation(RZ, vec![1], vec![0.2]); + trace.add_operation(MEAS_Z, vec![0], vec![]); + + // Configure PSSPC with 20 T states per rotation, include CCX magic states + let psspc = PSSPC::new(20, true); + + let transformed = psspc.transform(&trace).expect("Transformation failed"); + + assert_eq!(transformed.compute_qubits(), 12); + assert_eq!(transformed.depth(), 47); + + assert_eq!(transformed.get_resource_state_count(T), 41); + assert_eq!(transformed.get_resource_state_count(CCX), 1); + + assert!(transformed.base_error() > 0.0); + // Error is roughly 5e-9 for 20 Ts + assert!(transformed.base_error() < 1e-8); +} + +#[test] +fn test_lattice_surgery_transform() { + use crate::trace::{LatticeSurgery, Trace, TraceTransform, instruction_ids::*}; + + let mut trace = Trace::new(3); + + trace.add_operation(T, vec![0], vec![]); + trace.add_operation(CX, vec![1, 2], vec![]); + trace.add_operation(T, vec![0], vec![]); + + assert_eq!(trace.depth(), 2); + + let ls = LatticeSurgery::new(); + let transformed = ls.transform(&trace).expect("Transformation failed"); + + assert_eq!(transformed.compute_qubits(), 3); + assert_eq!(transformed.depth(), 2); + + // Check that we have a LATTICE_SURGERY operation + // TraceIterator visits the operation definition once, but with a multiplier. 
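+    // (The `LatticeSurgery` transform emits a single gate inside a
+    // `repeat 2 { .. }` block, so the iterator yields it once with
+    // multiplier 2.)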
let ls_ops: Vec<_> = transformed
        .deep_iter()
        .filter(|(gate, _)| gate.id == LATTICE_SURGERY)
        .collect();
+
+    assert_eq!(ls_ops.len(), 1);
+
+    let (gate, mult) = ls_ops[0];
+    assert_eq!(gate.id, LATTICE_SURGERY);
+    assert_eq!(mult, 2); // Multiplier should carry the repetition count (depth)
+}
+
+#[test]
+fn test_estimate_simple() {
+    use crate::isa::{Encoding, ISA, Instruction};
+    use crate::trace::{Trace, instruction_ids::*};
+
+    let mut trace = Trace::new(1);
+    trace.add_operation(T, vec![0], vec![]);
+
+    // Create ISA
+    let mut isa = ISA::new();
+    isa.add_instruction(Instruction::fixed_arity(
+        T,
+        Encoding::Logical,
+        1,        // arity
+        100,      // time
+        Some(50), // space
+        None,     // length (defaults to arity)
+        0.001,    // error_rate
+    ));
+
+    let result = trace.estimate(&isa, None).expect("Estimation failed");
+
+    assert!((result.error() - 0.001).abs() <= f64::EPSILON);
+    assert_eq!(result.runtime(), 100);
+    assert_eq!(result.qubits(), 50);
+}
+
+#[test]
+fn test_estimate_with_factory() {
+    use crate::isa::{Encoding, ISA, Instruction};
+    use crate::trace::{Trace, instruction_ids::*};
+
+    let mut trace = Trace::new(1);
+    // Algorithm needs 1000 T states
+    trace.increment_resource_state(T, 1000);
+
+    // Some compute runtime to allow factories to run
+    trace.add_operation(GENERIC, vec![0], vec![]);
+
+    let mut isa = ISA::new();
+
+    // T factory instruction
+    // Produces 1 T state
+    isa.add_instruction(Instruction::fixed_arity(
+        T,
+        Encoding::Logical,
+        1,        // arity
+        10,       // time to produce 1 state
+        Some(50), // space for factory
+        None,
+        0.0001, // error rate of produced state
+    ));
+
+    isa.add_instruction(Instruction::fixed_arity(
+        GENERIC,
+        Encoding::Logical,
+        1,
+        1000, // runtime 1000
+        Some(200),
+        None,
+        0.0,
+    ));
+
+    let result = trace.estimate(&isa, None).expect("Estimation failed");
+
+    assert_eq!(result.runtime(), 1000);
+    assert_eq!(result.qubits(), 700);
+
+    // Check factory result
+    let factory_res = result.factories().get(&T).expect("Factory missing");
+    assert_eq!(factory_res.copies(), 10);
+    assert_eq!(factory_res.runs(), 100);
+    assert_eq!(result.factories().len(), 1);
+}
diff --git a/source/qre/src/trace/transforms.rs b/source/qre/src/trace/transforms.rs
new file mode 100644
index 0000000000..d232ba5b9f
--- /dev/null
+++ b/source/qre/src/trace/transforms.rs
@@ -0,0 +1,14 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+mod lattice_surgery;
+mod psspc;
+
+pub use lattice_surgery::LatticeSurgery;
+pub use psspc::PSSPC;
+
+use crate::{Error, Trace};
+
+pub trait TraceTransform {
+    fn transform(&self, trace: &Trace) -> Result<Trace, Error>;
+}
diff --git a/source/qre/src/trace/transforms/lattice_surgery.rs b/source/qre/src/trace/transforms/lattice_surgery.rs
new file mode 100644
index 0000000000..425606b99d
--- /dev/null
+++ b/source/qre/src/trace/transforms/lattice_surgery.rs
@@ -0,0 +1,30 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
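+
+//! A coarse lattice-surgery layout: the entire trace is modeled as one
+//! multi-qubit `LATTICE_SURGERY` operation over all compute qubits,
+//! repeated once per layer of the original trace's depth.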
+
+use crate::trace::TraceTransform;
+use crate::{Error, Trace, instruction_ids};
+
+#[derive(Default)]
+pub struct LatticeSurgery;
+
+impl LatticeSurgery {
+    #[must_use]
+    pub fn new() -> Self {
+        Self
+    }
+}
+
+impl TraceTransform for LatticeSurgery {
+    fn transform(&self, trace: &Trace) -> Result<Trace, Error> {
+        let mut transformed = trace.clone_empty(None);
+
+        let block = transformed.add_block(trace.depth());
+        block.add_operation(
+            instruction_ids::LATTICE_SURGERY,
+            (0..trace.compute_qubits()).collect(),
+            vec![],
+        );
+
+        Ok(transformed)
+    }
+}
diff --git a/source/qre/src/trace/transforms/psspc.rs b/source/qre/src/trace/transforms/psspc.rs
new file mode 100644
index 0000000000..287e6c0aa1
--- /dev/null
+++ b/source/qre/src/trace/transforms/psspc.rs
@@ -0,0 +1,218 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+use crate::trace::{Gate, TraceTransform};
+use crate::{Error, Trace, instruction_ids};
+
+/// Implements the Parallel Synthesis Sequential Pauli Computation (PSSPC)
+/// layout algorithm described in Appendix D of
+/// [arXiv:2211.07629](https://arxiv.org/pdf/2211.07629). This scheme combines
+/// sequential Pauli-based computation (SPC) as described in
+/// [arXiv:1808.02892](https://arxiv.org/pdf/1808.02892) and
+/// [arXiv:2109.02746](https://arxiv.org/pdf/2109.02746) with an approach to
+/// synthesize sets of diagonal non-Clifford unitaries in parallel as done in
+/// [arXiv:2110.11493](https://arxiv.org/pdf/2110.11493).
+///
+/// References:
+/// - Michael E. Beverland, Prakash Murali, Matthias Troyer, Krysta M. Svore,
+///   Torsten Hoefler, Vadym Kliuchnikov, Guang Hao Low, Mathias Soeken, Aarthi
+///   Sundaram, Alexander Vaschillo: Assessing requirements to scale to
+///   practical quantum advantage,
+///   [arXiv:2211.07629](https://arxiv.org/pdf/2211.07629)
+/// - Daniel Litinski: A Game of Surface Codes: Large-Scale Quantum Computing
+///   with Lattice Surgery, [arXiv:1808.02892](https://arxiv.org/pdf/1808.02892)
+/// - Christopher Chamberland, Earl T. Campbell: Universal quantum computing
+///   with twist-free and temporally encoded lattice surgery,
+///   [arXiv:2109.02746](https://arxiv.org/pdf/2109.02746)
+/// - Michael Beverland, Vadym Kliuchnikov, Eddie Schoute: Surface code
+///   compilation via edge-disjoint paths,
+///   [arXiv:2110.11493](https://arxiv.org/pdf/2110.11493).
+#[derive(Clone)]
+pub struct PSSPC {
+    /// Number of multi-qubit Pauli measurements to inject a synthesized
+    /// rotation, defaults to 1, see [arXiv:2211.07629, (D3)]
+    num_measurements_per_r: u64,
+    /// Number of multi-qubit Pauli measurements to apply a Toffoli gate,
+    /// defaults to 3, see [arXiv:2211.07629, (D3)]
+    num_measurements_per_ccx: u64,
+    /// Number of Pauli measurements to write to memory, defaults to 2, see
+    /// [arXiv:2109.02746, Fig. 16a]
+    num_measurements_per_wtm: u64,
+    /// Number of Pauli measurements to read from memory, defaults to 1, see
+    /// [arXiv:2109.02746, Fig. 16b]
+    num_measurements_per_rfm: u64,
+
+    /// Number of Ts per rotation synthesis
+    num_ts_per_rotation: u64,
+    /// Perform Toffoli gates using CCX magic states, if false, T gates are used
+    ccx_magic_states: bool,
+}
+
+impl PSSPC {
+    #[must_use]
+    pub fn new(num_ts_per_rotation: u64, ccx_magic_states: bool) -> Self {
+        Self {
+            num_measurements_per_r: 1,
+            num_measurements_per_ccx: 3,
+            num_measurements_per_wtm: 2,
+            num_measurements_per_rfm: 1,
+            num_ts_per_rotation,
+            ccx_magic_states,
+        }
+    }
+}
+
+impl PSSPC {
+    #[allow(clippy::cast_possible_truncation)]
+    fn psspc_counts(trace: &Trace) -> Result<PSSPCCounts, Error> {
+        let mut counter = PSSPCCounts::default();
+
+        let mut max_rotation_depth = vec![0; trace.compute_qubits() as usize];
+
+        for (Gate { id, qubits, .. }, mult) in trace.deep_iter() {
+            if instruction_ids::is_pauli_measurement(*id) {
+                counter.measurements += mult;
+            } else if instruction_ids::is_t_like(*id) {
+                counter.t_like += mult;
+            } else if instruction_ids::is_ccx_like(*id) {
+                counter.ccx_like += mult;
+            } else if instruction_ids::is_rotation_like(*id) {
+                counter.rotation_like += mult;
+
+                // Track rotation depth
+                let mut current_depth = 0;
+                for q in qubits {
+                    if max_rotation_depth[*q as usize] > current_depth {
+                        current_depth = max_rotation_depth[*q as usize];
+                    }
+                }
+                let new_depth = current_depth + mult;
+                for q in qubits {
+                    max_rotation_depth[*q as usize] = new_depth;
+                }
+                if new_depth > counter.rotation_depth {
+                    counter.rotation_depth = new_depth;
+                }
+            } else if *id == instruction_ids::READ_FROM_MEMORY {
+                counter.read_from_memory += mult;
+            } else if *id == instruction_ids::WRITE_TO_MEMORY {
+                counter.write_to_memory += mult;
+            } else if !instruction_ids::is_clifford(*id) {
+                // Unsupported non-Clifford gate
+                return Err(Error::UnsupportedInstruction {
+                    id: *id,
+                    name: "PSSPC",
+                });
+            } else {
+                // For Clifford gates, synchronize depths across qubits
+                if !qubits.is_empty() {
+                    let mut max_depth = 0;
+                    for q in qubits {
+                        if max_rotation_depth[*q as usize] > max_depth {
+                            max_depth = max_rotation_depth[*q as usize];
+                        }
+                    }
+                    for q in qubits {
+                        max_rotation_depth[*q as usize] = max_depth;
+                    }
+                }
+            }
+        }
+
+        Ok(counter)
+    }
+
+    #[allow(clippy::cast_precision_loss)]
+    fn compute_only_trace(&self, trace: &Trace, counts: &PSSPCCounts) -> Trace {
+        let num_qubits = trace.compute_qubits();
+        let logical_qubits = Self::logical_qubit_overhead(num_qubits);
+
+        let mut transformed = trace.clone_empty(Some(logical_qubits));
+
+        let logical_depth = self.logical_depth_overhead(counts);
+        let (t_states, ccx_states) = self.num_magic_states(counts);
+
+        transformed.increment_resource_state(instruction_ids::T, t_states);
+        transformed.increment_resource_state(instruction_ids::CCX, ccx_states);
+
+        let block = transformed.add_block(logical_depth);
+        block.add_operation(
+            instruction_ids::MULTI_PAULI_MEAS,
+            (0..logical_qubits).collect(),
+            vec![],
+        );
+
+        // Add error due to rotation synthesis
+        transformed.increment_base_error(counts.rotation_like as f64 * self.synthesis_error());
+
+        transformed
+    }
+
+    /// Calculates the number of logical qubits required for the PSSPC layout
+    /// according to Eq. (D1) in
+    /// [arXiv:2211.07629](https://arxiv.org/pdf/2211.07629)
+    #[allow(
+        clippy::cast_precision_loss,
+        clippy::cast_possible_truncation,
+        clippy::cast_sign_loss
+    )]
+    fn logical_qubit_overhead(algorithm_qubits: u64) -> u64 {
+        let qubit_padding = ((8 * algorithm_qubits) as f64).sqrt().ceil() as u64 + 1;
+        2 * algorithm_qubits + qubit_padding
+    }
+
+    /// Calculates the number of multi-qubit Pauli measurements executed in
+    /// sequence according to Eq. (D3) in
+    /// [arXiv:2211.07629](https://arxiv.org/pdf/2211.07629)
+    fn logical_depth_overhead(&self, counter: &PSSPCCounts) -> u64 {
+        (counter.measurements + counter.t_like + counter.rotation_like)
+            * self.num_measurements_per_r
+            + counter.ccx_like * self.num_measurements_per_ccx
+            + counter.read_from_memory * self.num_measurements_per_rfm
+            + counter.write_to_memory * self.num_measurements_per_wtm
+            + (self.num_ts_per_rotation * counter.rotation_depth) * self.num_measurements_per_r
+    }
+
+    /// Calculates the number of T and CCX magic states that are consumed by
+    /// multi-qubit Pauli measurements executed by PSSPC according to Eq. (D4)
+    /// in [arXiv:2211.07629](https://arxiv.org/pdf/2211.07629)
+    ///
+    /// CCX magic states are only counted if the hyperparameter
+    /// `ccx_magic_states` is set to true.
+    fn num_magic_states(&self, counter: &PSSPCCounts) -> (u64, u64) {
+        let t_states = counter.t_like + self.num_ts_per_rotation * counter.rotation_like;
+
+        if self.ccx_magic_states {
+            (t_states, counter.ccx_like)
+        } else {
+            (t_states + 4 * counter.ccx_like, 0)
+        }
+    }
+
+    /// Calculates the synthesis error from the formula provided in Table 1 in
+    /// [arXiv:2203.10064](https://arxiv.org/pdf/2203.10064) for Clifford+T in
+    /// the mixed fallback approximation protocol.
+    #[allow(clippy::cast_precision_loss)]
+    fn synthesis_error(&self) -> f64 {
+        2f64.powf((4.86 - self.num_ts_per_rotation as f64) / 0.53)
+    }
+}
+
+impl TraceTransform for PSSPC {
+    fn transform(&self, trace: &Trace) -> Result<Trace, Error> {
+        let counts = Self::psspc_counts(trace)?;
+
+        Ok(self.compute_only_trace(trace, &counts))
+    }
+}
+
+#[derive(Default)]
+struct PSSPCCounts {
+    measurements: u64,
+    t_like: u64,
+    ccx_like: u64,
+    rotation_like: u64,
+    write_to_memory: u64,
+    read_from_memory: u64,
+    rotation_depth: u64,
+}
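Putting the pieces together, a minimal end-to-end sketch assembled from the tests above (assuming the crate is consumed under the name `qre`; `Instruction::fixed_arity` and its argument order are taken verbatim from `test_estimate_simple`):

    use qre::{Encoding, ISA, Instruction, PSSPC, Trace, TraceTransform, instruction_ids};

    fn sketch() -> Result<(), qre::Error> {
        // One T gate on a single compute qubit.
        let mut trace = Trace::new(1);
        trace.add_operation(instruction_ids::T, vec![0], vec![]);

        // Lay the trace out with PSSPC: 20 T states per rotation synthesis,
        // Toffolis via CCX magic states. One T gate demands one T state.
        let laid_out = PSSPC::new(20, true).transform(&trace)?;
        assert_eq!(laid_out.get_resource_state_count(instruction_ids::T), 1);

        // Estimate the raw trace against a one-instruction ISA
        // (arity 1, time 100, space 50, error rate 0.001), as in the tests.
        let mut isa = ISA::new();
        isa.add_instruction(Instruction::fixed_arity(
            instruction_ids::T,
            Encoding::Logical,
            1,
            100,
            Some(50),
            None,
            0.001,
        ));
        let result = trace.estimate(&isa, None)?;
        assert_eq!(result.runtime(), 100);
        Ok(())
    }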