@article {3460, title = {Complexity-constrained quantum thermodynamics}, year = {2024}, month = {3/7/2024}, abstract = {

Quantum complexity measures the difficulty of realizing a quantum process, such as preparing a state or implementing a unitary. We present an approach to quantifying the thermodynamic resources required to implement a process if the process's complexity is restricted. We focus on the prototypical task of information erasure, or Landauer erasure, wherein an n-qubit memory is reset to the all-zero state. We show that the minimum thermodynamic work required to reset an arbitrary state, via a complexity-constrained process, is quantified by the state's complexity entropy. The complexity entropy therefore quantifies a trade-off between the work cost and complexity cost of resetting a state. If the qubits have a nontrivial (but product) Hamiltonian, the optimal work cost is determined by the complexity relative entropy. The complexity entropy quantifies the amount of randomness a system appears to have to a computationally limited observer. Similarly, the complexity relative entropy quantifies such an observer's ability to distinguish two states. We prove elementary properties of the complexity (relative) entropy and determine the complexity entropy's behavior under random circuits. Also, we identify information-theoretic applications of the complexity entropy. The complexity entropy quantifies the resources required for data compression if the compression algorithm must use a restricted number of gates. We further introduce a complexity conditional entropy, which arises naturally in a complexity-constrained variant of information-theoretic decoupling. Assuming that this entropy obeys a conjectured chain rule, we show that the entropy bounds the number of qubits that one can decouple from a reference system, as judged by a computationally bounded referee. Overall, our framework extends the resource-theoretic approach to thermodynamics to integrate a notion of time, as quantified by complexity.
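As a schematic point of reference for the trade-off described above, the following LaTeX sketch contrasts the standard Landauer bound with its complexity-constrained counterpart. The symbol $H_c^r$ for the complexity entropy at complexity budget $r$ is our shorthand for the quantity named in the abstract; the second line is a schematic form under that assumption, not the paper's precise statement.

% Unrestricted Landauer erasure of an n-qubit memory in state \rho at
% temperature T: the optimal work cost is set by the von Neumann entropy,
W_{\mathrm{unrestricted}} \;\ge\; k_B T \ln 2 \cdot S(\rho).
% Complexity-constrained erasure (schematic): if the reset protocol may use
% only circuits within a complexity budget r, the entropy is replaced by the
% complexity entropy H_c^r(\rho) \ge S(\rho), which decreases toward S(\rho)
% as the budget r grows:
W_{\mathrm{restricted}} \;\approx\; k_B T \ln 2 \cdot H_c^{r}(\rho).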

}, url = {https://arxiv.org/abs/2403.04828}, author = {Anthony Munson and Naga Bhavya Teja Kothakonda and Jonas Haferkamp and Nicole Yunger Halpern and Jens Eisert and Philippe Faist} } @article {3363, title = {Verifiable measurement-based quantum random sampling with trapped ions}, year = {2023}, month = {7/26/2023}, abstract = {

Quantum computers are now on the brink of outperforming their classical counterparts. One way to demonstrate the advantage of quantum computation is through quantum random sampling performed on quantum computing devices. However, existing tools for verifying that a quantum device indeed performed the classically intractable sampling task are either impractical or not scalable to the quantum advantage regime. The verification problem thus remains an outstanding challenge. Here, we experimentally demonstrate efficiently verifiable quantum random sampling in the measurement-based model of quantum computation on a trapped-ion quantum processor. We create random cluster states, which are at the heart of measurement-based computing, up to a size of 4 x 4 qubits. Moreover, by exploiting the structure of these states, we are able to recycle qubits during the computation to sample from entangled cluster states that are larger than the qubit register. We then efficiently estimate the fidelity to verify the prepared states -- in single instances and on average -- and compare our results to cross-entropy benchmarking. Finally, we study the effect of experimental noise on the certificates. Our results and techniques provide a feasible path toward a verified demonstration of a quantum advantage.
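For comparison with the fidelity certificates above, here is a minimal, self-contained Python sketch of the linear cross-entropy benchmarking (XEB) estimator referred to in the abstract. The function name and toy distribution are illustrative assumptions, not the experiment's code; note that XEB requires the ideal output probabilities, which is exactly what becomes classically intractable at scale and motivates the fidelity-based verification used here.

# Linear XEB sketch: F = 2^n * E_samples[p_ideal(x)] - 1, where samples come
# from the device and p_ideal are classically computed ideal probabilities.
import numpy as np

def linear_xeb(samples, p_ideal, n_qubits):
    """samples: integers in [0, 2^n); p_ideal: length-2^n probability vector."""
    return 2**n_qubits * np.mean(p_ideal[samples]) - 1.0

# Self-consistency check with a Porter-Thomas-like toy distribution:
# sampling from p_ideal itself yields F close to 1; a uniform sampler
# would yield F close to 0.
rng = np.random.default_rng(0)
n = 4
p = rng.exponential(size=2**n)
p /= p.sum()
samples = rng.choice(2**n, size=5000, p=p)
print(linear_xeb(samples, p, n))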

}, doi = {10.48550/arXiv.2307.14424}, url = {https://arxiv.org/abs/2307.14424}, author = {Martin Ringbauer and Marcel Hinsche and Thomas Feldker and Paul K. Faehrmann and Juani Bermejo-Vega and Claire Edmunds and Lukas Postler and Roman Stricker and Christian D. Marciniak and Michael Meth and Ivan Pogorelov and Rainer Blatt and Philipp Schindler and Jens Eisert and Thomas Monz and Dominik Hangleiter} } @article {3066, title = {Computational advantage of quantum random sampling}, year = {2022}, month = {6/8/2022}, abstract = {

Quantum random sampling is the leading proposal for demonstrating a computational advantage of quantum computers over classical computers. Recently, first large-scale implementations of quantum random sampling have arguably surpassed the boundary of what can be simulated on existing classical hardware. In this article, we comprehensively review the theoretical underpinning of quantum random sampling in terms of computational complexity and verifiability, as well as the practical aspects of its experimental implementation using superconducting and photonic devices and its classical simulation. We discuss in detail open questions in the field and provide perspectives for the road ahead, including potential applications of quantum random sampling.

}, url = {https://arxiv.org/abs/2206.04079}, author = {Dominik Hangleiter and Jens Eisert} } @article {3027, title = {Linear growth of quantum circuit complexity}, journal = {Nat. Phys.}, year = {2022}, month = {3/28/2022}, abstract = {

The complexity of quantum states has become a key quantity of interest across various subfields of physics, from quantum computing to the theory of black holes. The evolution of generic quantum systems can be modelled by considering a collection of qubits subjected to sequences of random unitary gates. Here we investigate how the complexity of these random quantum circuits increases by considering how to construct a unitary operation from Haar-random two-qubit quantum gates. Implementing the unitary operation exactly requires a minimal number of gates -- this is the operation's exact circuit complexity. We prove a conjecture that this complexity grows linearly, before saturating when the number of applied gates reaches a threshold that grows exponentially with the number of qubits. Our proof overcomes difficulties in establishing lower bounds for the exact circuit complexity by combining differential topology and elementary algebraic geometry with an inductive construction of Clifford circuits.
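The flavour of the lower bound can be conveyed by a heuristic parameter count in LaTeX -- a caricature of the dimension argument, which the paper makes rigorous with algebraic geometry:

% The n-qubit unitary group has real dimension
\dim U(2^n) = 4^n,
% while each two-qubit gate ranges over U(4), of dimension 16. A circuit
% with R two-qubit gates in fixed positions is thus the image of a manifold
% of dimension at most 16R, so exactly implementing generic unitaries needs
R \gtrsim \frac{4^n}{16}
% gates. The linear-growth statement refines this: for random circuits, each
% added gate generically increases the exact circuit complexity, until
% saturation at a threshold exponential in n.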

}, doi = {10.1038/s41567-022-01539-6}, author = {Jonas Haferkamp and Philippe Faist and Naga B. T. Kothakonda and Jens Eisert and Nicole Yunger Halpern} } @article {2983, title = {Quantum computational advantage via high-dimensional Gaussian boson sampling}, journal = {Science Advances}, volume = {8}, year = {2022}, month = {1/5/2022}, pages = {eabi7894}, abstract = {

A programmable quantum computer based on fiber optics outperforms classical computers with a high level of confidence. Photonics is a promising platform for demonstrating a quantum computational advantage (QCA) by outperforming the most powerful classical supercomputers on a well-defined computational task. Despite this promise, existing proposals and demonstrations face challenges. Experimentally, current implementations of Gaussian boson sampling (GBS) lack programmability or have prohibitive loss rates. Theoretically, there is a comparative lack of rigorous evidence for the classical hardness of GBS. In this work, we make progress in improving both the theoretical evidence and experimental prospects. We provide evidence for the hardness of GBS, comparable to the strongest theoretical proposals for QCA. We also propose a QCA architecture we call high-dimensional GBS, which is programmable and can be implemented with low loss using few optical components. We show that particular algorithms for simulating GBS are outperformed by high-dimensional GBS experiments at modest system sizes. This work thus opens the path to demonstrating QCA with programmable photonic processors.
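For orientation on why GBS is believed hard to simulate classically: its output probabilities are governed by hafnians, which are #P-hard to compute. Schematically, for a pure zero-mean Gaussian state (standard notation from the GBS literature, not necessarily this paper's):

\Pr(\bar n) \;=\; \frac{|\mathrm{Haf}(B_{\bar n})|^2}{\bar n! \, \sqrt{\det Q}},
% where B is a symmetric matrix fixed by the Gaussian state, B_{\bar n} is
% the submatrix selected (with repetition) by the photon-number pattern
% \bar n, \bar n! = \prod_i n_i!, and Q is determined by the state's
% covariance matrix. The hafnian generalizes the matrix permanent, which
% underlies the hardness evidence discussed above.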

}, doi = {10.1126/sciadv.abi7894}, url = {https://www.science.org/doi/abs/10.1126/sciadv.abi7894}, author = {Abhinav Deshpande and Arthur Mehta and Trevor Vincent and Nicolas Quesada and Marcel Hinsche and Marios Ioannou and Lars Madsen and Jonathan Lavoie and Haoyu Qi and Jens Eisert and Dominik Hangleiter and Bill Fefferman and Ish Dhand} } @article {3200, title = {Resource theory of quantum uncomplexity}, journal = {Physical Review A}, volume = {106}, year = {2022}, month = {12/19/2022}, abstract = {

Quantum complexity is emerging as a key property of many-body systems, including black holes, topological materials, and early quantum computers. A state's complexity quantifies the number of computational gates required to prepare the state from a simple tensor product. The greater a state's distance from maximal complexity, or "uncomplexity," the more useful the state is as input to a quantum computation. Separately, resource theories -- simple models for agents subject to constraints -- are burgeoning in quantum information theory. We unite the two domains, confirming Brown and Susskind's conjecture that a resource theory of uncomplexity can be defined. The allowed operations, fuzzy operations, are slightly random implementations of two-qubit gates chosen by an agent. We formalize two operational tasks, uncomplexity extraction and expenditure. Their optimal efficiencies depend on an entropy that we engineer to reflect complexity. We also present two monotones, uncomplexity measures that decline monotonically under fuzzy operations, in certain regimes. This work unleashes on many-body complexity the resource-theory toolkit from quantum information theory.

}, doi = {10.1103/physreva.106.062417}, url = {https://arxiv.org/abs/2110.11371}, author = {Nicole Yunger Halpern and Naga B. T. Kothakonda and Jonas Haferkamp and Anthony Munson and Jens Eisert and Philippe Faist} } @article {3065, title = {A single T-gate makes distribution learning hard}, year = {2022}, month = {7/7/2022}, abstract = {

The task of learning a probability distribution from samples is ubiquitous across the natural sciences. The output distributions of local quantum circuits form a particularly interesting class of distributions, of key importance both to quantum advantage proposals and a variety of quantum machine learning algorithms. In this work, we provide an extensive characterization of the learnability of the output distributions of local quantum circuits. Our first result yields insight into the relationship between the efficient learnability and the efficient simulatability of these distributions. Specifically, we prove that the density modelling problem associated with Clifford circuits can be efficiently solved, while for depth $d = n^{\Omega(1)}$ circuits the injection of a single T-gate into the circuit renders this problem hard. This result shows that efficient simulatability does not imply efficient learnability. Our second set of results provides insight into the potential and limitations of quantum generative modelling algorithms. We first show that the generative modelling problem associated with depth $d = n^{\Omega(1)}$ local quantum circuits is hard for any learning algorithm, classical or quantum. As a consequence, one cannot use a quantum algorithm to gain a practical advantage for this task. We then show that, for a wide variety of the most practically relevant learning algorithms -- including hybrid quantum-classical algorithms -- even the generative modelling problem associated with depth $d = \omega(\log(n))$ Clifford circuits is hard. This result places limitations on the applicability of near-term hybrid quantum-classical generative modelling algorithms.
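The object whose learnability is at stake here is the output distribution ("Born machine") of a quantum circuit. Below is a minimal Python sketch using the stim stabilizer simulator, which can efficiently sample the Clifford case discussed above; it is illustrative only, and note that stim cannot simulate the T-gate-injected circuits that make the problem hard.

# Sample the output distribution of a random Clifford circuit with stim.
import numpy as np
import stim

n = 8
tableau = stim.Tableau.random(n)        # uniformly random n-qubit Clifford
circuit = tableau.to_circuit()          # compile it into elementary gates
circuit.append("M", range(n))           # measure all qubits in the Z basis

sampler = circuit.compile_sampler()
samples = sampler.sample(shots=10_000)  # boolean array of shape (10000, n)
bitstrings = samples @ (1 << np.arange(n))
values, counts = np.unique(bitstrings, return_counts=True)
print(len(values), "distinct outcomes observed out of", 2**n, "possible")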

}, url = {https://arxiv.org/abs/2207.03140}, author = {Marcel Hinsche and Marios Ioannou and Alexander Nietner and Jonas Haferkamp and Yihui Quek and Dominik Hangleiter and Jean-Pierre Seifert and Jens Eisert and Ryan Sweke} } @article {2869, title = {Learnability of the output distributions of local quantum circuits}, year = {2021}, month = {10/11/2021}, abstract = {

There is currently considerable interest in understanding the potential advantages quantum devices can offer for probabilistic modelling. In this work we investigate, within two different oracle models, the probably approximately correct (PAC) learnability of quantum circuit Born machines, i.e., the output distributions of local quantum circuits. We first show a negative result, namely, that the output distributions of super-logarithmic-depth Clifford circuits are not sample-efficiently learnable in the statistical query model, i.e., when given query access to empirical expectation values of bounded functions over the sample space. This immediately implies the hardness, for both quantum and classical algorithms, of learning from statistical queries the output distributions of local quantum circuits using any gate set which includes the Clifford group. As many practical generative modelling algorithms use statistical queries -- including those for training quantum circuit Born machines -- our result is broadly applicable and strongly limits the possibility of a meaningful quantum advantage for learning the output distributions of local quantum circuits. As a positive result, we show that in a more powerful oracle model, namely when directly given access to samples, the output distributions of local Clifford circuits are computationally efficiently PAC learnable by a classical learner. Our results are equally applicable to the problems of learning an algorithm for generating samples from the target distribution (generative modelling) and learning an algorithm for evaluating its probabilities (density modelling). They provide the first rigorous insights into the learnability of output distributions of local quantum circuits from the probabilistic modelling perspective.
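To make the statistical query model concrete, here is a toy Python oracle of the kind the negative result concerns: the learner never sees raw samples, only expectation values of bounded functions, accurate up to a tolerance. Class and parameter names are illustrative assumptions, not the paper's formalism.

import numpy as np

class StatisticalQueryOracle:
    """Answers E[f(x)] over a hidden dataset, up to +/- tolerance."""

    def __init__(self, samples, tolerance, seed=0):
        self._samples = samples      # raw data, hidden from the learner
        self._tol = tolerance
        self._rng = np.random.default_rng(seed)

    def query(self, f):
        # f must map a sample to a value in [-1, 1].
        estimate = float(np.mean([f(x) for x in self._samples]))
        return estimate + self._rng.uniform(-self._tol, self._tol)

# Example: query the bias of the first bit of hidden 3-bit samples.
data = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0], [0, 0, 1]])
oracle = StatisticalQueryOracle(data, tolerance=0.05)
print(oracle.query(lambda x: 2 * x[0] - 1))   # near 0 for this toy dataset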

}, url = {https://arxiv.org/abs/2110.05517}, author = {Marcel Hinsche and Marios Ioannou and Alexander Nietner and Jonas Haferkamp and Yihui Quek and Dominik Hangleiter and Jean-Pierre Seifert and Jens Eisert and Ryan Sweke} } @article {2827, title = {Precise Hamiltonian identification of a superconducting quantum processor}, year = {2021}, month = {8/18/2021}, abstract = {

The required precision to perform quantum simulations beyond the capabilities of classical computers imposes major experimental and theoretical challenges. Here, we develop a characterization technique to benchmark the implementation precision of a specific quantum simulation task. We infer all parameters of the bosonic Hamiltonian that governs the dynamics of excitations in a two-dimensional grid of nearest-neighbour-coupled superconducting qubits. We devise a robust algorithm for the identification of Hamiltonian parameters from measured time series of the expectation values of single-mode canonical coordinates. Using super-resolution and denoising methods, we first extract the eigenfrequencies of the governing Hamiltonian from the complex time-domain measurement; next, we recover the eigenvectors of the Hamiltonian via constrained manifold optimization over the orthogonal group. For five and six coupled qubits, we identify Hamiltonian parameters with sub-MHz precision and construct a spatial implementation error map for a grid of 27 qubits. Our approach enables us to distinguish and quantify the effects of state preparation and measurement errors and show that they are the dominant sources of errors in the implementation. Our results quantify the implementation accuracy of analog dynamics and introduce a diagnostic toolkit for understanding, calibrating, and improving analog quantum processors.
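Below is a minimal Python sketch of the first stage described above, recovering eigenfrequencies from a measured time series. A plain FFT with peak picking stands in for the paper's super-resolution and denoising pipeline, and all frequencies and noise levels are synthetic.

import numpy as np
from scipy.signal import find_peaks

dt, T = 1e-9, 2e-6                             # 1 ns sampling over 2 us
t = np.arange(0.0, T, dt)
true_freqs = [4.2e6, 5.1e6, 6.8e6]             # toy eigenfrequencies in Hz
signal = sum(np.cos(2 * np.pi * f * t) for f in true_freqs)
signal += 0.3 * np.random.default_rng(1).normal(size=t.size)

spectrum = np.abs(np.fft.rfft(signal))
freqs = np.fft.rfftfreq(t.size, d=dt)
peaks, _ = find_peaks(spectrum, height=0.3 * spectrum.max())
print(np.round(freqs[peaks] / 1e6, 2), "MHz")  # recovers ~[4.2, 5.1, 6.8]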

}, url = {https://arxiv.org/abs/2108.08319}, author = {Dominik Hangleiter and Ingo Roth and Jens Eisert and Pedram Roushan} } @article {2773, title = {Quantum Computational Supremacy via High-Dimensional Gaussian Boson Sampling}, year = {2021}, month = {2/24/2021}, abstract = {

Photonics is a promising platform for demonstrating quantum computational supremacy (QCS) by convincingly outperforming the most powerful classical supercomputers on a well-defined computational task. Despite this promise, existing photonics proposals and demonstrations face significant hurdles. Experimentally, current implementations of Gaussian boson sampling (GBS) lack programmability or have prohibitive loss rates. Theoretically, there is a comparative lack of rigorous evidence for the classical hardness of GBS. In this work, we make significant progress in improving both the theoretical evidence and experimental prospects. On the theory side, we provide strong evidence for the hardness of GBS, placing it on par with the strongest theoretical proposals for QCS. On the experimental side, we propose a new QCS architecture, high-dimensional Gaussian boson sampling, which is programmable and can be implemented with low loss rates using few optical components. We show that particular classical algorithms for simulating GBS are vastly outperformed by high-dimensional Gaussian boson sampling experiments at modest system sizes. This work thus opens the path to demonstrating QCS with programmable photonic processors.

}, url = {https://arxiv.org/abs/2102.12474}, author = {Abhinav Deshpande and Arthur Mehta and Trevor Vincent and Nicolas Quesada and Marcel Hinsche and Marios Ioannou and Lars Madsen and Jonathan Lavoie and Haoyu Qi and Jens Eisert and Dominik Hangleiter and Bill Fefferman and Ish Dhand} } @article {2138, title = {Recovering quantum gates from few average gate fidelities}, journal = {Phys. Rev. Lett.}, volume = {121}, year = {2018}, month = {2018/03/01}, pages = {170502}, abstract = {

Characterising quantum processes is a key task in, and constitutes a challenge for, the development of quantum technologies, especially at the noisy intermediate scale of today's devices. One method for characterising processes is randomised benchmarking, which is robust against state preparation and measurement (SPAM) errors and can be used to benchmark Clifford gates. A complementary approach asks for full tomographic knowledge. Compressed sensing techniques achieve full tomography of quantum channels essentially at optimal resource efficiency. So far, guarantees for compressed sensing protocols rely on unstructured random measurements and cannot be applied to the data acquired from randomised benchmarking experiments. It has been an open question whether or not the favourable features of both worlds can be combined. In this work, we give a positive answer to this question. For the important case of characterising multi-qubit unitary gates, we provide a rigorously guaranteed and practical reconstruction method that works with an essentially optimal number of average gate fidelities measured with respect to random Clifford unitaries. Moreover, for general unital quantum channels we provide an explicit expansion into a unitary 2-design, allowing for a practical and guaranteed reconstruction also in that case. As a side result, we obtain a new statistical interpretation of the unitarity -- a figure of merit that characterises the coherence of a process. In our proofs we exploit recent representation-theoretic insights on the Clifford group, develop a version of Collins' calculus with Weingarten functions for integration over the Clifford group, and combine this with proof techniques from compressed sensing.
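For orientation, the data in this reconstruction are average gate fidelities. A standard identity (due to Horodecki et al. and Nielsen, a known fact independent of this paper) makes clear in LaTeX why they serve as linear measurements of the unknown channel:

F_{\mathrm{avg}}(\Lambda, U) \;=\; \frac{d \, F_{\mathrm{e}}(U^\dagger \circ \Lambda) + 1}{d+1},
\qquad
F_{\mathrm{e}}(\Phi) \;=\; \langle \Omega | (\Phi \otimes \mathrm{id})(|\Omega\rangle\langle\Omega|) | \Omega \rangle,
% where d = 2^n and |\Omega\rangle is the maximally entangled state. Each
% average gate fidelity is therefore an affine function of the Choi matrix
% of \Lambda -- precisely the kind of linear data to which compressed
% sensing reconstruction applies.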

}, doi = {10.1103/PhysRevLett.121.170502}, url = {https://arxiv.org/abs/1803.00572}, author = {Ingo Roth and Richard Kueng and Shelby Kimmel and Yi-Kai Liu and David Gross and Jens Eisert and Martin Kliesch} } @article {1434, title = {Quantum Tomography via Compressed Sensing: Error Bounds, Sample Complexity, and Efficient Estimators}, journal = {New Journal of Physics}, volume = {14}, year = {2012}, month = {2012/09/27}, pages = {095022}, abstract = { Intuitively, if a density operator has small rank, then it should be easier to estimate from experimental data, since in this case only a few eigenvectors need to be learned. We prove two complementary results that confirm this intuition. First, we show that a low-rank density matrix can be estimated using fewer copies of the state, i.e., the sample complexity of tomography decreases with the rank. Second, we show that unknown low-rank states can be reconstructed from an incomplete set of measurements, using techniques from compressed sensing and matrix completion. These techniques use simple Pauli measurements, and their output can be certified without making any assumptions about the unknown state. We give a new theoretical analysis of compressed tomography, based on the restricted isometry property (RIP) for low-rank matrices. Using these tools, we obtain near-optimal error bounds for the realistic situation where the data contains noise due to finite statistics and the density matrix is full-rank with decaying eigenvalues. We also obtain upper bounds on the sample complexity of compressed tomography, and almost-matching lower bounds on the sample complexity of any procedure using adaptive sequences of Pauli measurements. Using numerical simulations, we compare the performance of two compressed sensing estimators with standard maximum-likelihood estimation (MLE). We find that, given comparable experimental resources, the compressed sensing estimators consistently produce higher-fidelity state reconstructions than MLE. In addition, the use of an incomplete set of measurements leads to faster classical processing with no loss of accuracy. Finally, we show how to certify the accuracy of a low-rank estimate using direct fidelity estimation, and we describe a method for compressed quantum process tomography that works for processes with small Kraus rank. }, doi = {10.1088/1367-2630/14/9/095022}, url = {http://arxiv.org/abs/1205.2300v2}, author = {Steven T. Flammia and David Gross and Yi-Kai Liu and Jens Eisert} } @article {1433, title = {Continuous-variable quantum compressed sensing}, year = {2011}, month = {2011/11/03}, abstract = { We significantly extend recently developed methods to faithfully reconstruct unknown quantum states that are approximately low-rank, using only a few measurement settings. Our new method is general enough to allow for measurements from a continuous family, and is also applicable to continuous-variable states. As a technical result, this work generalizes quantum compressed sensing to the situation where the measured observables are taken from a so-called tight frame (rather than an orthonormal basis) --- hence covering most realistic measurement scenarios. As an application, we discuss the reconstruction of quantum states of light from homodyne detection and other types of measurements, and we present simulations that show the advantage of the proposed compressed sensing technique over present methods.
Finally, we introduce a method to construct a certificate which guarantees the success of the reconstruction with no assumption on the state, and we show how slightly more measurements give rise to "universal" state reconstruction that is highly robust to noise. }, url = {http://arxiv.org/abs/1111.0853v3}, author = {Matthias Ohliger and Vincent Nesme and David Gross and Yi-Kai Liu and Jens Eisert} } @article {1432, title = {Quantum state tomography via compressed sensing}, journal = {Physical Review Letters}, volume = {105}, year = {2010}, month = {2010/10/4}, abstract = { We establish methods for quantum state tomography based on compressed sensing. These methods are specialized for quantum states that are fairly pure, and they offer a significant performance improvement on large quantum systems. In particular, they are able to reconstruct an unknown density matrix of dimension d and rank r using O(rd log^2 d) measurement settings, compared to standard methods that require d^2 settings. Our methods have several features that make them amenable to experimental implementation: they require only simple Pauli measurements, use fast convex optimization, are stable against noise, and can be applied to states that are only approximately low-rank. The acquired data can be used to certify that the state is indeed close to pure, so no a priori assumptions are needed. We present both theoretical bounds and numerical simulations. }, doi = {10.1103/PhysRevLett.105.150401}, url = {http://arxiv.org/abs/0909.3304v4}, author = {David Gross and Yi-Kai Liu and Steven T. Flammia and Stephen Becker and Jens Eisert} }
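To close, a minimal Python sketch in the spirit of the compressed sensing tomography entries above: reconstructing a low-rank density matrix from a random subset of Pauli expectation values via convex optimization (cvxpy). For density matrices, the positivity and unit-trace constraints play the role of the nuclear-norm penalty, since the trace equals the nuclear norm on positive semidefinite matrices. All sizes and parameters are toy choices, not the papers' protocols, and recovery is only approximate at these tiny sizes.

import itertools
import numpy as np
import cvxpy as cp

n = 3                                          # qubits, so d = 8
d = 2**n
paulis = {"I": np.eye(2), "X": np.array([[0, 1], [1, 0]]),
          "Y": np.array([[0, -1j], [1j, 0]]), "Z": np.diag([1.0, -1.0])}

def pauli_string(label):
    # Tensor product of single-qubit Paulis, e.g. "XZY".
    op = np.array([[1.0]])
    for ch in label:
        op = np.kron(op, paulis[ch])
    return op

rng = np.random.default_rng(7)
psi = rng.normal(size=d) + 1j * rng.normal(size=d)
psi /= np.linalg.norm(psi)
rho_true = np.outer(psi, psi.conj())           # rank-1 target state

labels = ["".join(s) for s in itertools.product("IXYZ", repeat=n)][1:]
chosen = rng.choice(len(labels), size=40, replace=False)   # 40 of 63 settings
ops = [pauli_string(labels[i]) for i in chosen]
data = [float(np.real(np.trace(op @ rho_true))) for op in ops]

rho = cp.Variable((d, d), hermitian=True)
residuals = [cp.real(cp.trace(op @ rho)) - v for op, v in zip(ops, data)]
problem = cp.Problem(cp.Minimize(cp.sum_squares(cp.hstack(residuals))),
                     [rho >> 0, cp.real(cp.trace(rho)) == 1])
problem.solve()
print("fidelity ~", float(np.real(psi.conj() @ rho.value @ psi)))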