@article {3461, title = {Quantum One-Wayness of the Single-Round Sponge with Invertible Permutations}, year = {2024}, month = {3/7/2024}, abstract = {
Sponge hashing is a novel class of cryptographic hash algorithms which underlies the current international hash function standard SHA-3. In a nutshell, a sponge function takes as input a bit-stream of any length and processes it via a simple iterative procedure: it repeatedly feeds each block of the input into a so-called block function, and then produces a short digest which consists of a subset of the final output bits. While much is known about the post-quantum security of the sponge construction in the case when the block function is modeled as a random function or permutation, the case of invertible permutations, which more accurately models the construction underlying SHA-3, has so far remained a fundamental open problem.
In this work, we make new progress towards overcoming this barrier and show several results. First, we prove the {\textquoteleft}{\textquoteleft}double-sided zero-search{\textquoteright}{\textquoteright} conjecture proposed by Unruh (eprint{\textquoteright} 2021) and show that finding zero-pairs in a random $2n$-bit permutation requires at least $\Omega(2^{n/2})$ many queries -- and this is tight due to Grover{\textquoteright}s algorithm. At the core of our proof lies a novel {\textquoteleft}{\textquoteleft}symmetrization argument{\textquoteright}{\textquoteright} which uses insights from the theory of Young subgroups. Second, we consider more general variants of the double-sided search problem and show similar query lower bounds for them. As an application, we prove the quantum one-wayness of the single-round sponge with invertible permutations in the quantum random oracle model.
Quantum information science and technology (QIST) is a critical and emerging technology with the potential for enormous world impact and is currently invested in by over 40 nations. To bring these large-scale investments to fruition and bridge the lower technology readiness levels (TRLs) of fundamental research at universities to the high TRLs necessary to realize the promise of practical quantum advantage accessible to industry and the public, we present a roadmap for Quantum Technology Demonstration Projects (QTDPs). Such QTDPs, focused on intermediate TRLs, are large-scale public-private partnerships with a high probability of translation from laboratory to practice. They create technology demonstrating a clear {\textquoteleft}quantum advantage{\textquoteright} for science breakthroughs that are user-motivated and will provide access to a broad and diverse community of scientific users. Successful implementation of a program of QTDPs will have large positive economic impacts.
}, url = {https://arxiv.org/abs/2210.14757}, author = {Paul Alsing and Phil Battle and Joshua C. Bienfang and Tammie Borders and Tina Brower-Thomas and Lincoln D. Carr and Fred Chong and Siamak Dadras and Brian DeMarco and Ivan Deutsch and Eden Figueroa and Danna Freedman and Henry Everitt and Daniel Gauthier and Ezekiel Johnston-Halperin and Jungsang Kim and Mackillo Kira and Prem Kumar and Paul Kwiat and John Lekki and Anjul Loiacono and Marko Lon{\v c}ar and John R. Lowell and Mikhail Lukin and Celia Merzbacher and Aaron Miller and Christopher Monroe and Johannes Pollanen and David Pappas and Michael Raymer and Ronald Reano and Brandon Rodenburg and Martin Savage and Thomas Searles and Jun Ye} } @article {3056, title = {Advantages and limitations of quantum routing}, journal = {PRX Quantum}, volume = {4}, year = {2023}, month = {2/1/2023}, abstract = {The Swap gate is a ubiquitous tool for moving information on quantum hardware, yet it can be considered a classical operation because it does not entangle product states. Genuinely quantum operations could outperform Swap for the task of permuting qubits within an architecture, which we call routing. We consider quantum routing in two models: (1) allowing arbitrary two-qubit unitaries, or (2) allowing Hamiltonians with norm-bounded interactions. We lower bound the circuit depth or time of quantum routing in terms of spectral properties of graphs representing the architecture interaction constraints, and give a generalized upper bound for all simple connected n-vertex graphs. In particular, we give conditions for a superpolynomial classical-quantum routing separation, which exclude graphs with a small spectral gap and graphs of bounded degree. Finally, we provide examples of a quadratic separation between gate-based and Hamiltonian routing models with a constant number of local ancillas per qubit and of an Ω(n) speedup if we also allow fast local interactions.
}, keywords = {Data Structures and Algorithms (cs.DS), FOS: Computer and information sciences, FOS: Physical sciences, Quantum Physics (quant-ph)}, doi = {10.1103/PRXQuantum.4.010313}, url = {https://arxiv.org/abs/2206.01766}, author = {Bapat, Aniruddha and Childs, Andrew M. and Gorshkov, Alexey V. and Schoute, Eddie} } @article {3246, title = {Analyzing Convergence in Quantum Neural Networks: Deviations from Neural Tangent Kernels}, year = {2023}, month = {3/26/2023}, abstract = {A quantum neural network (QNN) is a parameterized mapping efficiently implementable on near-term Noisy Intermediate-Scale Quantum (NISQ) computers. It can be used for supervised learning when combined with classical gradient-based optimizers. Despite the existing empirical and theoretical investigations, the convergence of QNN training is not fully understood. Inspired by the success of the neural tangent kernels (NTKs) in probing into the dynamics of classical neural networks, a recent line of works proposes to study over-parameterized QNNs by examining a quantum version of tangent kernels. In this work, we study the dynamics of QNNs and show that contrary to popular belief it is qualitatively different from that of any kernel regression: due to the unitarity of quantum operations, there is a non-negligible deviation from the tangent kernel regression derived at the random initialization. As a result of the deviation, we prove the at-most sublinear convergence for QNNs with Pauli measurements, which is beyond the explanatory power of any kernel regression dynamics. We then present the actual dynamics of QNNs in the limit of over-parameterization. The new dynamics capture the change of convergence rate during training and implies that the range of measurements is crucial to the fast QNN convergence.
}, url = {https://arxiv.org/abs/2303.14844}, author = {Xuchen You and Shouvanik Chakrabarti and Boyang Chen and Xiaodi Wu} } @article {3248, title = {Collision-resolved pressure sensing}, year = {2023}, month = {3/17/2023}, abstract = {Heat and pressure are ultimately transmitted via quantized degrees of freedom, like gas particles and phonons. While a continuous Brownian description of these noise sources is adequate to model measurements with relatively long integration times, sufficiently precise measurements can resolve the detailed time dependence coming from individual bath-system interactions. We propose the use of nanomechanical devices operated with impulse readout sensitivity around the {\textquoteleft}{\textquoteleft}standard quantum limit{\textquoteright}{\textquoteright} to sense ultra-low gas pressures by directly counting the individual collisions of gas particles on a sensor. We illustrate this in two paradigmatic model systems: an optically levitated nanobead and a tethered membrane system in a phononic bandgap shield.
}, url = {https://arxiv.org/abs/2303.09922}, author = {Daniel S. Barker and Daniel Carney and Thomas W. LeBrun and David C. Moore and Jacob M. Taylor} } @article {3415, title = {Compressed gate characterization for quantum devices with time-correlated noise}, year = {2023}, month = {12/22/2023}, abstract = {As quantum devices make steady progress towards intermediate scale and fault-tolerant quantum computing, it is essential to develop rigorous and efficient measurement protocols that account for known sources of noise. Most existing quantum characterization protocols such as gate set tomography and randomized benchmarking assume the noise acting on the qubits is Markovian. However, this assumption is often not valid, as for the case of 1/f charge noise or hyperfine nuclear spin noise. Here, we present a general framework for quantum process tomography (QPT) in the presence of time-correlated noise. We further introduce fidelity benchmarks that quantify the relative strength of different sources of Markovian and non-Markovian noise. As an application of our method, we perform a comparative theoretical and experimental analysis of silicon spin qubits. We first develop a detailed noise model that accounts for the dominant sources of noise and validate the model against experimental data. Applying our framework for time-correlated QPT, we find that the number of independent parameters needed to characterize one and two-qubit gates can be compressed by 10x and 100x, respectively, when compared to the fully generic case. These compressions reduce the amount of tomographic measurements needed in experiment, while also significantly speeding up numerical simulations of noisy quantum circuit dynamics compared to time-dependent Hamiltonian simulation. 
Using this compressed noise model, we find good agreement between our theoretically predicted process fidelities and two qubit interleaved randomized benchmarking fidelities of 99.8\% measured in recent experiments on silicon spin qubits. More broadly, our formalism can be directly extended to develop efficient and scalable tuning protocols for high-fidelity control of large-arrays of quantum devices with non-Markovian noise.
}, url = {https://arxiv.org/abs/2307.14432}, author = {M. J. Gullans and M. Caranti and A. R. Mills and J. R. Petta} } @article {3396, title = {Data Needs and Challenges of Quantum Dot Devices Automation: Workshop Report}, year = {2023}, month = {12/21/2023}, abstract = {Gate-defined quantum dots are a promising candidate system to realize scalable, coupled qubit systems and serve as a fundamental building block for quantum computers. However, present-day quantum dot devices suffer from imperfections that must be accounted for, which hinders the characterization, tuning, and operation process. Moreover, with an increasing number of quantum dot qubits, the relevant parameter space grows sufficiently to make heuristic control infeasible. Thus, it is imperative that reliable and scalable autonomous tuning approaches are developed. In this report, we outline current challenges in automating quantum dot device tuning and operation with a particular focus on datasets, benchmarking, and standardization. We also present ideas put forward by the quantum dot community on how to overcome them.
}, doi = {10.48550/arXiv.2312.14322}, url = {https://arxiv.org/abs/2312.14322}, author = {Justyna P. Zwolak and Jacob M. Taylor and Reed Andrews and Jared Benson and Garnett Bryant and Donovan Buterakos and Anasua Chatterjee and Sankar Das Sarma and Mark A. Eriksson and Eli{\v s}ka Greplov{\'a} and Michael J. Gullans and Fabian Hader and Tyler J. Kovach and Pranav S. Mundada and Mick Ramsey and Torbjoern Rasmussen and Brandon Severin and Anthony Sigillito and Brennan Undseth and Brian Weber} } @article {3074, title = {Decoherence from Long-Range Forces in Atom Interferometry}, journal = {Phys. Rev. A}, volume = {107}, year = {2023}, month = {3/17/2023}, doi = {10.1103/PhysRevA.107.033319}, url = {https://arxiv.org/abs/2205.03006}, author = {Jonathan Kunjummen and Daniel Carney and Jacob M. Taylor} } @article {3421, title = {Digital quantum simulation of NMR experiments}, journal = {Science Advances}, volume = {9}, year = {2023}, month = {11/29/2023}, abstract = {Simulations of nuclear magnetic resonance (NMR) experiments can be an important tool for extracting information about molecular structure and optimizing experimental protocols but are often intractable on classical computers for large molecules such as proteins and for protocols such as zero-field NMR. We demonstrate the first quantum simulation of an NMR spectrum, computing the zero-field spectrum of the methyl group of acetonitrile using four qubits of a trapped-ion quantum computer. We reduce the sampling cost of the quantum simulation by an order of magnitude using compressed sensing techniques. We show how the intrinsic decoherence of NMR systems may enable the zero-field simulation of classically hard molecules on relatively near-term quantum hardware and discuss how the experimentally demonstrated quantum algorithm can be used to efficiently simulate scientifically and technologically relevant solid-state NMR experiments on more mature devices.
Our work opens a practical application for quantum computation.
}, issn = {2375-2548}, doi = {10.1126/sciadv.adh2594}, url = {https://arxiv.org/abs/2109.13298}, author = {Seetharam, Kushal and Biswas, Debopriyo and Noel, Crystal and Risinger, Andrew and Zhu, Daiwei and Katz, Or and Chattopadhyay, Sambuddha and Cetina, Marko and Monroe, Christopher and Demler, Eugene and Sels, Dries} } @article {3426, title = {The discrete adiabatic quantum linear system solver has lower constant factors than the randomized adiabatic solver}, year = {2023}, month = {12/12/2023}, abstract = {The solution of linear systems of equations is the basis of many other quantum algorithms, and recent results provided an algorithm with optimal scaling in both the condition number κ and the allowable error ϵ [PRX Quantum \textbf{3}, 0403003 (2022)]. That work was based on the discrete adiabatic theorem, and worked out an explicit constant factor for an upper bound on the complexity. Here we show via numerical testing on random matrices that the constant factor is in practice about 1,500 times smaller than the upper bound found numerically in the previous results. That means that this approach is far more efficient than might naively be expected from the upper bound. In particular, it is over an order of magnitude more efficient than using a randomised approach from [arXiv:2305.11352] that claimed to be more efficient.
}, url = {https://arxiv.org/abs/2312.07690}, author = {Pedro C. S. Costa and Dong An and Ryan Babbush and Dominic Berry} } @article {3261, title = {Ever more optimized simulations of fermionic systems on a quantum computer}, year = {2023}, month = {3/6/2023}, abstract = {Despite using a novel model of computation, quantum computers break down programs into elementary gates. Among such gates, entangling gates are the most expensive. In the context of fermionic simulations, we develop a suite of compilation and optimization techniques that massively reduce the entangling-gate counts. We exploit the well-studied non-quantum optimization algorithms to achieve up to 24\% savings over the state of the art for several small-molecule simulations, with no loss of accuracy or hidden costs. Our methodologies straightforwardly generalize to wider classes of near-term simulations of the ground state of a fermionic system or real-time simulations probing dynamical properties of a fermionic system.
}, url = {https://arxiv.org/abs/2303.03460}, author = {Qingfeng Wang and Ze-Pei Cian and Ming Li and Igor L. Markov and Yunseong Nam} } @article {3126, title = {Fat Pointers for Temporal Memory Safety of C}, journal = {Proceedings of the ACM on Programming Languages}, volume = {7}, year = {2023}, month = {3/20/2023}, pages = {316-347}, abstract = {Temporal memory safety bugs, especially use-after-free and double free bugs, pose a major security threat to C programs. Real-world exploits utilizing these bugs enable attackers to read and write arbitrary memory locations, causing disastrous violations of confidentiality, integrity, and availability. Many previous solutions retrofit temporal memory safety to C, but they all either incur high performance overhead and/or miss detecting certain types of temporal memory safety bugs.
In this paper, we propose a temporal memory safety solution that is both efficient and comprehensive. Specifically, we extend Checked C, a spatially-safe extension to C, with temporally-safe pointers. These are implemented by combining two techniques: fat pointers and dynamic key-lock checks. We show that the fat-pointer solution significantly improves running time and memory overhead compared to the disjoint-metadata approach that provides the same level of protection. With empirical program data and hands-on experience porting real-world applications, we also show that our solution is practical in terms of backward compatibility -- one of the major complaints about fat pointers.
Today{\textquoteright}s mechanical sensors are capable of detecting extremely weak perturbations while operating near the standard quantum limit. However, further improvements can be made in both sensitivity and bandwidth when we reduce the noise originating from the process of measurement itself -- the quantum-mechanical backaction of measurement -- and go below this {\textquoteleft}standard{\textquoteright} limit, possibly approaching the Heisenberg limit. One of the ways to eliminate this noise is by measuring a quantum nondemolition variable such as the momentum in a free-particle system. Here, we propose and characterize theoretical models for direct velocity measurement that utilize traditional electric and magnetic transducer designs to generate a signal while enabling this backaction evasion. We consider the general readout of this signal via electric or magnetic field sensing by creating toy models analogous to the standard optomechanical position-sensing problem, thereby facilitating the assessment of measurement-added noise. Using simple models that characterize a wide range of transducers, we find that the choice of readout scheme -- voltage or current -- for each mechanical detector configuration implies access to either the position or velocity of the mechanical sub-system. This in turn suggests a path forward for key fundamental physics experiments such as the direct detection of dark matter particles.
}, url = {https://arxiv.org/abs/2311.09587}, author = {Brittany Richman and Sohitri Ghosh and Daniel Carney and Gerard Higgins and Peter Shawhan and C. J. Lobb and Jacob M. Taylor} } @article {3428, title = {Generalized Hybrid Search and Applications to Blockchain and Hash Function Security}, year = {2023}, month = {11/7/2023}, abstract = {In this work we first examine the hardness of solving various search problems by hybrid quantum-classical strategies, namely, by algorithms that have both quantum and classical capabilities. We then construct a hybrid quantum-classical search algorithm and analyze its success probability. Regarding the former, for search problems that are allowed to have multiple solutions and in which the input is sampled according to arbitrary distributions we establish their hybrid quantum-classical query complexities -- i.e., given a fixed number of classical and quantum queries, determine what is the probability of solving the search task. At a technical level, our results generalize the framework for hybrid quantum-classical search algorithms proposed by Rosmanis. Namely, for an arbitrary distribution D on Boolean functions, the probability an algorithm equipped with $\tau_c$ classical and $\tau_q$ quantum queries succeeds in finding a preimage of 1 for a function sampled from D is at most $\nu_D \cdot (2\sqrt{\tau_c} + 2\tau_q + 1)^2$, where $\nu_D$ captures the average (over D) fraction of preimages of 1. As applications of our hardness results, we first revisit and generalize the security of the Bitcoin protocol called the Bitcoin backbone, to a setting where the adversary has both quantum and classical capabilities, presenting a new hybrid honest majority condition necessary for the protocol to properly operate. Secondly, we examine the generic security of hash functions against hybrid adversaries.
Regarding our second contribution, we design a hybrid algorithm which first spends all of its classical queries and in the second stage runs a {\textquoteleft}{\textquoteleft}modified Grover{\textquoteright}{\textquoteright} where the initial state depends on the distribution D. We show how to analyze its success probability for arbitrary target distributions and, importantly, its optimality for the uniform and the Bernoulli distribution cases.
}, url = {https://arxiv.org/abs/2311.03723}, author = {Alexandru Cojocaru and Juan Garay and Fang Song} } @article {3356, title = {Hamiltonians whose low-energy states require $\Omega(n)$ T gates}, year = {2023}, month = {10/2/2023}, abstract = {The recent resolution of the NLTS Conjecture [ABN22] establishes a prerequisite to the Quantum PCP (QPCP) Conjecture through a novel use of newly-constructed QLDPC codes [LZ22]. Even with NLTS now solved, there remain many independent and unresolved prerequisites to the QPCP Conjecture, such as the NLSS Conjecture of [GL22]. In this work we focus on a specific and natural prerequisite to both NLSS and the QPCP Conjecture, namely, the existence of local Hamiltonians whose low-energy states all require $\omega(\log n)$ T gates to prepare. In fact, we prove a stronger result which is not necessarily implied by either conjecture: we construct local Hamiltonians whose low-energy states require $\Omega(n)$ T gates. Following a previous work [CCNN23], we further show that our procedure can be applied to the NLTS Hamiltonians of [ABN22] to yield local Hamiltonians whose low-energy states require both $\Omega(\log n)$-depth and $\Omega(n)$ T gates to prepare. Our results utilize a connection between T-count and stabilizer groups, which was recently applied in the context of learning low T-count states [GIKL23a, GIKL23b, GIKL23c].
}, url = {https://arxiv.org/abs/2310.01347}, author = {Nolan J. Coble and Matthew Coudron and Jon Nelson and Seyed Sajjad Nezhadi} } @article {3352, title = {Ion Trap with In-Vacuum High Numerical Aperture Imaging for a Dual-Species Modular Quantum Computer}, year = {2023}, month = {10/10/2023}, abstract = {Photonic interconnects between quantum systems will play a central role in both scalable quantum computing and quantum networking. Entanglement of remote qubits via photons has been demonstrated in many platforms; however, improving the rate of entanglement generation will be instrumental for integrating photonic links into modular quantum computers. We present an ion trap system that has the highest reported free-space photon collection efficiency for quantum networking. We use a pair of in-vacuum aspheric lenses, each with a numerical aperture of 0.8, to couple 10\% of the 493 nm photons emitted from a $^{138}$Ba$^{+}$ ion into single-mode fibers. We also demonstrate that proximal effects of the lenses on the ion position and motion can be mitigated.
}, url = {https://arxiv.org/abs/2310.07058}, author = {Allison L. Carter and Jameson O{\textquoteright}Reilly and George Toh and Sagnik Saha and Mikhail Shalaev and Isabella Goetting and Christopher Monroe} } @article {3252, title = {Local Hamiltonians with no low-energy stabilizer states}, year = {2023}, month = {2/28/2023}, abstract = {The recently-defined No Low-energy Sampleable States (NLSS) conjecture of Gharibian and Le Gall [GL22] posits the existence of a family of local Hamiltonians where all states of low-enough constant energy do not have succinct representations allowing perfect sampling access. States that can be prepared using only Clifford gates (i.e. stabilizer states) are an example of sampleable states, so the NLSS conjecture implies the existence of local Hamiltonians whose low-energy space contains no stabilizer states. We describe families that exhibit this requisite property via a simple alteration to local Hamiltonians corresponding to CSS codes. Our method can also be applied to the recent NLTS Hamiltonians of Anshu, Breuckmann, and Nirkhe [ABN22], resulting in a family of local Hamiltonians whose low-energy space contains neither stabilizer states nor trivial states. We hope that our techniques will eventually be helpful for constructing Hamiltonians which simultaneously satisfy NLSS and NLTS.
}, url = {https://arxiv.org/abs/2302.14755}, author = {Nolan J. Coble and Matthew Coudron and Jon Nelson and Seyed Sajjad Nezhadi} } @article {3401, title = {Logical quantum processor based on reconfigurable atom arrays}, journal = {Nature}, year = {2023}, month = {12/7/2023}, issn = {1476-4687}, doi = {10.1038/s41586-023-06927-3}, url = {https://arxiv.org/abs/2312.03982}, author = {Bluvstein, Dolev and Evered, Simon J. and Geim, Alexandra A. and Li, Sophie H. and Zhou, Hengyun and Manovitz, Tom and Ebadi, Sepehr and Cain, Madelyn and Kalinowski, Marcin and Hangleiter, Dominik and Ataides, J. Pablo Bonilla and Maskara, Nishad and Cong, Iris and Gao, Xun and Rodriguez, Pedro Sales and Karolyshyn, Thomas and Semeghini, Giulia and Gullans, Michael J. and Greiner, Markus and Vuletic, Vladan and Lukin, Mikhail D.} } @article {3247, title = {The maximum refractive index of an atomic crystal - from quantum optics to quantum chemistry}, year = {2023}, month = {3/20/2023}, abstract = {All known optical materials have an index of refraction of order unity. Despite the tremendous implications that an ultrahigh index could have for optical technologies, little research has been done on why the refractive index of materials is universally small, and whether this observation is fundamental. Here, we investigate the index of an ordered arrangement of atoms, as a function of atomic density. At dilute densities, this problem falls into the realm of quantum optics, where atoms do not interact with one another except via the scattering of light. On the other hand, when the lattice constant becomes comparable to the Bohr radius, the electronic orbitals begin to overlap, giving rise to quantum chemistry. We present a minimal model that allows for a unifying theory of index spanning these two regimes. 
A key aspect is the treatment of multiple light scattering, which can be highly non-perturbative over a large density range, and which is the reason that conventional theories of the index break down. In the quantum optics regime, we show that ideal light-matter interactions can have a single-mode nature, allowing for a purely real refractive index that grows with density as $(N/V)^{1/3}$. At the onset of quantum chemistry, we show how two physical mechanisms (excited electron tunneling dynamics and the buildup of electronic density-density correlations) can open up inelastic or spatial multi-mode light scattering processes, which ultimately reduce the index back to order unity while introducing absorption. Around the onset of chemistry, our theory predicts that ultrahigh index ($n \sim 30$), low-loss materials could in principle be allowed by the laws of nature.
}, url = {https://arxiv.org/abs/2303.10998}, author = {Francesco Andreoli and Bennet Windt and Stefano Grava and Gian Marcello Andolina and Michael J. Gullans and Alexander A. High and Darrick E. Chang} } @article {3395, title = {Microwave signal processing using an analog quantum reservoir computer}, year = {2023}, month = {12/26/2023}, abstract = {Quantum reservoir computing (QRC) has been proposed as a paradigm for performing machine learning with quantum processors where the training is efficient in the number of required runs of the quantum processor and takes place in the classical domain, avoiding the issue of barren plateaus in parameterized-circuit quantum neural networks. It is natural to consider using a quantum processor based on superconducting circuits to classify microwave signals that are analog -- continuous in time. However, while theoretical proposals of analog QRC exist, to date QRC has been implemented using circuit-model quantum systems -- imposing a discretization of the incoming signal in time, with each time point input by executing a gate operation. In this paper we show how a quantum superconducting circuit comprising an oscillator coupled to a qubit can be used as an analog quantum reservoir for a variety of classification tasks, achieving high accuracy on all of them. Our quantum system was operated without artificially discretizing the input data, directly taking in microwave signals. Our work does not attempt to address the question of whether QRCs could provide a quantum computational advantage in classifying pre-recorded classical signals. 
However, beyond illustrating that sophisticated tasks can be performed with a modest-size quantum system and inexpensive training, our work opens up the possibility of achieving a different kind of advantage than a purely computational advantage: superconducting circuits can act as extremely sensitive detectors of microwave photons; our work demonstrates processing of ultra-low-power microwave signals in our superconducting circuit, and by combining sensitive detection with QRC processing within the same system, one could achieve a quantum sensing-computational advantage, i.e., an advantage in the overall analysis of microwave signals comprising just a few photons.
}, url = {https://arxiv.org/abs/2312.16166}, author = {Alen Senanian and Sridhar Prabhu and Vladimir Kremenetski and Saswata Roy and Yingkang Cao and Jeremy Kline and Tatsuhiro Onodera and Logan G. Wright and Xiaodi Wu and Valla Fatemi and Peter L. McMahon} } @article {3358, title = {Non-equilibrium critical scaling and universality in a quantum simulator}, year = {2023}, month = {9/19/2023}, abstract = {Universality and scaling laws are hallmarks of equilibrium phase transitions and critical phenomena. However, extending these concepts to non-equilibrium systems is an outstanding challenge. Despite recent progress in the study of dynamical phases, the universality classes and scaling laws for non-equilibrium phenomena are far less understood than those in equilibrium. In this work, using a trapped-ion quantum simulator with single-ion resolution, we investigate the non-equilibrium nature of critical fluctuations following a quantum quench to the critical point. We probe the scaling of spin fluctuations after a series of quenches to the critical Hamiltonian of a long-range Ising model. With systems of up to 50 spins, we show that the amplitude and timescale of the post-quench fluctuations scale with system size with distinct universal critical exponents. While a generic quench can lead to thermal critical behaviour, we find that a second quench from one critical state to another (i.e. a double quench) results in critical behaviour that does not have an equilibrium counterpart. Our results demonstrate the ability of quantum simulators to explore universal scaling beyond the equilibrium paradigm.
}, url = {https://arxiv.org/abs/2309.10856}, author = {A. De and P. Cook and K. Collins and W. Morong and D. Paz and P. Titum and G. Pagano and A. V. Gorshkov and M. Maghrebi and C. Monroe} } @article {3413, title = {Observation of a finite-energy phase transition in a one-dimensional quantum simulator}, year = {2023}, month = {10/30/2023}, abstract = {One of the most striking many-body phenomena in nature is the sudden change of macroscopic properties as the temperature or energy reaches a critical value. Such equilibrium transitions have been predicted and observed in two and three spatial dimensions, but have long been thought not to exist in one-dimensional (1D) systems. Fifty years ago, Dyson and Thouless pointed out that a phase transition in 1D can occur in the presence of long-range interactions, but an experimental realization has so far not been achieved due to the requirement to both prepare equilibrium states and realize sufficiently long-range interactions. Here we report on the first experimental demonstration of a finite-energy phase transition in 1D. We use the simple observation that finite-energy states can be prepared by time-evolving product initial states and letting them thermalize under the dynamics of a many-body Hamiltonian. By preparing initial states with different energies in a 1D trapped-ion quantum simulator, we study the finite-energy phase diagram of a long-range interacting quantum system. We observe a ferromagnetic equilibrium phase transition as well as a crossover from a low-energy polarized paramagnet to a high-energy unpolarized paramagnet in a system of up to 23 spins, in excellent agreement with numerical simulations. Our work demonstrates the ability of quantum simulators to realize and study previously inaccessible phases at finite energy density.
}, url = {https://arxiv.org/abs/2310.19869}, author = {Alexander Schuckert and Or Katz and Lei Feng and Eleanor Crane and Arinjoy De and Mohammad Hafezi and Alexey V. Gorshkov and Christopher Monroe} } @article {3314, title = {Qafny: Quantum Program Verification Through Type-guided Classical Separation Logic}, year = {2023}, month = {7/12/2023}, abstract = {Formal verification has been proven instrumental to ensure that quantum programs implement their specifications but often requires a significant investment of time and labor. To address this challenge, we present Qafny, an automated proof system designed for verifying quantum programs. At its core, Qafny uses a type-guided quantum proof system that translates quantum operations to classical array operations. By modeling these operations as proof rules within a classical separation logic framework, Qafny provides automated support for the reasoning process that would otherwise be tedious and time-consuming. We prove the soundness and completeness of our proof system and implement a prototype compiler that transforms Qafny programs both into the Dafny programming language and into executable quantum circuits. Using Qafny, we demonstrate how to efficiently verify prominent quantum algorithms, including quantum-walk algorithms, Grover's search algorithm, and Shor's factoring algorithm, with significantly reduced human efforts.
}, url = {https://arxiv.org/abs/2211.06411}, author = {Liyi Li and Mingwei Zhu and Rance Cleaveland and Yi Lee and Le Chang and Xiaodi Wu} } @article {2448, title = {Quantum algorithm for estimating volumes of convex bodies}, journal = {ACM Transactions on Quantum Computing}, volume = {4}, year = {2023}, month = {4/2023}, chapter = {20}, abstract = {Estimating the volume of a convex body is a central problem in convex geometry and can be viewed as a continuous version of counting. We present a quantum algorithm that estimates the volume of an n-dimensional convex body within multiplicative error ε using O~(n3.5+n2.5/ε) queries to a membership oracle and O~(n5.5+n4.5/ε) additional arithmetic operations. For comparison, the best known classical algorithm uses O~(n4+n3/ε2) queries and O~(n6+n5/ε2) additional arithmetic operations. To the best of our knowledge, this is the first quantum speedup for volume estimation. Our algorithm is based on a refined framework for speeding up simulated annealing algorithms that might be of independent interest. This framework applies in the setting of \"Chebyshev cooling\", where the solution is expressed as a telescoping product of ratios, each having bounded variance. We develop several novel techniques when implementing our framework, including a theory of continuous-space quantum walks with rigorous bounds on discretization error.
}, url = {https://arxiv.org/abs/1908.03903}, author = {Shouvanik Chakrabarti and Andrew M. Childs and Shih-Han Hung and Tongyang Li and Chunhao Wang and Xiaodi Wu} } @article {3402, title = {Quantum algorithm for linear non-unitary dynamics with near-optimal dependence on all parameters}, year = {2023}, month = {12/6/2023}, abstract = {We introduce a family of identities that express general linear non-unitary evolution operators as a linear combination of unitary evolution operators, each solving a Hamiltonian simulation problem. This formulation can exponentially enhance the accuracy of the recently introduced linear combination of Hamiltonian simulation (LCHS) method [An, Liu, and Lin, Physical Review Letters, 2023]. For the first time, this approach enables quantum algorithms to solve linear differential equations with both optimal state preparation cost and near-optimal scaling in matrix queries on all parameters.
}, url = {https://arxiv.org/abs/2312.03916}, author = {Dong An and Andrew M. Childs and Lin Lin} } @article {3269, title = {Quantum Algorithms and the Power of Forgetting}, journal = {14th Innovations in Theoretical Computer Science Conference (ITCS 2023)}, volume = {251}, year = {2023}, pages = {37:1--37:22}, abstract = {The so-called welded tree problem provides an example of a black-box problem that can be solved exponentially faster by a quantum walk than by any classical algorithm [Andrew M. Childs et al., 2003]. Given the name of a special entrance vertex, a quantum walk can find another distinguished exit vertex using polynomially many queries, though without finding any particular path from entrance to exit. It has been an open problem for twenty years whether there is an efficient quantum algorithm for finding such a path, or if the path-finding problem is hard even for quantum computers. We show that a natural class of efficient quantum algorithms provably cannot find a path from entrance to exit. Specifically, we consider algorithms that, within each branch of their superposition, always store a set of vertex labels that form a connected subgraph including the entrance, and that only provide these vertex labels as inputs to the oracle. While this does not rule out the possibility of a quantum algorithm that efficiently finds a path, it is unclear how an algorithm could benefit by deviating from this behavior. Our no-go result suggests that, for some problems, quantum algorithms must necessarily forget the path they take to reach a solution in order to outperform classical computation.
}, isbn = {978-3-95977-263-1}, doi = {10.4230/LIPIcs.ITCS.2023.37}, url = {https://drops.dagstuhl.de/opus/volltexte/2023/17540}, author = {Andrew M. Childs and Coudron, Matthew and Gilani, Amin Shiraz}, editor = {Tauman Kalai, Yael} } @article {3400, title = {Quantum Algorithms for Simulating Nuclear Effective Field Theories}, year = {2023}, month = {12/8/2023}, abstract = {Quantum computers offer the potential to simulate nuclear processes that are classically intractable. With the goal of understanding the necessary quantum resources, we employ state-of-the-art Hamiltonian-simulation methods, and conduct a thorough algorithmic analysis, to estimate the qubit and gate costs to simulate low-energy effective field theories (EFTs) of nuclear physics. In particular, within the framework of nuclear lattice EFT, we obtain simulation costs for the leading-order pionless and pionful EFTs. We consider both static pions represented by a one-pion-exchange potential between the nucleons, and dynamical pions represented by relativistic bosonic fields coupled to non-relativistic nucleons. We examine the resource costs for the tasks of time evolution and energy estimation for physically relevant scales. We account for model errors associated with truncating either long-range interactions in the one-pion-exchange EFT or the pionic Hilbert space in the dynamical-pion EFT, and for algorithmic errors associated with product-formula approximations and quantum phase estimation. Our results show that the pionless EFT is the least costly to simulate and the dynamical-pion theory is the costliest. We demonstrate how symmetries of the low-energy nuclear Hamiltonians can be utilized to obtain tighter error bounds on the simulation algorithm. By retaining the locality of nucleonic interactions when mapped to qubits, we achieve reduced circuit depth and substantial parallelization. 
We further develop new methods to bound the algorithmic error for classes of fermionic Hamiltonians that preserve the number of fermions, and demonstrate that reasonably tight Trotter error bounds can be achieved by explicitly computing nested commutators of Hamiltonian terms. This work highlights the importance of combining physics insights and algorithmic advancement in reducing quantum-simulation costs.
}, url = {https://arxiv.org/abs/2312.05344}, author = {James D. Watson and Jacob Bringewatt and Alexander F. Shaw and Andrew M. Childs and Alexey V. Gorshkov and Zohreh Davoudi} } @article {3368, title = {Quantum computation of dynamical quantum phase transitions and entanglement tomography in a lattice gauge theory}, year = {2023}, month = {10/6/2023}, abstract = {Strongly-coupled gauge theories far from equilibrium may exhibit unique features that could illuminate the physics of the early universe and of hadron and ion colliders. Studying real-time phenomena has proven challenging with classical-simulation methods, but is a natural application of quantum simulation. To demonstrate this prospect, we quantum compute non-equal time correlation functions and perform entanglement tomography of non-equilibrium states of a simple lattice gauge theory, the Schwinger model, using a trapped-ion quantum computer by IonQ Inc. As an ideal target for near-term devices, a recently-predicted [Zache et al., Phys. Rev. Lett. 122, 050403 (2019)] dynamical quantum phase transition in this model is studied by preparing, quenching, and tracking the subsequent non-equilibrium dynamics in three ways: i) overlap echos signaling dynamical transitions, ii) non-equal time correlation functions with an underlying topological nature, and iii) the entanglement structure of non-equilibrium states, including entanglement Hamiltonians. These results constitute the first observation of a dynamical quantum phase transition in a lattice gauge theory on a quantum computer, and are a first step toward investigating topological phenomena in nuclear and high-energy physics using quantum technologies.
}, url = {https://arxiv.org/abs/2210.03089}, author = {Niklas Mueller and Joseph A. Carolan and Andrew Connelly and Zohreh Davoudi and Eugene F. Dumitrescu and K{\"u}bra Yeter-Aydeniz} } @article {3362, title = {Quantum Lego Expansion Pack: Enumerators from Tensor Networks}, year = {2023}, month = {8/9/2023}, abstract = {We provide the first tensor network method for computing quantum weight enumerator polynomials in the most general form. As a corollary, if a quantum code has a known tensor network construction of its encoding map, our method produces an algorithm that computes its distance. For non-(Pauli)-stabilizer codes, this constitutes the current best algorithm for computing the code distance. For degenerate stabilizer codes, it can provide up to an exponential speed up compared to the current methods. We also introduce a few novel applications of different weight enumerators. In particular, for any code built from the quantum lego method, we use enumerators to construct its (optimal) decoders under any i.i.d. single qubit or qudit error channels and discuss their applications for computing logical error rates. As a proof of principle, we perform exact analyses of the deformed surface codes, the holographic pentagon code, and the 2d Bacon-Shor code under (biased) Pauli noise and limited instances of coherent error at sizes that are inaccessible by brute force.
}, url = {https://arxiv.org/abs/2308.05152}, author = {ChunJun Cao and Michael J. Gullans and Brad Lackey and Zitao Wang} } @article {3355, title = {Quantum Sensing with Erasure Qubits}, year = {2023}, month = {10/2/2023}, abstract = {The dominant noise in an \"erasure qubit\" is an erasure -- a type of error whose occurrence and location can be detected. Erasure qubits have potential to reduce the overhead associated with fault tolerance. To date, research on erasure qubits has primarily focused on quantum computing and quantum networking applications. Here, we consider the applicability of erasure qubits to quantum sensing and metrology. We show theoretically that, for the same level of noise, an erasure qubit acts as a more precise sensor or clock compared to its non-erasure counterpart. We experimentally demonstrate this by artificially injecting either erasure errors (in the form of atom loss) or dephasing errors into a differential optical lattice clock comparison, and observe enhanced precision in the case of erasure errors for the same injected error rate. Similar benefits of erasure qubits to sensing can be realized in other quantum platforms like Rydberg atoms and superconducting qubits
}, url = {https://arxiv.org/abs/2310.01512}, author = {Pradeep Niroula and Jack Dolde and Xin Zheng and Jacob Bringewatt and Adam Ehrenberg and Kevin C. Cox and Jeff Thompson and Michael J. Gullans and Shimon Kolkowitz and Alexey V. Gorshkov} } @article {3397, title = {Quantum-centric Supercomputing for Materials Science: A Perspective on Challenges and Future Directions}, year = {2023}, month = {12/14/2023}, abstract = {Computational models are an essential tool for the design, characterization, and discovery of novel materials. Hard computational tasks in materials science stretch the limits of existing high-performance supercomputing centers, consuming much of their simulation, analysis, and data resources. Quantum computing, on the other hand, is an emerging technology with the potential to accelerate many of the computational tasks needed for materials science. In order to do that, the quantum technology must interact with conventional high-performance computing in several ways: approximate results validation, identification of hard problems, and synergies in quantum-centric supercomputing. In this paper, we provide a perspective on how quantum-centric supercomputing can help address critical computational problems in materials science, the challenges to face in order to solve representative use cases, and new suggested directions.
}, url = {https://arxiv.org/abs/2312.09733}, author = {Yuri Alexeev and Maximilian Amsler and Paul Baity and Marco Antonio Barroca and Sanzio Bassini and Torey Battelle and Daan Camps and David Casanova and Young jai Choi and Frederic T. Chong and Charles Chung and Chris Codella and Antonio D. Corcoles and James Cruise and Alberto Di Meglio and Jonathan Dubois and Ivan Duran and Thomas Eckl and Sophia Economou and Stephan Eidenbenz and Bruce Elmegreen and Clyde Fare and Ismael Faro and Cristina Sanz Fern{\'a}ndez and Rodrigo Neumann Barros Ferreira and Keisuke Fuji and Bryce Fuller and Laura Gagliardi and Giulia Galli and Jennifer R. Glick and Isacco Gobbi and Pranav Gokhale and Salvador de la Puente Gonzalez and Johannes Greiner and Bill Gropp and Michele Grossi and Emmanuel Gull and Burns Healy and Benchen Huang and Travis S. Humble and Nobuyasu Ito and Artur F. Izmaylov and Ali Javadi-Abhari and Douglas Jennewein and Shantenu Jha and Liang Jiang and Barbara Jones and Wibe Albert de Jong and Petar Jurcevic and William Kirby and Stefan Kister and Masahiro Kitagawa and Joel Klassen and Katherine Klymko and Kwangwon Koh and Masaaki Kondo and Doga Murat Kurkcuoglu and Krzysztof Kurowski and Teodoro Laino and Ryan Landfield and Matt Leininger and Vicente Leyton-Ortega and Ang Li and Meifeng Lin and Junyu Liu and Nicolas Lorente and Andre Luckow and Simon Martiel and Francisco Martin-Fernandez and Margaret Martonosi and Claire Marvinney and Arcesio Castaneda Medina and Dirk Merten and Antonio Mezzacapo and Kristel Michielsen and Abhishek Mitra and Tushar Mittal and Kyungsun Moon and Joel Moore and Mario Motta and Young-Hye Na and Yunseong Nam and Prineha Narang and Yu-ya Ohnishi and Daniele Ottaviani and Matthew Otten and Scott Pakin and Vincent R. 
Pascuzzi and Ed Penault and Tomasz Piontek and Jed Pitera and Patrick Rall and Gokul Subramanian Ravi and Niall Robertson and Matteo Rossi and Piotr Rydlichowski and Hoon Ryu and Georgy Samsonidze and Mitsuhisa Sato and Nishant Saurabh and Vidushi Sharma and Kunal Sharma and Soyoung Shin and George Slessman and Mathias Steiner and Iskandar Sitdikov and In-Saeng Suh and Eric Switzer and Wei Tang and Joel Thompson and Synge Todo and Minh Tran and Dimitar Trenev and Christian Trott and Huan-Hsin Tseng and Esin Tureci and David Garc{\'\i}a Valinas and Sofia Vallecorsa and Christopher Wever and Konrad Wojciechowski and Xiaodi Wu and Shinjae Yoo and Nobuyuki Yoshioka and Victor Wen-zhe Yu and Seiji Yunoki and Sergiy Zhuk and Dmitry Zubarev} } @article {2859, title = {Shadow process tomography of quantum channels}, journal = {Phys. Rev. A}, volume = {107}, year = {2023}, month = {4/4/2023}, abstract = {Quantum process tomography is a critical capability for building quantum computers, enabling quantum networks, and understanding quantum sensors. Like quantum state tomography, the process tomography of an arbitrary quantum channel requires a number of measurements that scale exponentially in the number of quantum bits affected. However, the recent field of shadow tomography, applied to quantum states, has demonstrated the ability to extract key information about a state with only polynomially many measurements. In this work, we apply the concepts of shadow state tomography to the challenge of characterizing quantum processes. We make use of the Choi isomorphism to directly apply rigorous bounds from shadow state tomography to shadow process tomography, and we find additional bounds on the number of measurements that are unique to process tomography. Our results, which include algorithms for implementing shadow process tomography enable new techniques including evaluation of channel concatenation and the application of channels to shadows of quantum states. 
This provides a dramatic improvement for understanding large-scale quantum systems.
}, doi = {10.1103/PhysRevA.107.042403}, url = {https://arxiv.org/abs/2110.03629}, author = {Jonathan Kunjummen and Minh C. Tran and Daniel Carney and Jacob M. Taylor} } @article {3357, title = {Streaming quantum state purification}, year = {2023}, month = {9/28/2023}, abstract = {Quantum state purification is the task of recovering a nearly pure copy of an unknown pure quantum state using multiple noisy copies of the state. This basic task has applications to quantum communication over noisy channels and quantum computation with imperfect devices, but has only been studied previously for the case of qubits. We derive an efficient purification procedure based on the swap test for qudits of any dimension, starting with any initial error parameter. Treating the initial error parameter and the dimension as constants, we show that our procedure has sample complexity asymptotically optimal in the final error parameter. Our protocol has a simple recursive structure that can be applied when the states are provided one at a time in a streaming fashion, requiring only a small quantum memory to implement.
}, url = {https://arxiv.org/abs/2309.16387}, author = {Andrew M. Childs and Honghao Fu and Debbie Leung and Zhi Li and Maris Ozols and Vedang Vyas} } @article {3266, title = {Strongly incoherent gravity}, year = {2023}, month = {1/20/2023}, abstract = {While most fundamental interactions in nature are known to be mediated by quantized fields, the possibility has been raised that gravity may behave differently. Making this concept precise enough to test requires consistent models. Here we construct an explicit example of a theory where a non-entangling version of an arbitrary two-body potential V(r) arises from local measurements and feedback forces. While a variety of such theories exist, our construction causes particularly strong decoherence compared to more subtle approaches. Regardless, expectation values of observables obey the usual classical dynamics, while the interaction generates no entanglement. Applied to the Newtonian potential, this produces a non-relativistic model of gravity with fundamental loss of unitarity. The model contains a pair of free parameters, a substantial range of which is not excluded by observations to date. As an alternative to testing entanglement properties, we show that the entire remaining parameter space can be tested by looking for loss of quantum coherence in small systems like atom interferometers coupled to oscillating source masses.
}, url = {https://arxiv.org/abs/2301.08378}, author = {Daniel Carney and Jacob M. Taylor} } @article {3288, title = {Thermally driven quantum refrigerator autonomously resets superconducting qubit}, year = {2023}, month = {5/26/2023}, abstract = {The first thermal machines steered the industrial revolution, but their quantum analogs have yet to prove useful. Here, we demonstrate a useful quantum absorption refrigerator formed from superconducting circuits. We use it to reset a transmon qubit to a temperature lower than that achievable with any one available bath. The process is driven by a thermal gradient and is autonomous -- requires no external control. The refrigerator exploits an engineered three-body interaction between the target qubit and two auxiliary qudits coupled to thermal environments. The environments consist of microwave waveguides populated with synthesized thermal photons. The target qubit, if initially fully excited, reaches a steady-state excited-level population of $5\times10^{-4}\pm 5\times10^{-4}$ (an effective temperature of 23.5~mK) in about 1.6~$\mu$s. Our results epitomize how quantum thermal machines can be leveraged for quantum information-processing tasks. They also initiate a path toward experimental studies of quantum thermodynamics with superconducting circuits coupled to propagating thermal microwave fields.
}, url = {https://arxiv.org/abs/2305.16710}, author = {Mohammed Ali Aamir and Paul Jamet Suria and Jos{\'e} Antonio Mar{\'\i}n Guzm{\'a}n and Claudia Castillo-Moreno and Jeffrey M. Epstein and Nicole Yunger Halpern and Simone Gasparinetti} } @article {3406, title = {{\AE} codes}, year = {2023}, month = {11/21/2023}, abstract = {Diatomic molecular codes [{arXiv:1911.00099}] are designed to encode quantum information in the orientation of a diatomic molecule, allowing error correction from small torques and changes in angular momentum. Here, we directly study noise native to atomic and molecular platforms -- spontaneous emission, stray electromagnetic fields, and Raman scattering -- and derive simple necessary and sufficient conditions for codes to protect against such noise. We identify existing and develop new absorption-emission ({\AE}) codes that are more practical than molecular codes, require lower average momentum, can directly protect against photonic processes up to arbitrary order, and are applicable to a broader set of atomic and molecular systems.
}, url = {https://arxiv.org/abs/2311.12324}, author = {Shubham P. Jain and Eric R. Hudson and Wesley C. Campbell and Victor V. Albert} } @article {3078, title = {Accurate and Efficient Quantum Computations of Molecular Properties Using Daubechies Wavelet Molecular Orbitals: A Benchmark Study against Experimental Data}, journal = {PRX Quantum}, volume = {3}, year = {2022}, month = {5/28/2022}, pages = {020360}, abstract = {Although quantum computation (QC) is regarded as a promising numerical method for computational quantum chemistry, current applications of quantum-chemistry calculations on quantum computers are limited to small molecules. This limitation can be ascribed to technical problems in building and manipulating more qubits and the associated complicated operations of quantum gates in a quantum circuit when the size of the molecular system becomes large. As a result, reducing the number of required qubits is necessary to make QC practical. Currently, the minimal STO-3G basis set is commonly used in benchmark studies because it requires the minimum number of spin orbitals. Nonetheless, the accuracy of using STO-3G is generally low and thus cannot provide useful predictions. We propose to adopt Daubechies wavelet functions as an accurate and efficient method for QCs of molecular electronic properties. We demonstrate that a minimal basis set constructed from Daubechies wavelet basis can yield accurate results through a better description of the molecular Hamiltonian, while keeping the number of spin orbitals minimal. With the improved Hamiltonian through Daubechies wavelets, we calculate vibrational frequencies for H2 and LiH using quantum-computing algorithm to show that the results are in excellent agreement with experimental data. 
As a result, we achieve quantum calculations in which accuracy is comparable with that of the full configuration interaction calculation using the cc-pVDZ basis set, whereas the computational cost is the same as that of a STO-3G calculation. Thus, our work provides a more efficient and accurate representation of the molecular Hamiltonian for efficient QCs of molecular systems, and for the first time demonstrates that predictions in agreement with experimental measurements are possible to be achieved with quantum resources available in near-term quantum computers.
}, doi = {10.1103/PRXQuantum.3.020360}, url = {https://arxiv.org/abs/2205.14476}, author = {Cheng-Lin Hong and Ting Tsai and Jyh-Pin Chou and Peng-Jen Chen and Pei-Kai Tsai and Yu-Cheng Chen and En-Jui Kuo and David Srolovitz and Alice Hu and Yuan-Chung Cheng and Hsi-Sheng Goan} } @book {2982, title = {Analogue Quantum Simulation: A New Instrument for Scientific Understanding}, year = {2022}, pages = {83--102}, publisher = {Springer Nature}, organization = {Springer Nature}, abstract = {Analyzes analogue quantum simulation philosophically.\ Provides a framework to support the goals of scientists.\ Useful to both working scientists and philosophers of science.
}, doi = {10.1007/978-3-030-87216-8_6}, author = {Dominik Hangleiter and Jacques Carolan and Karim P. Y. Th{\'e}bault} } @article {3001, title = {Approximating Output Probabilities of Shallow Quantum Circuits which are Geometrically-local in any Fixed Dimension}, journal = {Leibniz International Proceedings in Informatics (LIPIcs)}, volume = {232}, year = {2022}, month = {4/7/2022}, pages = {9:1--9:17}, type = {17th Conference on the Theory of Quantum Computation, Communication and Cryptography (TQC 2022)}, abstract = {We present a classical algorithm that, for any D-dimensional geometrically-local, quantum circuit C of polylogarithmic-depth, and any bit string $x \in \{0,1\}^n$, can compute the quantity $|\langle x|C|0^{\otimes n}\rangle|^2$ to within any inverse-polynomial additive error in quasi-polynomial time, for any fixed dimension D. This is an extension of the result [CC21], which originally proved this result for D=3. To see why this is interesting, note that, while the D=1 case of this result follows from standard use of Matrix Product States, known for decades, the D=2 case required novel and interesting techniques introduced in [BGM19]. Extending to the case D=3 was even more laborious and required further new techniques introduced in [CC21]. Our work here shows that, while handling each new dimension has historically required a new insight, and fixed algorithmic primitive, based on known techniques for $D \leq 3$, we can now handle any fixed dimension $D > 3$.
Our algorithm uses the Divide-and-Conquer framework of [CC21] to approximate the desired quantity via several instantiations of the same problem type, each involving D-dimensional circuits on about half the number of qubits as the original. This division step is then applied recursively, until the width of the recursively decomposed circuits in the Dth dimension is so small that they can effectively be regarded as (D\−1)-dimensional problems by absorbing the small width in the Dth dimension into the qudit structure at the cost of a moderate increase in runtime. The main technical challenge lies in ensuring that the more involved portions of the recursive circuit decomposition and error analysis from [CC21] still hold in higher dimensions, which requires small modifications to the analysis in some places.
The practical benefits of hybrid quantum information processing hardware that contains continuous-variable objects (bosonic modes such as mechanical or electromagnetic oscillators) in addition to traditional (discrete-variable) qubits have recently been demonstrated by experiments with bosonic codes that reach the break-even point for quantum error correction and by efficient Gaussian boson sampling simulation of the Franck-Condon spectra of triatomic molecules that is well beyond the capabilities of current qubit-only hardware. The goal of this Co-design Center for Quantum Advantage (C2QA) project is to develop an instruction set architecture (ISA) for hybrid qubit/bosonic mode systems that contains an inventory of the fundamental operations and measurements that are possible in such hardware. The corresponding abstract machine model (AMM) would also contain a description of the appropriate error models associated with the gates, measurements and time evolution of the hardware. This information has been implemented as an extension of Qiskit. Qiskit is an opensource software development toolkit (SDK) for simulating the quantum state of a quantum circuit on a system with Python 3.7+ and for running the same circuits on prototype hardware within the IBM Quantum Lab. We introduce the Bosonic Qiskit software to enable the simulation of hybrid qubit/bosonic systems using the existing Qiskit software development kit. This implementation can be used for simulating new hybrid systems, verifying proposed physical systems, and modeling systems larger than can currently be constructed. We also cover tutorials and example use cases included within the software to study Jaynes- Cummings models, bosonic Hubbard models, plotting Wigner functions and animations, and calculating maximum likelihood estimations using Wigner functions.
}, keywords = {Emerging Technologies (cs.ET), FOS: Computer and information sciences, FOS: Physical sciences, Quantum Physics (quant-ph)}, doi = {10.48550/ARXIV.2209.11153}, url = {https://arxiv.org/abs/2209.11153}, author = {Stavenger, Timothy J and Crane, Eleanor and Smith, Kevin and Kang, Christopher T and Girvin, Steven M and Wiebe, Nathan} } @article {3152, title = {Bounding the Minimum Time of a Quantum Measurement}, year = {2022}, month = {9/13/2022}, abstract = {Measurements take a singular role in quantum theory. While they are often idealized as an instantaneous process, this is in conflict with all other physical processes in nature. In this Letter, we adopt a standpoint where the interaction with an environment is a crucial ingredient for the occurrence of a measurement. Within this framework, we derive lower bounds on the time needed for a measurement to occur. Our bound scales proportionally to the change in entropy of the measured system, and inversely proportional to the number of possible measurement outcomes and the interaction strength driving the measurement. We evaluate our bound in two examples where the environment is modelled by bosonic modes and the measurement apparatus is modelled by spins or bosons.
}, keywords = {FOS: Physical sciences, Quantum Physics (quant-ph)}, doi = {10.48550/ARXIV.2209.06248}, url = {https://arxiv.org/abs/2209.06248}, author = {Shettell, Nathan and Centrone, Federico and Garc{\'\i}a-Pintos, Luis Pedro} } @article {2930, title = {Classification of (2+1)D invertible fermionic topological phases with symmetry}, journal = {Phys. Rev. B}, volume = {105}, year = {2022}, month = {5/30/2022}, abstract = {We provide a classification of invertible topological phases of interacting fermions with symmetry in two spatial dimensions for general fermionic symmetry groups Gf and general values of the chiral central charge c\−. Here Gf is a central extension of a bosonic symmetry group Gb by fermion parity, (\−1)F, specified by a second cohomology class [ω2]\∈H2(Gb,Z2). Our approach proceeds by gauging fermion parity and classifying the resulting Gb symmetry-enriched topological orders while keeping track of certain additional data and constraints. We perform this analysis through two perspectives, using G-crossed braided tensor categories and Spin(2c\−)1 Chern-Simons theory coupled to a background G gauge field. These results give a way to characterize and classify invertible fermionic topological phases in terms of a concrete set of data and consistency equations, which is more physically transparent and computationally simpler than the more abstract methods using cobordism theory and spectral sequences. Our results also generalize and provide a different approach to the recent classification of fermionic symmetry-protected topological phases by Wang and Gu, which have chiral central charge c\−=0. We show how the 10-fold way classification of topological insulators and superconductors fits into our scheme, along with general non-perturbative constraints due to certain choices of c\− and Gf. 
Mathematically, our results also suggest an explicit general parameterization of deformation classes of (2+1)D invertible topological quantum field theories with Gf symmetry.
}, doi = {https://doi.org/10.1103/PhysRevB.105.235143}, url = {https://arxiv.org/abs/2109.11039}, author = {Maissam Barkeshli and Yu-An Chen and Po-Shen Hsin and Naren Manjunath} } @article {3082, title = {Closing the Locality and Detection Loopholes in Multiparticle Entanglement Self-Testing}, journal = {Physical Review Letters}, volume = {128}, year = {2022}, month = {06/23/2022}, pages = {250401}, abstract = {First proposed by Mayers and Yao, self-testing provides a certification method to infer the underlying physics of quantum experiments in a black-box scenario. Numerous demonstrations have been reported to self-test various types of entangled states. However, all the multiparticle self-testing experiments reported so far suffer from both detection and locality loopholes. Here, we report the first experimental realization of multiparticle entanglement self-testing closing the locality loophole in a photonic system, and the detection loophole in a superconducting system, respectively. We certify three-party and four-party GHZ states with at least 0.84 (1) and 0.86 (3) fidelities in a device-independent way. These results can be viewed as a meaningful advance in multiparticle loophole-free self-testing, and also significant progress on the foundations of quantum entanglement certification.
}, doi = {10.1103/PhysRevLett.128.250401}, url = {https://doi.org/10.1103/PhysRevLett.128.250401}, author = {Dian Wu and Qi Zhao and Can Wang and Liang Huang and Yang-Fan Jiang and Bing Bai and You Zhou and Xue-Mei Gu and Feng-Ming Liu and Ying-Qiu Mao and Qi-Chao Sun and Ming-Cheng Chen and Jun Zhang and Cheng-Zhi Peng and Xiao-Bo Zhu and Qiang Zhang and Chao-Yang Lu and Jian-Wei Pan} } @article {3144, title = {Codimension-2 defects and higher symmetries in (3+1)D topological phases}, year = {2022}, month = {8/15/2022}, abstract = {(3+1)D topological phases of matter can host a broad class of non-trivial topological defects of codimension-1, 2, and 3, of which the well-known point charges and flux loops are special cases. The complete algebraic structure of these defects defines a higher category, and can be viewed as an emergent higher symmetry. This plays a crucial role both in the classification of phases of matter and the possible fault-tolerant logical operations in topological quantum error correcting codes. In this paper, we study several examples of such higher codimension defects from distinct perspectives. We mainly study a class of invertible codimension-2 topological defects, which we refer to as twist strings. We provide a number of general constructions for twist strings, in terms of gauging lower dimensional invertible phases, layer constructions, and condensation defects. We study some special examples in the context of Z2 gauge theory with fermionic charges, in Z2\×Z2 gauge theory with bosonic charges, and also in non-Abelian discrete gauge theories based on dihedral (Dn) and alternating (A6) groups. 
The intersection between twist strings and Abelian flux loops sources Abelian point charges, which defines an H4 cohomology class that characterizes part of an underlying 3-group symmetry of the topological order. The equations involving background gauge fields for the 3-group symmetry have been explicitly written down for various cases. We also study examples of twist strings interacting with non-Abelian flux loops (defining part of a non-invertible higher symmetry), examples of non-invertible codimension-2 defects, and examples of interplay of codimension-2 defects with codimension-1 defects. We also find an example of geometric, not fully topological, twist strings in (3+1)D A6 gauge theory.
}, keywords = {FOS: Mathematics, FOS: Physical sciences, High Energy Physics - Theory (hep-th), Mathematical Physics (math-ph), Quantum Algebra (math.QA), Quantum Physics (quant-ph), Strongly Correlated Electrons (cond-mat.str-el)}, doi = {10.48550/ARXIV.2208.07367}, url = {https://arxiv.org/abs/2208.07367}, author = {Barkeshli, Maissam and Chen, Yu-An and Huang, Sheng-Jie and Kobayashi, Ryohei and Tantivasadakarn, Nathanan and Zhu, Guanyu} } @article {3184, title = {Continuous Symmetry Breaking in a Trapped-Ion Spin Chain}, year = {2022}, month = {11/2/2022}, abstract = {One-dimensional systems exhibiting a continuous symmetry can host quantum phases of matter with true long-range order only in the presence of sufficiently long-range interactions. In most physical systems, however, the interactions are short-ranged, hindering the emergence of such phases in one dimension. Here we use a one-dimensional trapped-ion quantum simulator to prepare states with long-range spin order that extends over the system size of up to 23 spins and is characteristic of the continuous symmetry-breaking phase of matter. Our preparation relies on simultaneous control over an array of tightly focused individual-addressing laser beams, generating long-range spin-spin interactions. We also observe a disordered phase with frustrated correlations. We further study the phases at different ranges of interaction and the out-of-equilibrium response to symmetry-breaking perturbations. This work opens an avenue to study new quantum phases and out-of-equilibrium dynamics in low-dimensional systems.
}, keywords = {FOS: Physical sciences, Quantum Gases (cond-mat.quant-gas), Quantum Physics (quant-ph), Statistical Mechanics (cond-mat.stat-mech), Strongly Correlated Electrons (cond-mat.str-el)}, doi = {10.48550/ARXIV.2211.01275}, url = {https://arxiv.org/abs/2211.01275}, author = {Feng, Lei and Katz, Or and Haack, Casey and Maghrebi, Mohammad and Gorshkov, Alexey V. and Gong, Zhexuan and Cetina, Marko and Monroe, Christopher} } @article {3072, title = {A Convergence Theory for Over-parameterized Variational Quantum Eigensolvers}, year = {2022}, month = {5/25/2022}, abstract = {The Variational Quantum Eigensolver (VQE) is a promising candidate for quantum applications on near-term Noisy Intermediate-Scale Quantum (NISQ) computers. Despite a lot of empirical studies and recent progress in theoretical understanding of VQE\&$\#$39;s optimization landscape, the convergence for optimizing VQE is far less understood. We provide the first rigorous analysis of the convergence of VQEs in the over-parameterization regime. By connecting the training dynamics with the Riemannian Gradient Flow on the unit-sphere, we establish a threshold on the sufficient number of parameters for efficient convergence, which depends polynomially on the system dimension and the spectral ratio, a property of the problem Hamiltonian, and could be resilient to gradient noise to some extent. We further illustrate that this overparameterization threshold could be vastly reduced for specific VQE instances by establishing an ansatz-dependent threshold paralleling our main result. We showcase that our ansatz-dependent threshold could serve as a proxy of the trainability of different VQE ansatzes without performing empirical experiments, which hence leads to a principled way of evaluating ansatz design. Finally, we conclude with a comprehensive empirical study that supports our theoretical findings
}, url = {https://arxiv.org/abs/2205.12481}, author = {Xuchen You and Shouvanik Chakrabarti and Xiaodi Wu} } @article {3013, title = {Deconfinement and Error Thresholds in Holography}, year = {2022}, month = {2/9/2022}, abstract = {We study the error threshold properties of holographic quantum error-correcting codes. We demonstrate that holographic CFTs admit an algebraic threshold, which is related to the confinement-deconfinement phase transition. We then apply geometric intuition from holography and the Hawking-Page phase transition to motivate the CFT result, and comment on potential extensions to other confining theories.
}, keywords = {FOS: Physical sciences, High Energy Physics - Theory (hep-th), Nuclear Theory (nucl-th), Quantum Physics (quant-ph), Strongly Correlated Electrons (cond-mat.str-el)}, doi = {10.48550/ARXIV.2202.04710}, url = {https://arxiv.org/abs/2202.04710}, author = {Bao, Ning and Cao, Charles and Zhu, Guanyu} } @article {3140, title = {Demonstration of three- and four-body interactions between trapped-ion spins}, year = {2022}, month = {9/12/2022}, abstract = {Quantum processors use the native interactions between effective spins to simulate Hamiltonians or execute quantum gates. In most processors, the native interactions are pairwise, limiting the efficiency of controlling entanglement between many qubits. Here we experimentally demonstrate a new class of native interactions between trapped-ion qubits, extending conventional pairwise interactions to higher order. We realize three- and four-body spin interactions as examples, showing that high-order spin polynomials may serve as a new toolbox for quantum information applications.
}, keywords = {Atomic Physics (physics.atom-ph), FOS: Physical sciences, Quantum Physics (quant-ph)}, doi = {10.48550/ARXIV.2209.05691}, url = {https://arxiv.org/abs/2209.05691}, author = {Katz, Or and Feng, Lei and Risinger, Andrew and Monroe, Christopher and Cetina, Marko} } @article {3205, title = {Efficient and practical quantum compiler towards multi-qubit systems with deep reinforcement learning}, year = {2022}, month = {4/14/2022}, abstract = {Efficient quantum compiling tactics greatly enhance the capability of quantum computers to execute complicated quantum algorithms. Due to its fundamental importance, a plethora of quantum compilers has been designed in past years. However, there are several caveats to current protocols, which are low optimality, high inference time, limited scalability, and lack of universality. To compensate for these defects, here we devise an efficient and practical quantum compiler assisted by advanced deep reinforcement learning (RL) techniques, i.e., data generation, deep Q-learning, and AQ* search. In this way, our protocol is compatible with various quantum machines and can be used to compile multi-qubit operators. We systematically evaluate the performance of our proposal in compiling quantum operators with both inverse-closed and inverse-free universal basis sets. In the task of single-qubit operator compiling, our proposal outperforms other RL-based quantum compilers in the measure of compiling sequence length and inference time. Meanwhile, the output solution is near-optimal, guaranteed by the Solovay-Kitaev theorem. Notably, for the inverse-free universal basis set, the achieved sequence length complexity is comparable with the inverse-based setting and dramatically advances previous methods. These empirical results contribute to improving the inverse-free Solovay-Kitaev theorem. In addition, for the first time, we demonstrate how to leverage RL-based quantum compilers to accomplish two-qubit operator compiling. 
The achieved results open an avenue for integrating RL with quantum compiling to unify efficiency and practicality and thus facilitate the exploration of quantum advantages.
}, keywords = {FOS: Computer and information sciences, FOS: Physical sciences, Machine Learning (cs.LG), Quantum Physics (quant-ph)}, doi = {10.48550/ARXIV.2204.06904}, url = {https://arxiv.org/abs/2204.06904}, author = {Chen, Qiuhao and Du, Yuxuan and Zhao, Qi and Jiao, Yuling and Lu, Xiliang and Wu, Xingyao} } @article {2910, title = {Efficient Product Formulas for Commutators and Applications to Quantum Simulation}, journal = {Physical Review Research}, volume = {4}, year = {2022}, month = {03/10/2022}, chapter = {013191}, abstract = {We construct product formulas for exponentials of commutators and explore their applications. First, we directly construct a third-order product formula with six exponentials by solving polynomial equations obtained using the operator differential method. We then derive higher-order product formulas recursively from the third-order formula. We improve over previous recursive constructions, reducing the number of gates required to achieve the same accuracy. In addition, we demonstrate that the constituent linear terms in the commutator can be included at no extra cost. As an application, we show how to use the product formulas in a digital protocol for counterdiabatic driving, which increases the fidelity for quantum state preparation. We also discuss applications to quantum simulation of one-dimensional fermion chains with nearest- and next-nearest-neighbor hopping terms, and two-dimensional fractional quantum Hall phases.
}, doi = {https://doi.org/10.1103/PhysRevResearch.4.013191}, url = {https://arxiv.org/abs/2111.12177}, author = {Yu-An Chen and Andrew M. Childs and Mohammad Hafezi and Zhang Jiang and Hwanmun Kim and Yijia Xu} } @article {3153, title = {Error-correcting codes for fermionic quantum simulation}, year = {2022}, month = {10/16/2022}, abstract = {We provide ways to simulate fermions by qubits on 2d lattices using Z2 gauge theories (stabilizer codes). By studying the symplectic automorphisms of the Pauli module over the Laurent polynomial ring, we develop a systematic way to increase the code distances of stabilizer codes. We identify a family of stabilizer codes that can be used to simulate fermions with code distances of d=2,3,4,5,6,7 such that any \⌊d\−12\⌋-qubit error can be corrected. In particular, we demonstrate three stabilizer codes with code distances of d=3, d=4, and d=5, respectively, with all stabilizers and logical operators shown explicitly. The syndromes for all Pauli errors are provided. Finally, we introduce a syndrome-matching method to compute code distances numerically.
}, keywords = {FOS: Mathematics, FOS: Physical sciences, Mathematical Physics (math-ph), Quantum Algebra (math.QA), Quantum Physics (quant-ph), Strongly Correlated Electrons (cond-mat.str-el)}, doi = {10.48550/ARXIV.2210.08411}, url = {https://arxiv.org/abs/2210.08411}, author = {Chen, Yu-An and Alexey V. Gorshkov and Xu, Yijia} } @article {3016, title = {Euler-obstructed Cooper pairing: Nodal superconductivity and hinge Majorana zero modes}, journal = {Physical Review B}, volume = {105}, year = {2022}, month = {3/29/2022}, abstract = {Since the proposal of monopole Cooper pairing in [Phys. Rev. Lett. 120, 067003 (2018)], considerable research efforts have been dedicated to the study of Cooper pairing order parameters constrained (or obstructed) by the nontrivial normal-state band topology at Fermi surfaces in 3D systems. In the current work, we generalize the topologically obstructed pairings between Chern states (like the monopole Cooper pairing) by proposing Euler obstructed Cooper pairing in 3D systems. The Euler obstructed Cooper pairing widely exists between two Fermi surfaces with nontrivial band topology characterized by nonzero Euler numbers; such Fermi surfaces can exist in 3D PT-protected spinless-Dirac/nodal-line semimetals with negligible spin-orbit coupling, where PT is the space-time inversion symmetry. An Euler obstructed pairing channel must have pairing nodes on the pairing-relevant Fermi surfaces, and the total winding number of the pairing nodes is determined by the sum or difference of the Euler numbers on the Fermi surfaces. In particular, we find that when the normal state is time-reversal invariant and the pairing is weak, a sufficiently-dominant Euler obstructed pairing channel with zero total momentum leads to nodal superconductivity. If the Fermi surface splitting is small, the resultant nodal superconductor hosts hinge Majorana zero modes. 
The possible dominance of the Euler obstructed pairing channel near the superconducting transition and the robustness of the hinge Majorana zero modes against disorder are explicitly demonstrated using effective or tight-binding models. Our work presents the first class of higher-order nodal superconductivity originating from the topologically obstructed Cooper pairing.
}, doi = {10.1103/PhysRevB.105.104515}, url = {https://arxiv.org/abs/2109.02685}, author = {Jiabin Yu and Yu-An Chen and Sankar Das Sarma} } @article {3139, title = {Experimental Implementation of an Efficient Test of Quantumness}, year = {2022}, month = {9/28/2022}, abstract = {A test of quantumness is a protocol where a classical user issues challenges to a quantum device to determine if it exhibits non-classical behavior, under certain cryptographic assumptions. Recent attempts to implement such tests on current quantum computers rely on either interactive challenges with efficient verification, or non-interactive challenges with inefficient (exponential time) verification. In this paper, we execute an efficient non-interactive test of quantumness on an ion-trap quantum computer. Our results significantly exceed the bound for a classical device's success.
}, keywords = {FOS: Physical sciences, Other Condensed Matter (cond-mat.other), Quantum Physics (quant-ph)}, doi = {10.48550/ARXIV.2209.14316}, url = {https://arxiv.org/abs/2209.14316}, author = {Lewis, Laura and Zhu, Daiwei and Gheorghiu, Alexandru and Noel, Crystal and Katz, Or and Harraz, Bahaa and Wang, Qingfeng and Risinger, Andrew and Feng, Lei and Biswas, Debopriyo and Egan, Laird and Vidick, Thomas and Cetina, Marko and Monroe, Christopher} } @article {3122, title = {Extracting Wilson loop operators and fractional statistics from a single bulk ground state}, year = {2022}, month = {9/28/2022}, abstract = {An essential aspect of topological phases of matter is the existence of Wilson loop operators that keep the ground state subspace invariant. Here we present and implement an unbiased numerical optimization scheme to systematically find the Wilson loop operators given a single ground state wave function of a gapped Hamiltonian on a disk. We then show how these Wilson loop operators can be cut and glued through further optimization to give operators that can create, move, and annihilate anyon excitations. We subsequently use these operators to determine the braiding statistics and topological twists of the anyons, yielding a way to fully extract topological order from a single wave function. We apply our method to the ground state of the perturbed toric code and doubled semion models with a magnetic field that is up to a half of the critical value. From a contemporary perspective, this can be thought of as a machine learning approach to discover emergent 1-form symmetries of a ground state wave function. From an application perspective, our approach can be relevant to find Wilson loop operators in current quantum simulators.
}, keywords = {FOS: Physical sciences, Quantum Physics (quant-ph), Strongly Correlated Electrons (cond-mat.str-el)}, doi = {10.48550/ARXIV.2209.14302}, url = {https://arxiv.org/abs/2209.14302}, author = {Cian, Ze-Pei and Hafezi, Mohammad and Barkeshli, Maissam} } @article {3179, title = {Group coset monogamy games and an application to device-independent continuous-variable QKD}, year = {2022}, month = {12/7/2022}, abstract = {We develop an extension of a recently introduced subspace coset state monogamy-of-entanglement game [Coladangelo, Liu, Liu, and Zhandry; Crypto'21] to general group coset states, which are uniform superpositions over elements of a subgroup to which has been applied a group-theoretic generalization of the quantum one-time pad. We give a general bound on the winning probability of a monogamy game constructed from subgroup coset states that applies to a wide range of finite and infinite groups. To study the infinite-group case, we use and further develop a measure-theoretic formalism that allows us to express continuous-variable measurements as operator-valued generalizations of probability measures.
We apply the monogamy game bound to various physically relevant groups, yielding realizations of the game in continuous-variable modes as well as in rotational states of a polyatomic molecule. We obtain explicit strong bounds in the case of specific group-space and subgroup combinations. As an application, we provide the first proof of one sided-device independent security of a squeezed-state continuous-variable quantum key distribution protocol against general coherent attacks.
}, url = {https://arxiv.org/abs/2212.03935}, author = {Eric Culf and Thomas Vidick and Victor V. Albert} } @article {3179a, title = {Hamiltonian Simulation with Random Inputs}, journal = {Physical Review Letters}, volume = {129}, year = {2022}, abstract = {The algorithmic error of digital quantum simulations is usually explored in terms of the spectral norm distance between the actual and ideal evolution operators. In practice, this worst-case error analysis may be unnecessarily pessimistic. To address this, we develop a theory of average-case performance of Hamiltonian simulation with random initial states. We relate the average-case error to the Frobenius norm of the multiplicative error and give upper bounds for the product formula (PF) and truncated Taylor series methods. As applications, we estimate average-case error for digital Hamiltonian simulation of general lattice Hamiltonians and k-local Hamiltonians. In particular, for the nearest-neighbor Heisenberg chain with n spins, the error is quadratically reduced from O(n) in the worst case to O($\sqrt{n}$) on average for both the PF method and the Taylor series method. Numerical evidence suggests that this theory accurately characterizes the average error for concrete models. We also apply our results to error analysis in the simulation of quantum scrambling.
}, doi = {10.1103/PhysRevLett.129.270502}, url = {https://arxiv.org/abs/2111.04773}, author = {Qi Zhao and You Zhou and Alexander F. Shaw and Tongyang Li and Andrew M. Childs} } @article {3190, title = {Higher-group symmetry in finite gauge theory and stabilizer codes}, year = {2022}, month = {11/21/2022}, abstract = {A large class of gapped phases of matter can be described by topological finite group gauge theories. In this paper, we derive the d-group global symmetry and its 't Hooft anomaly for topological finite group gauge theories in (d+1) space-time dimensions, including non-Abelian gauge groups and Dijkgraaf-Witten twists. We focus on the 1-form symmetry generated by invertible (Abelian) magnetic defects and the higher-form symmetries generated by invertible topological defects decorated with lower dimensional gauged symmetry-protected topological (SPT) phases. We show that due to a generalization of the Witten effect and charge-flux attachment, the 1-form symmetry generated by the magnetic defects mixes with other symmetries into a higher group. We describe such higher-group symmetry in various lattice model examples. We discuss several applications, including the classification of fermionic SPT phases in (3+1)D for general fermionic symmetry groups, where we also derive a simpler formula for the [O5]\∈H5(BG,U(1)) obstruction than has appeared in previous work. We also show how the d-group symmetry is related to fault-tolerant non-Pauli logical gates and a refined Clifford hierarchy in stabilizer codes. We construct new logical gates in stabilizer codes using the d-group symmetry, such as the control-Z gate in (3+1)D Z2 toric code.
}, keywords = {FOS: Mathematics, FOS: Physical sciences, High Energy Physics - Theory (hep-th), Quantum Algebra (math.QA), Quantum Physics (quant-ph), Strongly Correlated Electrons (cond-mat.str-el)}, doi = {10.48550/ARXIV.2211.11764}, url = {https://arxiv.org/abs/2211.11764}, author = {Barkeshli, Maissam and Chen, Yu-An and Hsin, Po-Shen and Kobayashi, Ryohei} } @article {2607, title = {Implementing a Fast Unbounded Quantum Fanout Gate Using Power-Law Interactions}, journal = {Phys. Rev. Research}, volume = {4}, year = {2022}, month = {10/27/2022}, abstract = {The standard circuit model for quantum computation presumes the ability to directly perform gates between arbitrary pairs of qubits, which is unlikely to be practical for large-scale experiments. Power-law interactions with strength decaying as 1/rα in the distance r provide an experimentally realizable resource for information processing, whilst still retaining long-range connectivity. We leverage the power of these interactions to implement a fast quantum fanout gate with an arbitrary number of targets. Our implementation allows the quantum Fourier transform (QFT) and Shor's algorithm to be performed on a D-dimensional lattice in time logarithmic in the number of qubits for interactions with α\≤D. As a corollary, we show that power-law systems with α\≤D are difficult to simulate classically even for short times, under a standard assumption that factoring is classically intractable. Complementarily, we develop a new technique to give a general lower bound, linear in the size of the system, on the time required to implement the QFT and the fanout gate in systems that are constrained by a linear light cone. This allows us to prove an asymptotically tighter lower bound for long-range systems than is possible with previously available techniques.
}, doi = {https://doi.org/10.1103/PhysRevResearch.4.L042016}, url = {https://arxiv.org/abs/2007.00662}, author = {Andrew Y. Guo and Abhinav Deshpande and Su-Kuan Chu and Zachary Eldredge and Przemyslaw Bienias and Dhruv Devulapalli and Yuan Su and Andrew M. Childs and Alexey V. Gorshkov} } @article {2814, title = {Kramers{\textquoteright} degeneracy for open systems in thermal equilibrium}, journal = {Phys. Rev. B}, volume = {105}, year = {2022}, month = {3/10/2022}, pages = {L121104}, doi = {https://doi.org/10.1103/PhysRevB.105.L121104}, url = {https://arxiv.org/abs/2105.02888}, author = {Simon Lieu and Max McGinley and Oles Shtanko and Nigel R. Cooper and Alexey V. Gorshkov} } @article {3142, title = {Many-Body Quantum Teleportation via Operator Spreading in the Traversable Wormhole Protocol}, journal = {Physical Review X}, volume = {12}, year = {2022}, month = {8/5/2022}, abstract = {By leveraging shared entanglement between a pair of qubits, one can teleport a quantum state from one particle to another. Recent advances have uncovered an intrinsically many-body generalization of quantum teleportation, with an elegant and surprising connection to gravity. In particular, the teleportation of quantum information relies on many-body dynamics, which originate from strongly-interacting systems that are holographically dual to gravity; from the gravitational perspective, such quantum teleportation can be understood as the transmission of information through a traversable wormhole. Here, we propose and analyze a new mechanism for many-body quantum teleportation -- dubbed peaked-size teleportation. Intriguingly, peaked-size teleportation utilizes precisely the same type of quantum circuit as traversable wormhole teleportation, yet has a completely distinct microscopic origin: it relies upon the spreading of local operators under generic thermalizing dynamics and not gravitational physics. 
We demonstrate the ubiquity of peaked-size teleportation, both analytically and numerically, across a diverse landscape of physical systems, including random unitary circuits, the Sachdev-Ye-Kitaev model (at high temperatures), one-dimensional spin chains and a bulk theory of gravity with stringy corrections. Our results pave the way towards using many-body quantum teleportation as a powerful experimental tool for: (i) characterizing the size distributions of operators in strongly-correlated systems and (ii) distinguishing between generic and intrinsically gravitational scrambling dynamics. To this end, we provide a detailed experimental blueprint for realizing many-body quantum teleportation in both trapped ions and Rydberg atom arrays; effects of decoherence and experimental imperfections are analyzed.
}, doi = {10.1103/physrevx.12.031013}, url = {https://arxiv.org/abs/2102.00010}, author = {Thomas Schuster and Bryce Kobrin and Ping Gao and Iris Cong and Emil T. Khabiboulline and Norbert M. Linke and Mikhail D. Lukin and Christopher Monroe and Beni Yoshida and Norman Y. Yao} } @article {3147, title = {Multi-Angle QAOA Does Not Always Need All Its Angles}, year = {2022}, month = {9/23/2022}, abstract = {Introducing additional tunable parameters to quantum circuits is a powerful way of improving performance without increasing hardware requirements. A recently introduced multi-angle extension of the quantum approximate optimization algorithm (ma-QAOA) significantly improves the solution from QAOA by allowing the parameters for each term in the Hamiltonian to vary independently. However, prior results suggest that there is considerable redundancy in parameters, the removal of which would reduce the cost of parameter optimization. In this work, we show numerically that problem symmetries can be used to reduce the number of parameters used by ma-QAOA without decreasing the solution quality. We study MaxCut on all 7,565 connected, non-isomorphic 8-node graphs with a non-trivial symmetry group and show numerically that in 67.4\% of these graphs, symmetry can be used to reduce the number of parameters with no decrease in the objective, with the average ratio of parameters reduced by 28.1\%. Moreover, we show that in 35.9\% of the graphs this can be achieved by simply using the largest symmetry. For the graphs where reducing the number of parameters leads to a decrease in the objective, the largest symmetry can be used to reduce the parameter count by 37.1\% at the cost of only a 6.1\% decrease in the objective.
}, keywords = {FOS: Physical sciences, Quantum Physics (quant-ph)}, doi = {10.48550/ARXIV.2209.11839}, url = {https://arxiv.org/abs/2209.11839}, author = {Shi, Kaiyan and Herrman, Rebekah and Shaydulin, Ruslan and Chakrabarti, Shouvanik and Pistoia, Marco and Larson, Jeffrey} } @article {3141, title = {N-body interactions between trapped ion qubits via spin-dependent squeezing}, journal = {Physical Review Letters}, volume = {129}, year = {2022}, month = {8/5/2022}, abstract = {We describe a simple protocol for the single-step generation of N-body entangling interactions between trapped atomic ion qubits. We show that qubit state-dependent squeezing operations and displacement forces on the collective atomic motion can generate full N-body interactions. Similar to the M{\o}lmer-S{\o}rensen two-body Ising interaction at the core of most trapped ion quantum computers and simulators, the proposed operation is relatively insensitive to the state of motion. We show how this N-body gate operation allows the single-step implementation of a family of N-bit gate operations such as the powerful N-Toffoli gate, which flips a single qubit if and only if all other N-1 qubits are in a particular state.
}, doi = {10.1103/physrevlett.129.063603}, url = {https://arxiv.org/abs/2202.04230}, author = {Or Katz and Marko Cetina and Christopher Monroe} } @article {3196, title = {Optimal scaling quantum linear systems solver via discrete adiabatic theorem}, journal = {PRX Quantum}, volume = {3}, year = {2022}, month = {10/7/2022}, pages = {040303}, abstract = {Recently, several approaches to solving linear systems on a quantum computer have been formulated in terms of the quantum adiabatic theorem for a continuously varying Hamiltonian. Such approaches enabled near-linear scaling in the condition number κ of the linear system, without requiring a complicated variable-time amplitude amplification procedure. However, the most efficient of those procedures is still asymptotically sub-optimal by a factor of log(κ). Here, we prove a rigorous form of the adiabatic theorem that bounds the error in terms of the spectral gap for intrinsically discrete time evolutions. We use this discrete adiabatic theorem to develop a quantum algorithm for solving linear systems that is asymptotically optimal, in the sense that the complexity is strictly linear in κ, matching a known lower bound on the complexity. Our O(κlog(1/ϵ)) complexity is also optimal in terms of the combined scaling in κ and the precision ϵ. Compared to existing suboptimal methods, our algorithm is simpler and easier to implement. Moreover, we determine the constant factors in the algorithm, which would be suitable for determining the complexity in terms of gate counts for specific applications.
}, keywords = {FOS: Physical sciences, Quantum Physics (quant-ph)}, doi = {https://journals.aps.org/prxquantum/pdf/10.1103/PRXQuantum.3.040303}, url = {https://arxiv.org/abs/2111.08152}, author = {Costa, Pedro C. S. and An, Dong and Sanders, Yuval R. and Su, Yuan and Babbush, Ryan and Berry, Dominic W.} } @article {3015, title = {Pauli Stabilizer Models of Twisted Quantum Doubles}, journal = {PRX Quantum}, volume = {3}, year = {2022}, month = {3/30/2022}, abstract = {We construct a Pauli stabilizer model for every two-dimensional Abelian topological order that admits a gapped boundary. Our primary example is a Pauli stabilizer model on four-dimensional qudits that belongs to the double semion (DS) phase of matter. The DS stabilizer Hamiltonian is constructed by condensing an emergent boson in a Z4 toric code, where the condensation is implemented by making certain two-body measurements. We rigorously verify the topological order of the DS stabilizer model by identifying an explicit finite-depth quantum circuit (with ancillary qubits) that maps its ground state subspace to that of a DS string-net model. We show that the construction of the DS stabilizer Hamiltonian generalizes to all twisted quantum doubles (TQDs) with Abelian anyons. This yields a Pauli stabilizer code on composite-dimensional qudits for each such TQD, implying that the classification of topological Pauli stabilizer codes extends well beyond stacks of toric codes - in fact, exhausting all Abelian anyon theories that admit a gapped boundary. We also demonstrate that symmetry-protected topological phases of matter characterized by type I and type II cocycles can be modeled by Pauli stabilizer Hamiltonians by gauging certain 1-form symmetries of the TQD stabilizer models.
}, doi = {https://doi.org/10.1103\%2Fprxquantum.3.010353}, url = {https://arxiv.org/abs/2112.11394}, author = {Tyler D. Ellison and Yu-An Chen and Arpit Dua and Wilbur Shirley and Nathanan Tantivasadakarn and Dominic J. Williamson} } @article {3193, title = {Pauli topological subsystem codes from Abelian anyon theories}, year = {2022}, month = {11/7/2022}, abstract = {We construct Pauli topological subsystem codes characterized by arbitrary two-dimensional Abelian anyon theories--this includes anyon theories with degenerate braiding relations and those without a gapped boundary to the vacuum. Our work both extends the classification of two-dimensional Pauli topological subsystem codes to systems of composite-dimensional qudits and establishes that the classification is at least as rich as that of Abelian anyon theories. We exemplify the construction with topological subsystem codes defined on four-dimensional qudits based on the Z(1)4 anyon theory with degenerate braiding relations and the chiral semion theory--both of which cannot be captured by topological stabilizer codes. The construction proceeds by \"gauging out\" certain anyon types of a topological stabilizer code. This amounts to defining a gauge group generated by the stabilizer group of the topological stabilizer code and a set of anyonic string operators for the anyon types that are gauged out. The resulting topological subsystem code is characterized by an anyon theory containing a proper subset of the anyons of the topological stabilizer code. We thereby show that every Abelian anyon theory is a subtheory of a stack of toric codes and a certain family of twisted quantum doubles that generalize the double semion anyon theory. We further prove a number of general statements about the logical operators of translation invariant topological subsystem codes and define their associated anyon theories in terms of higher-form symmetries.
}, keywords = {FOS: Physical sciences, Quantum Physics (quant-ph), Strongly Correlated Electrons (cond-mat.str-el)}, doi = {10.48550/ARXIV.2211.03798}, url = {https://arxiv.org/abs/2211.03798}, author = {Ellison, Tyler D. and Chen, Yu-An and Dua, Arpit and Shirley, Wilbur and Tantivasadakarn, Nathanan and Williamson, Dominic J.} } @article {3150, title = {Quantum Algorithms for Sampling Log-Concave Distributions and Estimating Normalizing Constants}, journal = {Advances in Neural Information Processing Systems (NeurIPS 2022)}, volume = {35}, year = {2022}, month = {10/12/2022}, keywords = {FOS: Computer and information sciences, FOS: Mathematics, FOS: Physical sciences, Machine Learning (cs.LG), Optimization and Control (math.OC), Quantum Physics (quant-ph)}, doi = {10.48550/ARXIV.2210.06539}, url = {https://arxiv.org/abs/2210.06539}, author = {Andrew M. Childs and Li, Tongyang and Liu, Jin-Peng and Wang, Chunhao and Zhang, Ruizhe} } @article {3137, title = {Quantum Depth in the Random Oracle Model}, year = {2022}, month = {10/12/2022}, abstract = {We give a comprehensive characterization of the computational power of shallow quantum circuits combined with classical computation. Specifically, for classes of search problems, we show that the following statements hold, relative to a random oracle:
(a) $\mathsf{BPP}^{\mathsf{QNC}^{\mathsf{BPP}}} \neq \mathsf{BQP}$. This refutes Jozsa's conjecture [QIP 05] in the random oracle model. As a result, this gives the first instantiatable separation between the classes by replacing the oracle with a cryptographic hash function, yielding a resolution to one of Aaronson's ten semi-grand challenges in quantum computing.
(b) $\mathsf{BPP}^{\mathsf{QNC}} \nsubseteq \mathsf{QNC}^{\mathsf{BPP}}$ and $\mathsf{QNC}^{\mathsf{BPP}} \nsubseteq \mathsf{BPP}^{\mathsf{QNC}}$. This shows that there is a subtle interplay between classical computation and shallow quantum computation. In fact, for the second separation, we establish that, for some problems, the ability to perform adaptive measurements in a single shallow quantum circuit, is more useful than the ability to perform polynomially many shallow quantum circuits without adaptive measurements.
(c) There exists a 2-message proof of quantum depth protocol. Such a protocol allows a classical verifier to efficiently certify that a prover must be performing a computation of some minimum quantum depth. Our proof of quantum depth can be instantiated using the recent proof of quantumness construction by Yamakawa and Zhandry [STOC 22].
The divide-and-conquer framework, used extensively in classical algorithm design, recursively breaks a problem of size $n$ into smaller subproblems (say, $a$ copies of size $n/b$ each), along with some auxiliary work of cost $C_{\mathrm{aux}}(n)$, to give a recurrence relation
$C(n) \leq a\,C(n/b) + C_{\mathrm{aux}}(n)$
for the classical complexity C(n). We describe a quantum divide-and-conquer framework that, in certain cases, yields an analogous recurrence relation
$C_{Q}(n) \leq \sqrt{a}\,C_{Q}(n/b) + O(C^{Q}_{\mathrm{aux}}(n))$
that characterizes the quantum query complexity. We apply this framework to obtain near-optimal quantum query complexities for various string problems, such as (i) recognizing regular languages; (ii) decision versions of String Rotation and String Suffix; and natural parameterized versions of (iii) Longest Increasing Subsequence and (iv) Longest Common Subsequence.
We introduce a flexible and graphically intuitive framework that constructs complex quantum error correction codes from simple codes or states, generalizing code concatenation. More specifically, we represent the complex code constructions as tensor networks built from the tensors of simple codes or states in a modular fashion. Using a set of local moves known as operator pushing, one can derive properties of the more complex codes, such as transversal non-Clifford gates, by tracing the flow of operators in the network. The framework endows a network geometry to any code it builds and is valid for constructing stabilizer codes as well as non-stabilizer codes over qubits and qudits. For a contractible tensor network, the sequence of contractions also constructs a decoding/encoding circuit. To highlight the framework\&$\#$39;s range of capabilities and to provide a tutorial, we lay out some examples where we glue together simple stabilizer codes to construct non-trivial codes. These examples include the toric code and its variants, a holographic code with transversal non-Clifford operators, a 3d stabilizer code, and other stabilizer codes with interesting properties. Surprisingly, we find that the surface code is equivalent to the 2d Bacon-Shor code after \"dualizing\" its tensor network encoding map.
}, doi = {https://journals.aps.org/prxquantum/pdf/10.1103/PRXQuantum.3.020332}, url = {https://arxiv.org/abs/2109.08158}, author = {ChunJun Cao and Brad Lackey} } @article {3191, title = {Quantum Natural Proof: A New Perspective of Hybrid Quantum-Classical Program Verification}, year = {2022}, month = {11/11/2022}, abstract = {Many quantum programs are assured by formal verification, but such verification is usually laborious and time-consuming. This paper proposes quantum natural proof (QNP), an automated proof system for verifying hybrid quantum-classical algorithms. Natural proofs are a subclass of proofs that are amenable to completely automated reasoning, provide sound but incomplete procedures, and capture common reasoning tactics in program verification. The core of QNP is a type-guided quantum proof system, named Qafny, which views quantum operations as some classical array operations that can be modeled as proof rules in a classical separation logic framework, suitable for automated reasoning. We proved the soundness and completeness of the Qafny proof system as well as the soundness of the proof system compilation from Qafny to Dafny. By using the QNP implementation in Dafny, automated verification can be efficiently perform for many hybrid quantum-classical algorithms, including GHZ, Shor\&$\#$39;s, Grover\&$\#$39;s, and quantum walk algorithms, which saves a great amount of human efforts. In addition, quantum programs written in Qafny can be compiled to quantum circuits so that every verified quantum program can be run on a quantum machine.
}, keywords = {FOS: Computer and information sciences, FOS: Physical sciences, Programming Languages (cs.PL), Quantum Physics (quant-ph)}, doi = {10.48550/ARXIV.2211.06411}, url = {https://arxiv.org/abs/2211.06411}, author = {Li, Liyi and Zhu, Mingwei and Lee, Yi and Chang, Le and Wu, Xiaodi} } @article {2996, title = {Quantum Routing with Teleportation}, year = {2022}, month = {4/8/2022}, abstract = {We study the problem of implementing arbitrary permutations of qubits under interaction constraints in quantum systems that allow for arbitrarily fast local operations and classical communication (LOCC). In particular, we show examples of speedups over swap-based and more general unitary routing methods by distributing entanglement and using LOCC to perform quantum teleportation. We further describe an example of an interaction graph for which teleportation gives a logarithmic speedup in the worst-case routing time over swap-based routing. We also study limits on the speedup afforded by quantum teleportation - showing an O(NlogN\−\−\−\−\−\−\−\√) upper bound on the separation in routing time for any interaction graph - and give tighter bounds for some common classes of graphs.
}, keywords = {FOS: Physical sciences, Quantum Physics (quant-ph)}, doi = {10.48550/ARXIV.2204.04185}, url = {https://arxiv.org/abs/2204.04185}, author = {Devulapalli, Dhruv and Schoute, Eddie and Bapat, Aniruddha and Andrew M. Childs and Alexey V. Gorshkov} } @article {2997, title = {Quantum Simulation for High Energy Physics}, year = {2022}, month = {4/7/2022}, abstract = {It is for the first time that Quantum Simulation for High Energy Physics (HEP) is studied in the U.S. decadal particle-physics community planning, and in fact until recently, this was not considered a mainstream topic in the community. This fact speaks of a remarkable rate of growth of this subfield over the past few years, stimulated by the impressive advancements in Quantum Information Sciences (QIS) and associated technologies over the past decade, and the significant investment in this area by the government and private sectors in the U.S. and other countries. High-energy physicists have quickly identified problems of importance to our understanding of nature at the most fundamental level, from tiniest distances to cosmological extents, that are intractable with classical computers but may benefit from quantum advantage. They have initiated, and continue to carry out, a vigorous program in theory, algorithm, and hardware co-design for simulations of relevance to the HEP mission. This community whitepaper is an attempt to bring this exciting and yet challenging area of research to the spotlight, and to elaborate on what the promises, requirements, challenges, and potential solutions are over the next decade and beyond.
}, keywords = {FOS: Physical sciences, High Energy Physics - Lattice (hep-lat), High Energy Physics - Phenomenology (hep-ph), High Energy Physics - Theory (hep-th), Nuclear Theory (nucl-th), Quantum Physics (quant-ph)}, doi = {10.48550/ARXIV.2204.03381}, url = {https://arxiv.org/abs/2204.03381}, author = {Bauer, Christian W. and Davoudi, Zohreh and Balantekin, A. Baha and Bhattacharya, Tanmoy and Carena, Marcela and de Jong, Wibe A. and Draper, Patrick and El-Khadra, Aida and Gemelke, Nate and Hanada, Masanori and Kharzeev, Dmitri and Lamm, Henry and Li, Ying-Ying and Liu, Junyu and Lukin, Mikhail and Meurice, Yannick and Monroe, Christopher and Nachman, Benjamin and Pagano, Guido and Preskill, John and Rinaldi, Enrico and Roggero, Alessandro and Santiago, David I. and Savage, Martin J. and Siddiqi, Irfan and Siopsis, George and Van Zanten, David and Wiebe, Nathan and Yamauchi, Yukari and Yeter-Aydeniz, K{\"u}bra and Zorzetti, Silvia} } @article {3187, title = {Quantum simulation of real-space dynamics}, journal = {Quantum}, volume = {6}, year = {2022}, month = {11/8/2022}, pages = {860}, abstract = {Quantum simulation is a prominent application of quantum computers. While there is extensive previous work on simulating finite-dimensional systems, less is known about quantum algorithms for real-space dynamics. We conduct a systematic study of such algorithms. In particular, we show that the dynamics of a d-dimensional Schr{\"o}dinger equation with η particles can be simulated with gate complexity O~(ηdFpoly(log(g\′/ϵ))), where ϵ is the discretization error, g\′ controls the higher-order derivatives of the wave function, and F measures the time-integrated strength of the potential. Compared to the best previous results, this exponentially improves the dependence on ϵ and g\′ from poly(g\′/ϵ) to poly(log(g\′/ϵ)) and polynomially improves the dependence on T and d, while maintaining best known performance with respect to η. 
For the case of Coulomb interactions, we give an algorithm using η3(d+η)Tpoly(log(ηdTg\′/(Δϵ)))/Δ one- and two-qubit gates, and another using η3(4d)d/2Tpoly(log(ηdTg\′/(Δϵ)))/Δ one- and two-qubit gates and QRAM operations, where T is the evolution time and the parameter Δ regulates the unbounded Coulomb interaction. We give applications to several computational problems, including faster real-space simulation of quantum chemistry, rigorous analysis of discretization error for simulation of a uniform electron gas, and a quadratic improvement to a quantum algorithm for escaping saddle points in nonconvex optimization.
}, doi = {https://doi.org/10.22331/q-2022-11-17-860}, url = {https://doi.org/10.22331\%2Fq-2022-11-17-860}, author = {Andrew M. Childs and Jiaqi Leng and Tongyang Li and Jin-Peng Liu and Chenyi Zhang} } @article {3146, title = {Quantum state tomography via non-convex Riemannian gradient descent}, year = {2022}, month = {10/10/2022}, abstract = {The recovery of an unknown density matrix of large size requires huge computational resources. The recent Factored Gradient Descent (FGD) algorithm and its variants achieved state-of-the-art performance since they could mitigate the dimensionality barrier by utilizing some of the underlying structures of the density matrix. Despite their theoretical guarantee of a linear convergence rate, the convergence in practical scenarios is still slow because the contracting factor of the FGD algorithms depends on the condition number κ of the ground truth state. Consequently, the total number of iterations can be as large as O(κ\−\−\√ln(1ε)) to achieve the estimation error ε. In this work, we derive a quantum state tomography scheme that improves the dependence on κ to the logarithmic scale; namely, our algorithm could achieve the approximation error ε in O(ln(1κε)) steps. The improvement comes from the application of the non-convex Riemannian gradient descent (RGD). The contracting factor in our approach is thus a universal constant that is independent of the given state. Our theoretical results of extremely fast convergence and nearly optimal error bounds are corroborated by numerical results.
}, keywords = {FOS: Physical sciences, Quantum Physics (quant-ph)}, doi = {10.48550/ARXIV.2210.04717}, url = {https://arxiv.org/abs/2210.04717}, author = {Hsu, Ming-Chien and Kuo, En-Jui and Yu, Wei-Hsuan and Cai, Jian-Feng and Hsieh, Min-Hsiu} } @article {3009, title = {Self-Testing of a Single Quantum System: Theory and Experiment}, year = {2022}, month = {3/17/2022}, abstract = {Certifying individual quantum devices with minimal assumptions is crucial for the development of quantum technologies. Here, we investigate how to leverage single-system contextuality to realize self-testing. We develop a robust self-testing protocol based on the simplest contextuality witness for the simplest contextual quantum system, the Klyachko-Can-Binicio{\u g}lu-Shumovsky (KCBS) inequality for the qutrit. We establish a lower bound on the fidelity of the state and the measurements (to an ideal configuration) as a function of the value of the witness under a pragmatic assumption on the measurements we call the KCBS orthogonality condition. We apply the method in an experiment with randomly chosen measurements on a single trapped 40Ca+ and near-perfect detection efficiency. The observed statistics allow us to self-test the system and provide the first experimental demonstration of quantum self-testing of a single system. Further, we quantify and report that deviations from our assumptions are minimal, an aspect previously overlooked by contextuality experiments.
}, keywords = {Atomic Physics (physics.atom-ph), FOS: Physical sciences, Quantum Physics (quant-ph)}, doi = {https://doi.org/10.48550/arXiv.2203.09003}, url = {https://arxiv.org/abs/2203.09003}, author = {Hu, Xiao-Min and Xie, Yi and Arora, Atul Singh and Ai, Ming-Zhong and Bharti, Kishor and Zhang, Jie and Wu, Wei and Chen, Ping-Xing and Cui, Jin-Ming and Liu, Bi-Heng and Huang, Yun-Feng and Li, Chuan-Feng and Guo, Guang-Can and Roland, J{\'e}r{\'e}mie and Cabello, Ad{\'a}n and Kwek, Leong-Chuan} } @article {3017, title = {Shadow Distillation: Quantum Error Mitigation with Classical Shadows for Near-Term Quantum Processors}, year = {2022}, month = {3/14/2022}, abstract = {Mitigating errors in quantum information processing devices is especially important in the absence of fault tolerance. An effective method in suppressing state-preparation errors is using multiple copies to distill the ideal component from a noisy quantum state. Here, we use classical shadows and randomized measurements to circumvent the need for coherent access to multiple copies at an exponential cost. We study the scaling of resources using numerical simulations and find that the overhead is still favorable compared to full state tomography. We optimize measurement resources under realistic experimental constraints and apply our method to an experiment preparing Greenberger-Horne-Zeilinger (GHZ) state with trapped ions. In addition to improving stabilizer measurements, the analysis of the improved results reveals the nature of errors affecting the experiment. Hence, our results provide a directly applicable method for mitigating errors in near-term quantum computers.
}, keywords = {FOS: Physical sciences, Quantum Physics (quant-ph)}, doi = {10.48550/ARXIV.2203.07309}, url = {https://arxiv.org/abs/2203.07309}, author = {Seif, Alireza and Cian, Ze-Pei and Zhou, Sisi and Chen, Senrui and Jiang, Liang} } @article {2999, title = {Snowmass 2021 White Paper: Tabletop experiments for infrared quantum gravity}, year = {2022}, month = {3/22/2022}, abstract = {Progress in the quantum readout and control of mechanical devices from single atoms to large masses may enable a first generation of experiments probing the gravitational interaction in the quantum regime, conceivably within the next decade. In this Snowmass whitepaper, we briefly outline the possibilities and challenges facing the realization of these experiments. In particular, we emphasize the need for detailed theories of modifications to the usual effective QFT of gravitons in the infrared regime E/L3< The absence of clear signals from particle dark matter in direct detection experiments motivates new approaches in disparate regions of viable parameter space. In this Snowmass white paper, we outline the Windchime project, a program to build a large array of quantum-enhanced mechanical sensors. The ultimate aim is to build a detector capable of searching for Planck mass-scale dark matter purely through its gravitational coupling to ordinary matter. In the shorter term, we aim to search for a number of other physics targets, especially some ultralight dark matter candidates. Here, we discuss the basic design, open R\&D challenges and opportunities, current experimental efforts, and both short- and long-term physics targets of the Windchime project. The National Institute of Standards and Technology is in the process of selecting publickey cryptographic algorithms through a public, competition-like process. 
The new publickey cryptography standards will specify additional digital signature, public-key encryption, and key-establishment algorithms to augment Federal Information Processing Standard (FIPS) 186-4, Digital Signature Standard (DSS), as well as NIST Special Publication (SP) 800-56A Revision 3, Recommendation for Pair-Wise Key-Establishment Schemes Using Discrete Logarithm Cryptography, and SP 800-56B Revision 2, Recommendation for Pair-Wise Key Establishment Using Integer Factorization Cryptography. It is intended that these algorithms will be capable of protecting sensitive information well into the foreseeable future, including after the advent of quantum computers. This report describes the evaluation and selection process of the NIST Post-Quantum Cryptography Standardization process third-round candidates based on public feedback and internal review. The report summarizes each of the 15 third-round candidate algorithms and identifies those selected for standardization, as well as those that will continue to be evaluated in a fourth round of analysis. The public-key encryption and key-establishment algorithm that will be standardized is CRYSTALS\–KYBER. The digital signatures that will be standardized are CRYSTALS\–Dilithium, FALCON, and SPHINCS+. While there are multiple signature algorithms selected, NIST recommends CRYSTALS\–Dilithium as the primary algorithm to be implemented. In addition, four of the alternate key-establishment candidate algorithms will advance to a fourth round of evaluation: BIKE, Classic McEliece, HQC, and SIKE. These candidates are still being considered for future standardization. NIST will also issue a new Call for Proposals for public-key digital signature algorithms to augment and diversify its signature portfolio. Tailored topological stabilizer codes in two dimensions have been shown to exhibit high storage threshold error rates and improved subthreshold performance under biased Pauli noise. 
Three-dimensional (3D) topological codes can allow for several advantages including a transversal implementation of non-Clifford logical gates, single-shot decoding strategies, parallelized decoding in the case of fracton codes as well as construction of fractal lattice codes. Motivated by this, we tailor 3D topological codes for enhanced storage performance under biased Pauli noise. We present Clifford deformations of various 3D topological codes, such that they exhibit a threshold error rate of 50\% under infinitely biased Pauli noise. Our examples include the 3D surface code on the cubic lattice, the 3D surface code on a checkerboard lattice that lends itself to a subsystem code with a single-shot decoder, the 3D color code, as well as fracton models such as the X-cube model, the Sierpinski model and the Haah code. We use the belief propagation with ordered statistics decoder (BP-OSD) to study threshold error rates at finite bias. We also present a rotated layout for the 3D surface code, which uses roughly half the number of physical qubits for the same code distance under appropriate boundary conditions. Imposing coprime periodic dimensions on this rotated layout leads to logical operators of weight O(n) at infinite bias and a corresponding exp[\−O(n)] subthreshold scaling of the logical failure rate, where n is the number of physical qubits in the code. Even though this scaling is unstable due to the existence of logical representations with O(1) low-rate Pauli errors, the number of such representations scales only polynomially for the Clifford-deformed code, leading to an enhanced effective distance. We construct a novel three-dimensional quantum cellular automaton (QCA) based on a system with short-range entangled bulk and chiral semion boundary topological order. We argue that either the QCA is nontrivial, i.e. 
not a finite-depth circuit of local quantum gates, or there exists a two-dimensional commuting projector Hamiltonian realizing the chiral semion topological order (characterized by U(1)2 Chern-Simons theory). Our QCA is obtained by first constructing the Walker-Wang Hamiltonian of a certain premodular tensor category of order four, then condensing the deconfined bulk boson at the level of lattice operators. We show that the resulting Hamiltonian hosts chiral semion surface topological order in the presence of a boundary and can be realized as a non-Pauli stabilizer code on qubits, from which the QCA is defined. The construction is then generalized to a class of QCAs defined by non-Pauli stabilizer codes on 2n-dimensional qudits that feature surface anyons described by U(1)2n Chern-Simons theory. Our results support the conjecture that the group of nontrivial three-dimensional QCAs is isomorphic to the Witt group of non-degenerate braided fusion categories. Quantum walks provide a framework for understanding and designing quantum algorithms that is both intuitive and universal. To leverage the computational power of these walks, it is important to be able to programmably modify the graph a walker traverses while maintaining coherence. Here, we do this by combining the fast, programmable control provided by optical tweezer arrays with the scalable, homogeneous environment of an optical lattice. Using this new combination of tools we study continuous-time quantum walks of single atoms on a 2D square lattice, and perform proof-of-principle demonstrations of spatial search using these walks. 
When scaled to more particles, the capabilities demonstrated here can be extended to study a variety of problems in quantum information science and quantum simulation, including the deterministic assembly of ground and excited states in Hubbard models with tunable interactions, and performing versions of spatial search in a larger graph with increased connectivity, where search by quantum walk can be more effective. We construct an explicit and solvable toy model for the AdS/CFT correspondence in the form of an approximate quantum error correction code with a non-trivial center in the code subalgebra. Specifically, we use the Bacon-Shor codes and perfect tensors to construct a gauge code (or a stabilizer code with gauge-fixing), which we call the holographic hybrid code. This code admits a local log-depth encoding/decoding circuit, and can be represented as a holographic tensor network which satisfies an analog of the Ryu-Takayanagi formula and reproduces features of the sub-region duality. We then construct approximate versions of the holographic hybrid codes by \"skewing\" the code subspace, where the size of skewing is analogous to the size of the gravitational constant in holography. These approximate hybrid codes are not necessarily stabilizer codes, but they can be expressed as the superposition of holographic tensor networks that are stabilizer codes. For such constructions, different logical states, representing different bulk matter content, can \"back-react\" on the emergent geometry, resembling a key feature of gravity. The locality of the bulk degrees of freedom becomes subspace-dependent and approximate. Such subspace-dependence is manifest in the form of bulk operator reconstruction from the boundary. Exact complementary error correction breaks down for certain bipartition of the boundary degrees of freedom; however, a limited, state-dependent form is preserved for particular subspaces. 
We also construct an example where the connected two-point correlation functions can have a power-law decay. Coupled with known constraints from holography, a weakly back-reacting bulk also forces these skewed tensor network models to the \"large N limit\" where they are built by concatenating a large N number of copies. Photocurrent (PC) measurements can reveal the relaxation dynamics of photo-excited hot carriers beyond the linear response of conventional transport experiments, a regime important for carrier multiplication. In graphene subject to a magnetic field, PC measurements are able to probe the existence of Landau levels with different edge chiralities which is exclusive to relativistic electron systems. Here, we report the accurate measurement of PC in graphene in the quantum Hall regime. Prominent PC oscillations as a function of gate voltage on samples\&$\#$39; edges are observed. These oscillation amplitudes form an envelope which depends on the strength of the magnetic field, as does the PCs\&$\#$39; power dependence and their saturation behavior. We explain these experimental observations through a model using optical Bloch equations, incorporating relaxations through acoustic-, optical- phonons and Coulomb interactions. The simulated PC agrees with our experimental results, leading to a unified understanding of the chiral PC in graphene at various magnetic field strengths, and providing hints for the occurrence of a sizable carrier multiplication.\ Our paper arXiv:2101.11629 contains a technical error which changes some of the conclusions. We thank Streltsov, Pedernales, and Plenio for bringing the essence of this error to our attention. Here we explain the error, examine its consequences, and suggest methods to overcome the resulting weakness in the proposed experiment. \"Magic\" is the degree to which a state cannot be approximated by Clifford gates. 
We study mana, a measure of magic, in the ground state of the Z3 Potts model, and argue that it is a broadly useful diagnostic for many-body physics. In particular we find that the q=3 ground state has large mana at the model\&$\#$39;s critical point, and that this mana resides in the system\&$\#$39;s correlations. We explain the form of the mana by a simple tensor-counting calculation based on a MERA representation of the state. Because mana is present at all length scales, we conclude that the conformal field theory describing the 3-state Potts model critical point is magical. These results control the difficulty of preparing the Potts ground state on an error-corrected quantum computer, and constrain tensor network models of AdS-CFT. As we approach the era of quantum advantage, when quantum computers (QCs) can outperform any classical computer on particular tasks, there remains the difficult challenge of how to validate their performance. While algorithmic success can be easily verified in some instances such as number factoring or oracular algorithms, these approaches only provide pass/fail information for a single QC. On the other hand, a comparison between different QCs on the same arbitrary circuit provides a lower-bound for generic validation: a quantum computation is only as valid as the agreement between the results produced on different QCs. Such an approach is also at the heart of evaluating metrological standards such as disparate atomic clocks. In this paper, we report a cross-platform QC comparison using randomized and correlated measurements that results in a wealth of information on the QC systems. We execute several quantum circuits on widely different physical QC platforms and analyze the cross-platform fidelities. 
While there has been extensive previous work on efficient quantum algorithms for linear differential equations, analogous progress for nonlinear differential equations has been severely limited due to the linearity of quantum mechanics. Despite this obstacle, we develop a quantum algorithm for initial value problems described by dissipative quadratic n-dimensional ordinary differential equations. Assuming $R<1$, where $R$ is a parameter characterizing the ratio of the nonlinearity to the linear dissipation, this algorithm has complexity $T^2\,\mathrm{poly}(\log T, \log n)/\epsilon$, where $T$ is the evolution time and $\epsilon$ is the allowed error in the output quantum state. This is an exponential improvement over the best previous quantum algorithms, whose complexity is exponential in $T$. We achieve this improvement using the method of Carleman linearization, for which we give an improved convergence theorem. This method maps a system of nonlinear differential equations to an infinite-dimensional system of linear differential equations, which we discretize, truncate, and solve using the forward Euler method and the quantum linear system algorithm. We also provide a lower bound on the worst-case complexity of quantum algorithms for general quadratic differential equations, showing that the problem is intractable for $R \ge \sqrt{2}$. Finally, we discuss potential applications of this approach to problems arising in biology as well as in fluid and plasma dynamics. Estimating the expectation value of an observable is a fundamental task in quantum computation. Unfortunately, it is often impossible to obtain such estimates directly, as the computer is restricted to measuring in a fixed computational basis. One common solution splits the observable into a weighted sum of Pauli operators and measures each separately, at the cost of many measurements. An improved version first groups mutually commuting Pauli operators together and then measures all operators within each group simultaneously. 
The effectiveness of this depends on two factors. First, to enable simultaneous measurement, circuits are required to rotate each group to the computational basis. In our work, we present two efficient circuit constructions that suitably rotate any group of k commuting n-qubit Pauli operators using at most kn\−k(k+1)/2 and O(kn/logk) two-qubit gates respectively. Second, metrics that justifiably measure the effectiveness of a grouping are required. In our work, we propose two natural metrics that operate under the assumption that measurements are distributed optimally among groups. Motivated by our new metrics, we introduce SORTED INSERTION, a grouping strategy that is explicitly aware of the weighting of each Pauli operator in the observable. Our methods are numerically illustrated in the context of the Variational Quantum Eigensolver, where the observables in question are molecular Hamiltonians. As measured by our metrics, SORTED INSERTION outperforms four conventional greedy colouring algorithms that seek the minimum number of groups. We study the role of coherence in closed and open quantum batteries. We obtain upper bounds to the work performed or energy exchanged by both closed and open quantum batteries in terms of coherence. Specifically, we show that the energy storage can be bounded by the Hilbert-Schmidt coherence of the density matrix in the spectral basis of the unitary operator that encodes the evolution of the battery. We also show that an analogous bound can be obtained in terms of the battery\&$\#$39;s Hamiltonian coherence in the basis of the unitary operator by evaluating their commutator. We apply these bounds to a 4-state quantum system and the anisotropic XY Ising model in the closed system case, and the Spin-Boson model in the open case.\ Cellular automata are interacting classical bits that display diverse emergent behaviors, from fractals to random-number generators to Turing-complete computation. 
We discover that quantum cellular automata (QCA) can exhibit complexity in the sense of the complexity science that describes biology, sociology, and economics. QCA exhibit complexity when evolving under \"Goldilocks rules\" that we define by balancing activity and stasis. Our Goldilocks rules generate robust dynamical features (entangled breathers), network structure and dynamics consistent with complexity, and persistent entropy fluctuations. Present-day experimental platforms -- Rydberg arrays, trapped ions, and superconducting qubits -- can implement our Goldilocks protocols, making testable the link between complexity science and quantum computation exposed by our QCA. We construct infinitely many new exactly solvable local commuting projector lattice Hamiltonian models for general bosonic beyond group cohomology invertible topological phases of order two and four in any spacetime dimensions, whose boundaries are characterized by gravitational anomalies. Examples include the beyond group cohomology invertible phase without symmetry in (4+1)D that has an anomalous boundary Z2 topological order with fermionic particle and fermionic loop excitations that have mutual π statistics. We argue that this construction gives a new non-trivial quantum cellular automaton (QCA) in (4+1)D of order two. We also present an explicit construction of gapped symmetric boundary state for the bosonic beyond group cohomology invertible phase with unitary Z2 symmetry in (4+1)D. We discuss new quantum phase transitions protected by different invertible phases across the transitions. Simulating the dynamics of quantum systems is an important application of quantum computers and has seen a variety of implementations on current hardware. 
We show that by introducing quantum gates implementing unitary transformations generated by the symmetries of the system, one can induce destructive interference between the errors from different steps of the simulation, effectively giving faster quantum simulation by symmetry protection. We derive rigorous bounds on the error of a symmetry-protected simulation algorithm and identify conditions for optimal symmetry protection. In particular, when the symmetry transformations are chosen as powers of a unitary, the error of the algorithm is approximately projected to the so-called quantum Zeno subspaces. We prove a bound on this approximation error, exponentially improving a recent result of Burgarth, Facchi, Gramegna, and Pascazio. We apply our technique to the simulations of the XXZ Heisenberg interactions with local disorder and the Schwinger model in quantum field theory. For both systems, our algorithm can reduce the simulation error by several orders of magnitude over the unprotected simulation. Finally, we provide numerical evidence suggesting that our technique can also protect simulation against other types of coherent, temporally correlated errors, such as the 1/f noise commonly found in solid-state experiments. In this note, I review a recent approach to quantum gravity that \"gravitizes\" quantum mechanics by emerging geometry and gravity from complex quantum states. Drawing further insights from tensor network toy models in AdS/CFT, I propose that approximate quantum error correction codes, when re-adapted into the aforementioned framework, also has promise in emerging gravity in near-flat geometries. Modern quantum machine learning (QML) methods involve variationally optimizing a parameterized quantum circuit on a training data set, and subsequently making predictions on a testing data set (i.e., generalizing). 
In this work, we provide a comprehensive study of generalization performance in QML after training on a limited number $N$ of training data points. We show that the generalization error of a quantum machine learning model with $T$ trainable gates scales at worst as $\sqrt{T/N}$. When only $K \ll T$ gates have undergone substantial change in the optimization process, we prove that the generalization error improves to $\sqrt{K/N}$. In this paper, we derive the explicit formula for higher cup products on hypercubic lattices, based on the recently developed geometrical interpretation in the simplicial case. We illustrate how this formalism can elucidate lattice constructions on hypercubic lattices for various models and deriving them from spacetime actions. In particular, we demonstrate explicitly that the (3+1)D SPT $S=\frac{1}{2}\int w_2^2+w_1^4$ (where $w_1$ and $w_2$ are the first and second Stiefel-Whitney classes) is dual to the 3-fermion Walker-Wang model constructed on the cubic lattice by Burnell-Chen-Fidkowski-Vishwanath. Other examples include the double-semion model, and also the {\textquoteleft}fermionic{\textquoteright} toric code in arbitrary dimensions on hypercubic lattices. In addition, we extend previous constructions of exact boson-fermion dualities and the Gu-Wen Grassmann Integral to arbitrary dimensions. Another result which may be of independent interest is a derivation of a cochain-level action for the generalized double-semion model, reproducing a recently derived action on the cohomology level. Quantum computers can produce a quantum encoding of the solution of a system of differential equations exponentially faster than a classical algorithm can produce an explicit description. However, while high-precision quantum algorithms for linear ordinary differential equations are well established, the best previous quantum algorithms for linear partial differential equations (PDEs) have complexity poly(1/ε), where ε is the error tolerance. 
By developing quantum algorithms based on adaptive-order finite difference methods and spectral methods, we improve the complexity of quantum algorithms for linear PDEs to be poly(d,log(1/ε)), where d is the spatial dimension. Our algorithms apply high-precision quantum linear system algorithms to systems whose condition numbers and approximation errors we bound. We develop a finite difference algorithm for the Poisson equation and a spectral algorithm for more general second-order elliptic equations.\ We consider a class of holographic tensor networks that are efficiently contractible variational ansatze, manifestly (approximate) quantum error correction codes, and can support power-law correlation functions. In the case when the network consists of a single type of tensor that also acts as an erasure correction code, we show that it cannot be both locally contractible and sustain power-law correlation functions. Motivated by this no-go theorem, and the desirability of local contractibility for an efficient variational ansatz, we provide guidelines for constructing networks consisting of multiple types of tensors that can support power-law correlation. We also provide an explicit construction of one such network, which approximates the holographic HaPPY pentagon code in the limit where variational parameters are taken to be small. We investigate the existence of constant-round post-quantum black-box zero-knowledge protocols for NP. As a main result, we show that there is no constant-round post-quantum black-box zero-knowledge argument for NP unless NP\⊆BQP. As constant-round black-box zero-knowledge arguments for NP exist in the classical setting, our main result points out a fundamental difference between post-quantum and classical zero-knowledge protocols. 
Combining previous results, we conclude that unless NP\⊆BQP, constant-round post-quantum zero-knowledge protocols for NP exist if and only if we use non-black-box techniques or relax certain security requirements such as relaxing standard zero-knowledge to ϵ-zero-knowledge. Additionally, we also prove that three-round and public-coin constant-round post-quantum black-box ϵ-zero-knowledge arguments for NP do not exist unless NP\⊆BQP. Achieving quantum computational advantage requires solving a classically intractable problem on a quantum device. Natural proposals rely upon the intrinsic hardness of classically simulating quantum mechanics; however, verifying the output is itself classically intractable. On the other hand, certain quantum algorithms (e.g. prime factorization via Shor\&$\#$39;s algorithm) are efficiently verifiable, but require more resources than what is available on near-term devices. One way to bridge the gap between verifiability and implementation is to use \"interactions\" between a prover and a verifier. By leveraging cryptographic functions, such protocols enable the classical verifier to enforce consistency in a quantum prover\&$\#$39;s responses across multiple rounds of interaction. In this work, we demonstrate the first implementation of an interactive quantum advantage protocol, using an ion trap quantum computer. We execute two complementary protocols -- one based upon the learning with errors problem and another where the cryptographic construction implements a computational Bell test. To perform multiple rounds of interaction, we implement mid-circuit measurements on a subset of trapped ion qubits, with subsequent coherent evolution. For both protocols, the performance exceeds the asymptotic bound for classical behavior; maintaining this fidelity at scale would conclusively demonstrate verifiable quantum advantage. Spin-wave excitations in ensembles of atoms are gaining attention as a quantum information resource. 
However, current techniques with atomic spin waves do not achieve universal quantum information processing. We conduct a theoretical analysis of methods to create a high-capacity universal quantum processor and network node using an ensemble of laser-cooled atoms, trapped in a one-dimensional periodic potential and coupled to a ring cavity. We describe how to establish linear quantum processing using a lambda-scheme in a rubidium-atom system, calculate the expected experimental operational fidelities. Second, we derive an efficient method to achieve linear controllability with a single ensemble of atoms, rather than two-ensembles as proposed in [K. C. Cox et al. Spin-Wave Quantum Computing with Atoms in a Single-Mode Cavity, preprint 2021]. Finally, we propose to use the spin-wave processor for continuous-variable quantum information processing and present a scheme to generate large dual-rail cluster states useful for deterministic computing.\ Magic can be distributed non-locally in many-body entangled states, such as the low energy states of condensed matter systems. Using the Bravyi-Kitaev magic state distillation protocol, we find that non-local magic is distillable and can improve the distillation outcome. We analyze a few explicit examples and show that spin squeezing can be used to convert non-distillable states into distillable ones. It is interesting to observe that all optical materials with a positive refractive index have a value of index that is of order unity. Surprisingly, though, a deep understanding of the mechanisms that lead to this universal behavior seems to be lacking. Moreover, this observation is difficult to reconcile with the fact that a single, isolated atom is known to have a giant optical response, as characterized by a resonant scattering cross section that far exceeds its physical size. 
Here, we theoretically and numerically investigate the evolution of the optical properties of an ensemble of ideal atoms as a function of density, starting from the dilute gas limit, including the effects of multiple scattering and near-field interactions. Interestingly, despite the giant response of an isolated atom, we find that the maximum index does not indefinitely grow with increasing density, but rather reaches a limiting value n\≈1.7. We propose an explanation based upon strong-disorder renormalization group theory, in which the near-field interaction combined with random atomic positions results in an inhomogeneous broadening of atomic resonance frequencies. This mechanism ensures that regardless of the physical atomic density, light at any given frequency only interacts with at most a few near-resonant atoms per cubic wavelength, thus limiting the maximum index attainable. Our work is a promising first step to understand the limits of refractive index from a bottom-up, atomic physics perspective, and also introduces renormalization group as a powerful tool to understand the generally complex problem of multiple scattering of light overall. Variational Quantum Algorithms (VQAs) may be a path to quantum advantage on Noisy Intermediate-Scale Quantum (NISQ) computers. A natural question is whether noise on NISQ devices places fundamental limitations on VQA performance. We rigorously prove a serious limitation for noisy VQAs, in that the noise causes the training landscape to have a barren plateau (i.e., vanishing gradient). Specifically, for the local Pauli noise considered, we prove that the gradient vanishes exponentially in the number of qubits n if the depth of the ansatz grows linearly with n. These noise-induced barren plateaus (NIBPs) are conceptually different from noise-free barren plateaus, which are linked to random parameter initialization. 
Our result is formulated for a generic ansatz that includes as special cases the Quantum Alternating Operator Ansatz and the Unitary Coupled Cluster Ansatz, among others. For the former, our numerical heuristics demonstrate the NIBP phenomenon for a realistic hardware noise model. While quantum computers are naturally well-suited to implementing linear operations, it is less clear how to implement nonlinear operations on quantum computers. However, nonlinear subroutines may prove key to a range of applications of quantum computing from solving nonlinear equations to data processing and quantum machine learning. Here we develop algorithms for implementing nonlinear transformations of input quantum states. Our algorithms are framed around the concept of a weighted state, a mathematical entity describing the output of an operational procedure involving both quantum circuits and classical post-processing. The conventional framework for defining and understanding phases of matter requires thermodynamic equilibrium. Extensions to non-equilibrium systems have led to surprising insights into the nature of many-body thermalization and the discovery of novel phases of matter, often catalyzed by driving the system periodically. The inherent heating from such Floquet drives can be tempered by including strong disorder in the system, but this can also mask the generality of non-equilibrium phases. In this work, we utilize a trapped-ion quantum simulator to observe signatures of a non-equilibrium driven phase without disorder: the prethermal discrete time crystal (PDTC). Here, many-body heating is suppressed not by disorder-induced many-body localization, but instead via high-frequency driving, leading to an expansive time window where non-equilibrium phases can emerge. 
We observe a number of key features that distinguish the PDTC from its many-body-localized disordered counterpart, such as the drive-frequency control of its lifetime and the dependence of time-crystalline order on the energy density of the initial state. Floquet prethermalization is thus presented as a general strategy for creating, stabilizing and studying intrinsically out-of-equilibrium phases of matter. Many-body open quantum systems balance internal dynamics against decoherence from interactions with an environment. Here, we explore this balance via random quantum circuits implemented on a trapped ion quantum computer, where the system evolution is represented by unitary gates with interspersed projective measurements. As the measurement rate is varied, a purification phase transition is predicted to emerge at a critical point akin to a fault-tolerent threshold. We probe the \"pure\" phase, where the system is rapidly projected to a deterministic state conditioned on the measurement outcomes, and the \"mixed\" or \"coding\" phase, where the initial state becomes partially encoded into a quantum error correcting codespace. We find convincing evidence of the two phases and show numerically that, with modest system scaling, critical properties of the transition clearly emerge. Thermalization is a ubiquitous process of statistical physics, in which details of few-body observables are washed out in favor of a featureless steady state. Even in isolated quantum many-body systems, limited to reversible dynamics, thermalization typically prevails. However, in these systems, there is another possibility: many-body localization (MBL) can result in preservation of a non-thermal state. 
While disorder has long been considered an essential ingredient for this phenomenon, recent theoretical work has suggested that a quantum many-body system with a uniformly increasing field -- but no disorder -- can also exhibit MBL, resulting in {\textquoteleft}Stark MBL.\&$\#$39; Here we realize Stark MBL in a trapped-ion quantum simulator and demonstrate its key properties: halting of thermalization and slow propagation of correlations. Tailoring the interactions between ionic spins in an effective field gradient, we directly observe their microscopic equilibration for a variety of initial states, and we apply single-site control to measure correlations between separate regions of the spin chain. Further, by engineering a varying gradient, we create a disorder-free system with coexisting long-lived thermalized and nonthermal regions. The results demonstrate the unexpected generality of MBL, with implications about the fundamental requirements for thermalization and with potential uses in engineering long-lived non-equilibrium quantum matter. Recently, several approaches to solving linear systems on a quantum computer have been formulated in terms of the quantum adiabatic theorem for a continuously varying Hamiltonian. Such approaches enabled near-linear scaling in the condition number κ of the linear system, without requiring a complicated variable-time amplitude amplification procedure. However, the most efficient of those procedures is still asymptotically sub-optimal by a factor of log(κ). Here, we prove a rigorous form of the adiabatic theorem that bounds the error in terms of the spectral gap for intrinsically discrete time evolutions. We use this discrete adiabatic theorem to develop a quantum algorithm for solving linear systems that is asymptotically optimal, in the sense that the complexity is strictly linear in κ, matching a known lower bound on the complexity. 
Our O(κlog(1/ε)) complexity is also optimal in terms of the combined scaling in κ and the precision ε. Compared to existing suboptimal methods, our algorithm is simpler and easier to implement. Moreover, we determine the constant factors in the algorithm, which would be suitable for determining the complexity in terms of gate counts for specific applications.\ The only coupling dark matter is guaranteed to have with the standard model is through gravity. Here we propose a concept for direct dark matter detection using only this gravitational coupling. We suggest that an array of quantum-limited mechanical impulse sensors may be capable of detecting the correlated gravitational force created by a passing dark matter particle. We consider the effects of irreducible noise from couplings of the sensors to the environment and noise due to the quantum measurement process. We show that the signal from Planck-scale dark matter is in principle detectable using a large number of gram-scale sensors in a meter-scale array with sufficiently low quantum noise, and discuss some experimental challenges en route to achieving this target. As quantum computing progresses steadily from theory into practice, programmers will face Quantum walks are a promising framework for developing quantum algorithms and quantum simulations. They represent an important test case for the application of quantum computers. Here we present different forms of discrete-time quantum walks (DTQWs) and show their equivalence for physical realizations. Using an appropriate digital mapping of the position space on which a walker evolves to the multiqubit states of a quantum processor, we present different configurations of quantum circuits for the implementation of DTQWs in one-dimensional position space. We provide example circuits for a five-qubit processor and address scalability to higher dimensions as well as larger quantum processors. 
\ Identifying the best arm of a multi-armed bandit is a central problem in bandit optimization. We study a quantum computational version of this problem with coherent oracle access to states encoding the reward probabilities of each arm as quantum amplitudes. Specifically, we show that we can find the best arm with fixed confidence using $\tilde{O}\left(\sqrt{\sum_{i=2}^{n}\Delta_i^{-2}}\right)$ quantum queries, where $\Delta_i$ represents the difference between the mean reward of the best arm and the $i$th-best arm. This algorithm, based on variable-time amplitude amplification and estimation, gives a quadratic speedup compared to the best possible classical result. We also prove a matching quantum lower bound (up to poly-logarithmic factors). Quantum computers are expected to surpass the computational capabilities of classical computers during this decade, and achieve disruptive impact on numerous industry sectors, particularly finance. In fact, finance is estimated to be the first industry sector to benefit from Quantum Computing not only in the medium and long terms, but even in the short term. This review paper presents the state of the art of quantum algorithms for financial applications, with particular focus to those use cases that can be solved via Machine Learning. In this work, we initiate the study of the Minimum Circuit Size Problem (MCSP) in the quantum setting. MCSP is a problem to compute the circuit complexity of Boolean functions. It is a fascinating problem in complexity theory---its hardness is mysterious, and a better understanding of its hardness can have surprising implications to many fields in computer science. We study quantum algorithms that learn properties of a matrix using queries that return its action on an input vector. We show that for various problems, including computing the trace, determinant, or rank of a matrix or solving a linear system that it specifies, quantum computers do not provide an asymptotic speedup over classical computation. 
On the other hand, we show that for some problems, such as computing the parities of rows or columns or deciding if there are two identical rows or columns, quantum computers provide exponential speedup. We demonstrate this by showing equivalence between models that provide matrix-vector products, vector-matrix products, and vector-matrix-vector products, whereas the power of these models can vary significantly for classical computation. We study quantum algorithms that learn properties of a matrix using queries that return its action on an input vector. We show that for various problems, including computing the trace, determinant, or rank of a matrix or solving a linear system that it specifies, quantum computers do not provide an asymptotic speedup over classical computation. On the other hand, we show that for some problems, such as computing the parities of rows or columns or deciding if there are two identical rows or columns, quantum computers provide exponential speedup. We demonstrate this by showing equivalence between models that provide matrix-vector products, vector-matrix products, and vector-matrix-vector products, whereas the power of these models can vary significantly for classical computation. We present methods for implementing arbitrary permutations of qubits under interaction constraints. Our protocols make use of previous methods for rapidly reversing the order of qubits along a path. Given nearest-neighbor interactions on a path of length n, we show that there exists a constant ϵ\≈0.034 such that the quantum routing time is at most (1\−ϵ)n, whereas any swap-based protocol needs at least time n\−1. This represents the first known quantum advantage over swap-based routing methods and also gives improved quantum routing times for realistic architectures such as grids. 
Furthermore, we show that our algorithm approaches a quantum routing time of 2n/3 in expectation for uniformly random permutations, whereas swap-based protocols require time n asymptotically. Additionally, we consider sparse permutations that route k\≤n qubits and give algorithms with quantum routing time at most n/3+O(k2) on paths and at most 2r/3+O(k2) on general graphs with radius r. Quantum self-testing is a device-independent way to certify quantum states and measurements using only the input-output statistics, with minimal assumptions about the quantum devices. Due to the high demand on tolerable noise, however, experimental self-testing was limited to two-photon systems. Here, we demonstrate the first robust self-testing for multi-particle quantum entanglement. We prepare two examples of four-photon graph states, the Greenberger-Horne-Zeilinger (GHZ) states with a fidelity of 0.957(2) and the linear cluster states with a fidelity of 0.945(2). Based on the observed input-output statistics, we certify the genuine four-photon entanglement and further estimate their qualities with respect to realistic noise in a device-independent manner. We present a method for network-capable quantum computing that relies on holographic spin-wave excitations stored collectively in ensembles of qubits. We construct an orthogonal basis of spin waves in a one-dimensional array and show that high-fidelity universal linear controllability can be achieved using only phase shifts, applied in both momentum and position space. Neither single-site addressability nor high single-qubit cooperativity is required, and the spin waves can be read out with high efficiency into a single cavity mode for quantum computing and networking applications.\ We suggest a test of a central prediction of perturbatively quantized general relativity: the coherent communication of quantum information between massive objects through gravity. 
To do this, we introduce the concept of interactive quantum information sensing, a protocol tailored to the verification of dynamical entanglement generation between a pair of systems. Concretely, we propose to monitor the periodic wavefunction collapse and revival in an atomic interferometer which is gravitationally coupled to a mechanical oscillator. We prove a theorem which shows that, under the assumption of time-translation invariance, this collapse and revival is possible if and only if the gravitational interaction forms an entangling channel. Remarkably, as this approach improves at moderate temperatures and relies primarily upon atomic coherence, our numerical estimates indicate feasibility with current devices. The Lie-Trotter formula, together with its higher-order generalizations, provides a direct approach to decomposing the exponential of a sum of operators. Despite significant effort, the error scaling of such product formulas remains poorly understood. We develop a theory of Trotter error that overcomes the limitations of prior approaches based on truncating the Baker-Campbell-Hausdorff expansion. Our analysis directly exploits the commutativity of operator summands, producing tighter error bounds for both real- and imaginary-time evolutions. Whereas previous work achieves similar goals for systems with geometric locality or Lie-algebraic structure, our approach holds in general. We give a host of improved algorithms for digital quantum simulation and quantum Monte Carlo methods, including simulations of second-quantized plane-wave electronic structure, k-local Hamiltonians, rapidly decaying power-law interactions, clustered Hamiltonians, the transverse field Ising model, and quantum ferromagnets, nearly matching or even outperforming the best previous results. We obtain further speedups using the fact that product formulas can preserve the locality of the simulated system. 
Specifically, we show that local observables can be simulated with complexity independent of the system size for power-law interacting systems, which implies a Lieb-Robinson bound as a byproduct. Our analysis reproduces known tight bounds for first- and second-order formulas. Our higher-order bound overestimates the complexity of simulating a one-dimensional Heisenberg model with an even-odd ordering of terms by only a factor of 5, and is close to tight for power-law interactions and other orderings of terms. This suggests that our theory can accurately characterize Trotter error in terms of both asymptotic scaling and constant prefactor. We give an upper bound on the resources required for valuable quantum advantage in pricing derivatives. To do so, we give the first complete resource estimates for useful quantum derivative pricing, using autocallable and Target Accrual Redemption Forward (TARF) derivatives as benchmark use cases. We uncover blocking challenges in known approaches and introduce a new method for quantum derivative pricing - the re-parameterization method - that avoids them. This method combines pre-trained variational circuits with fault-tolerant quantum computing to dramatically reduce resource requirements. We find that the benchmark use cases we examine require 7.5k logical qubits and a T-depth of 46 million and thus estimate that quantum advantage would require a logical clock speed of 10Mhz. While the resource requirements given here are out of reach of current systems, we hope they will provide a roadmap for further improvements in algorithms, implementations, and planned hardware architectures.\ The generation of certifiable randomness is the most fundamental informationtheoretic task that meaningfully separates quantum devices from their classical counterparts. We propose a protocol for exponential certified randomness expansion using a\ single quantum device. 
The protocol calls for the device to implement a simple quantum circuit of constant depth on a 2D lattice of qubits. The output of the circuit can be\ verified classically in linear time, and is guaranteed to contain a polynomial number of\ certified random bits assuming that the device used to generate the output operated using\ a (classical or quantum) circuit of sub-logarithmic depth. This assumption contrasts with\ the locality assumption used for randomness certification based on Bell inequality violation and more recent proposals for randomness certification based on computational\ assumptions. Furthermore, to demonstrate randomness generation it is sufficient for a\ device to sample from the ideal output distribution within constant statistical distance.\ \ Our procedure is inspired by recent work of Bravyi et al. (Science 362(6412):308\–311,\ 2018), who introduced a relational problem that can be solved by a constant-depth quantum circuit, but provably cannot be solved by any classical circuit of sub-logarithmic\ depth. We develop the discovery of Bravyi et al. into a framework for robust randomness\ expansion. Our results lead to a new proposal for a demonstrated quantum advantage\ that has some advantages compared to existing proposals. First, our proposal does not\ rest on any complexity-theoretic conjectures, but relies on the physical assumption that\ the adversarial device being tested implements a circuit of sub-logarithmic depth. Second, success on our task can be easily verified in classical linear time. Finally, our task\ is more noise-tolerant than most other existing proposals that can only tolerate multiplicative error, or require additional conjectures from complexity theory; in contrast, we\ are able to allow a small constant additive error in total variation distance between the\ sampled and ideal distributions. 
Electrons and ions trapped with electromagnetic fields have long served as important high-precision metrological instruments, and more recently have also been proposed as a platform for quantum information processing. Here we point out that these systems can also be used as highly sensitive detectors of passing charged particles, due to the combination of their extreme charge-to-mass ratio and low-noise quantum readout and control. In particular, these systems can be used to detect energy depositions many orders of magnitude below typical ionization scales. As an illustration, we show that current devices can be used to provide competitive sensitivity to models where ambient dark matter particles carry small electric millicharges. Long-range Rydberg interactions, in combination with electromagnetically induced transparency (EIT), give rise to strongly interacting photons where the strength, sign, and form of the interactions are widely tunable and controllable. Such control can be applied to both coherent and dissipative interactions, which provides the potential to generate novel few-photon states. Recently it has been shown that Rydberg-EIT is a rare system in which three-body interactions can be as strong or stronger than two-body interactions. In this work, we study a three-body scattering loss for Rydberg-EIT in a wide regime of single and two-photon detunings. Our numerical simulations of the full three-body wavefunction and analytical estimates based on Fermi's Golden Rule strongly suggest that the observed features in the outgoing photonic correlations are caused by the resonant enhancement of the three-body losses. We consider the use of quantum-limited mechanical force sensors to detect ultralight (sub-meV) dark matter candidates which are weakly coupled to the standard model. 
We show that mechanical sensors with masses around or below the milligram scale, operating around the standard quantum limit, would enable novel searches for dark matter with natural frequencies around the kHz scale. This would complement existing strategies based on torsion balances, atom interferometers, and atomic clock systems. If gravitational perturbations are quantized into gravitons in analogy with the electromagnetic field and photons, the resulting graviton interactions should lead to an entangling interaction between massive objects. We suggest a test of this prediction. To do this, we introduce the concept of interactive quantum information sensing. This novel sensing protocol is tailored to provable verification of weak dynamical entanglement generation between a pair of systems. We show that this protocol is highly robust to typical thermal noise sources. The sensitivity can moreover be increased both using an initial thermal state and/or an initial phase of entangling via a non-gravitational interaction. We outline a concrete implementation testing the ability of the gravitational field to generate entanglement between an atomic interferometer and mechanical oscillator. Preliminary numerical estimates suggest that near-term devices could feasibly be used to perform the experiment. There are myriad quantum computing approaches, each having its own set of challenges to understand and effectively control their operation. Electrons confined in arrays of semiconductor nanostructures, called quantum dots (QDs), is one such approach. The easy access to control parameters, fast measurements, long qubit lifetimes, and the potential for scalability make QDs especially attractive. However, as the size of the QD array grows, so does the number of parameters needed for control and thus the tuning complexity. The current practice of manually tuning the qubits is a relatively time-consuming procedure and is inherently impractical for scaling up and applications. 
In this work, we report on the in situ implementation of an auto-tuning protocol proposed by Kalantre et al. [arXiv:1712.04914]. In particular, we discuss how to establish a seamless communication protocol between a machine learning (ML)-based auto-tuner and the experimental apparatus. We then show that a ML algorithm trained exclusively on synthetic data coming from a physical model to quantitatively classify the state of the QD device, combined with an optimization routine, can be used to replace manual tuning of gate voltages in devices. A success rate of over 85 \% is determined for tuning to a double quantum dot regime when at least one of the plunger gates is initiated sufficiently close to the desired state. Modifications to the training network, fitness function, and optimizer are discussed as a path towards further improvement in the success rate when starting both near and far detuned from the target double dot range. The quantum measurement of any observable naturally leads to noise added by the act of measurement. Approaches to evade or reduce this noise can lead to substantial improvements in a wide variety of sensors, from laser interferometers to precision magnetometers and more. In this paper, we develop a measurement protocol based upon pioneering work by the gravitational wave community which allows for reduction of added noise from measurement by coupling an optical field to the momentum of a small mirror. As a specific implementation, we present a continuous measurement protocol using a double-ring optomechanical cavity. We demonstrate that with experimentally-relevant parameters, this protocol can lead to significant back-action noise evasion, yielding measurement noise below the standard quantum limit over many decades of frequency. In a recent seminal work, Bitansky and Shmueli (STOC \&$\#$39;20) gave the first construction of a constant round zero-knowledge argument for NP secure against quantum attacks. 
However, their construction has several drawbacks compared to the classical counterparts. Specifically, their construction only achieves computational soundness, requires strong assumptions of quantum hardness of learning with errors (QLWE assumption) and the existence of quantum fully homomorphic encryption (QFHE), and relies on non-black-box simulation. In this paper, we resolve these issues at the cost of weakening the notion of zero-knowledge to what is called ε-zero-knowledge. Concretely, we construct the following protocols: - We construct a constant round interactive proof for NP that satisfies statistical soundness and black-box ε-zero-knowledge against quantum attacks assuming the existence of collapsing hash functions, which is a quantum counterpart of collision-resistant hash functions. Interestingly, this construction is just an adapted version of the classical protocol by Goldreich and Kahan (JoC \&$\#$39;96) though the proof of ε-zero-knowledge property against quantum adversaries requires novel ideas. - We construct a constant round interactive argument for NP that satisfies computational soundness and black-box ε-zero-knowledge against quantum attacks only assuming the existence of post-quantum one-way functions. At the heart of our results is a new quantum rewinding technique that enables a simulator to extract a committed message of a malicious verifier while simulating verifier\&$\#$39;s internal state in an appropriate sense. Using the tensor Radon transform and related numerical methods, we study how bulk geometries can be explicitly reconstructed from boundary entanglement entropies in the specific case of AdS3/CFT2. We find that, given the boundary entanglement entropies of a 2d CFT, this framework provides a quantitative measure that detects whether the bulk dual is geometric in the perturbative (near AdS) limit. 
In the case where a well-defined bulk geometry exists, we explicitly reconstruct the unique bulk metric tensor once a gauge choice is made. We then examine the emergent bulk geometries for static and dynamical scenarios in holography and in many-body systems. Apart from the physics results, our work demonstrates that numerical methods are feasible and effective in the study of bulk reconstruction in AdS/CFT. Quantum computers can sometimes exponentially outperform classical ones, but only for problems with sufficient structure. While it is well known that query problems with full permutation symmetry can have at most polynomial quantum speedup -- even for partial functions -- it is unclear how far this condition must be relaxed to enable exponential speedup. In particular, it is natural to ask whether exponential speedup is possible for (partial) graph properties, in which the input describes a graph and the output can only depend on its isomorphism class. We show that the answer to this question depends strongly on the input model. In the adjacency matrix model, we prove that the bounded-error randomized query complexity R of any graph property P has R(P) = O(Q(P)^6), where Q is the bounded-error quantum query complexity. This negatively resolves an open question of Montanaro and de Wolf in the adjacency matrix model. More generally, we prove R(P) = O(Q(P)^{3l}) for any l-uniform hypergraph property P in the adjacency matrix model. In direct contrast, in the adjacency list model for bounded-degree graphs, we exhibit a promise problem that shows an exponential separation between the randomized and quantum query complexities. Modular mixed-species ion-trap networks are a promising framework for scalable quantum information processing, where one species acts as a memory qubit and another as a communication qubit. This architecture requires high-fidelity mixed-species entangling gates to transfer information from communication to memory qubits through their collective motion. 
We investigate the character of the motional modes of a mixed-species ion chain for entangling operations and also sympathetic cooling. We find that the laser power required for high-fidelity entangling gates based on transverse modes is at least an order of magnitude higher than that based on axial modes for widely different masses of the two species. We also find that for even moderate mass differences, the transverse modes are much harder to cool than the axial modes regardless of the ion chain configuration. Therefore, transverse modes conventionally used for operations in single-species ion chains may not be well suited for mixed-species chains with widely different masses. A conjecture of Jozsa [Jozsa06] states that any polynomial-time quantum computation can be simulated by polylogarithmic-depth quantum computation interleaved with polynomial-depth classical computation. Separately, Aaronson [Aaronson05, Aaronson11, Aaronson14] conjectured that there exists an oracle O such that BQP^O ≠ (BPP^{BQNC})^O. These conjectures are intriguing allusions to the unresolved potential of combining classical and low-depth quantum computation. In this work we show that the Welded Tree Problem, which is an oracle problem that can be solved in quantum polynomial time as shown by Childs et al. [ChildsCDFGS03], cannot be solved in BPP^{BQNC}, nor can it be solved in the class that Jozsa describes. This proves Aaronson's oracle separation conjecture and provides a counterpoint to Jozsa's conjecture relative to the Welded Tree oracle problem. More precisely, we define two complexity classes, HQC and JC whose languages are decided by two different families of interleaved quantum-classical circuits. HQC contains BPP^{BQNC} and is therefore relevant to Aaronson's conjecture, while JC captures the model of computation that Jozsa considers. We show that the Welded Tree Problem gives an oracle separation between either of {JC,HQC} and BQP. 
Therefore, even when interleaved with arbitrary polynomial-time classical computation, greater \"quantum depth\" leads to strictly greater computational ability in this relativized setting. We present the first Monte Carlo based global QCD analysis of spin-averaged and spin-dependent parton distribution functions (PDFs) that includes nucleon isovector matrix elements in coordinate space from lattice QCD. We investigate the degree of universality of the extracted PDFs when the lattice and experimental data are treated under the same conditions within the Bayesian likelihood analysis. For the unpolarized sector, we find rather weak constraints from the current lattice data on the phenomenological PDFs, and difficulties in describing the lattice matrix elements at large spatial distances. In contrast, for the polarized PDFs we find good agreement between experiment and lattice data, with the latter providing significant constraints on the spin-dependent isovector quark and antiquark distributions. In a recent breakthrough, Mahadev constructed a classical verification of quantum computation (CVQC) protocol for a classical client to delegate decision problems in BQP to an untrusted quantum prover under computational assumptions. In this work, we explore further the feasibility of CVQC with the more general sampling problems in BQP and with the desirable blindness property. We contribute affirmative solutions to both as follows. (1) Motivated by the sampling nature of many quantum applications (e.g., quantum algorithms for machine learning and quantum supremacy tasks), we initiate the study of CVQC for quantum sampling problems (denoted by SampBQP). More precisely, in a CVQC protocol for a SampBQP problem, the prover and the verifier are given an input x ∈ {0,1}^n and a quantum circuit C, and the goal of the classical client is to learn a sample from the output z ← C(x) up to a small error, from its interaction with an untrusted prover. 
We demonstrate its feasibility by constructing a four-message CVQC protocol for SampBQP based on the quantum Learning With Error assumption. (2) The blindness of CVQC protocols refers to a property of the protocol where the prover learns nothing, and hence is blind, about the client\&$\#$39;s input. It is a highly desirable property that has been intensively studied for the delegation of quantum computation. We provide a simple yet powerful generic compiler that transforms any CVQC protocol to a blind one while preserving its completeness and soundness errors as well as the number of rounds. Applying our compiler to (a parallel repetition of) Mahadev\&$\#$39;s CVQC protocol for BQP and our CVQC protocol for SampBQP yields the first constant-round blind CVQC protocol for BQP and SampBQP respectively, with negligible completeness and soundness errors. Photon blockade is the result of the interplay between the quantized nature of light and strong optical nonlinearities, whereby strong photon-photon repulsion prevents a quantum optical system from absorbing multiple photons. We theoretically study a single atom coupled to the light field, described by the resonantly driven Jaynes--Cummings model, in which case the photon blockade breaks down in a second order phase transition at a critical drive strength. We show that this transition is associated to the spontaneous breaking of an anti-unitary PT-symmetry. Within a semiclassical approximation we calculate the expectation values of observables in the steady state. We then move beyond the semiclassical approximation and approach the critical point from the disordered (blockaded) phase by reducing the Lindblad quantum master equation to a classical rate equation that we solve. The width of the steady-state distribution in Fock space is found to diverge as we approach the critical point with a simple power-law, allowing us to calculate the critical scaling of steady state observables without invoking mean-field theory. 
We propose a simple physical toy model for biased diffusion in the space of occupation numbers, which captures the universal properties of the steady state. We list several experimental platforms where this phenomenon may be observed. Quantum computers can efficiently simulate the dynamics of quantum systems. In this paper, we study the cost of digitally simulating the dynamics of several physically relevant systems using the first-order product formula algorithm. We show that the errors from different Trotterization steps in the algorithm can interfere destructively, yielding a much smaller error than previously estimated. In particular, we prove that the total error in simulating a nearest-neighbor interacting system of n sites for time t using the first-order product formula with r time slices is O(nt/r+nt3/r2) when nt2/r is less than a small constant. Given an error tolerance ε, the error bound yields an estimate of max{O(n2t/ε),O(n2t3/2/ε1/2)} for the total gate count of the simulation. The estimate is tighter than previous bounds and matches the empirical performance observed in Childs et al. [PNAS 115, 9456-9461 (2018)]. We also provide numerical evidence for potential improvements and conjecture an even tighter estimate for the gate count.\ There are many possible architectures for future quantum computers that designers will need to choose between. However, the process of evaluating a particular connectivity graph\&$\#$39;s performance as a quantum architecture can be difficult. In this paper, we establish a connection between a quantity known as the isoperimetric number and a lower bound on the time required to create highly entangled states. The metric we propose counts resources based on the use of two-qubit unitary operations, while allowing for arbitrarily fast measurements and classical feedback. We describe how these results can be applied to the evaluation of the hierarchical architecture proposed in Phys. Rev. A 98, 062328 (2018). 
We also show that the time-complexity bound we place on the creation of highly-entangled states can be saturated up to a multiplicative factor logarithmic in the number of qubits. Dissipation generally leads to the decoherence of a quantum state. In contrast, numerous recent proposals have illustrated that dissipation can also be tailored to stabilize many-body entangled quantum states. While the focus of these works has been primarily on engineering the non-equilibrium steady state, we investigate the build-up of entanglement in the quantum trajectories. Specifically, we analyze the competition between two different dissipation channels arising from two incompatible continuous monitoring protocols. The first protocol locks the phase of neighboring sites upon registering a quantum jump, thereby generating a long-range entanglement through the system, while the second one destroys the coherence via dephasing mechanism. By studying the unraveling of stochastic quantum trajectories associated with the continuous monitoring protocols, we present a transition for the scaling of the averaged trajectory entanglement entropies, from critical scaling to area-law behavior. Our work provides novel insights into the occurrence of a measurement-induced phase transition within a continuous monitoring protocol. Ultracold systems offer an unprecedented level of control of interactions between atoms. An important challenge is to achieve a similar level of control of the interactions between photons. Towards this goal, we propose a realization of a novel Lennard-Jones-like potential between photons coupled to the Rydberg states via electromagnetically induced transparency (EIT). This potential is achieved by tuning Rydberg states to a F{\"o}rster resonance with other Rydberg states. We consider few-body problems in 1D and 2D geometries and show the existence of self-bound clusters (\"molecules\") of photons. 
We demonstrate that for a few-body problem, the multi-body interactions have a significant impact on the geometry of the molecular ground state. This leads to phenomena without counterparts in conventional systems: For example, three photons in 2D preferentially arrange themselves in a line-configuration rather than in an equilateral-triangle configuration. Our result opens a new avenue for studies of many-body phenomena with strongly interacting photons. Quantum error correction protects fragile quantum information by encoding it in a larger quantum system whose extra degrees of freedom enable the detection and correction of errors. An encoded logical qubit thus carries increased complexity compared to a bare physical qubit. Fault-tolerant protocols contain the spread of errors and are essential for realizing error suppression with an error-corrected logical qubit. Here we experimentally demonstrate fault-tolerant preparation, rotation, error syndrome extraction, and measurement on a logical qubit encoded in the 9-qubit Bacon-Shor code. For the logical qubit, we measure an average fault-tolerant preparation and measurement error of 0.6\% and a transversal Clifford gate with an error of 0.3\% after error correction. The result is an encoded logical qubit whose logical fidelity exceeds the fidelity of the entangling operations used to create it. We compare these operations with non-fault-tolerant protocols capable of generating arbitrary logical states, and observe the expected increase in error. We directly measure the four Bacon-Shor stabilizer generators and are able to detect single qubit Pauli errors. These results show that fault-tolerant quantum systems are currently capable of logical primitives with error rates lower than their constituent parts. 
With the future addition of intermediate measurements, the full power of scalable quantum error-correction can be achieved. The only coupling dark matter is guaranteed to have with the standard model is through gravity. Here we propose a concept for direct dark matter detection using only this gravitational coupling, enabling a new regime of detection. Leveraging dramatic advances in the ability to create, maintain, and probe quantum states of massive objects, we suggest that an array of quantum-limited impulse sensors may be capable of detecting the correlated gravitational force created by a passing dark matter particle. We present two concrete realizations of this scheme, using either mechanical resonators or freely-falling masses. With currently available technology, a meter-scale apparatus of this type could detect any dark matter candidate around the Planck mass or heavier. In quantum many-body systems with local interactions, quantum information and entanglement cannot spread outside of a \"linear light cone,\" which expands at an emergent velocity analogous to the speed of light. Yet most non-relativistic physical systems realized in nature have long-range interactions: two degrees of freedom separated by a distance r interact with potential energy V(r) ∝ 1/r^α. In systems with long-range interactions, we rigorously establish a hierarchy of linear light cones: at the same α, some quantum information processing tasks are constrained by a linear light cone while others are not. In one spatial dimension, commutators of local operators ⟨ψ|[O_x(t),O_y]|ψ⟩ are negligible in every state |ψ⟩ when |x−y| ≳ vt, where v is finite when α > 3 (Lieb-Robinson light cone); in a typical state |ψ⟩ drawn from the infinite temperature ensemble, v is finite when α > 5/2 (Frobenius light cone); in non-interacting systems, v is finite in every state when α > 2 (free light cone). These bounds apply to time-dependent systems and are optimal up to subalgebraic improvements. 
Our theorems regarding the Lieb-Robinson and free light cones, and their tightness, also generalize to arbitrary dimensions. We discuss the implications of our bounds on the growth of connected correlators and of topological order, the clustering of correlations in gapped systems, and the digital simulation of systems with long-range interactions. In addition, we show that quantum state transfer and many-body quantum chaos are bounded by the Frobenius light cone, and therefore are poorly constrained by all Lieb-Robinson bounds. Numerous astrophysical and cosmological observations are best explained by the existence of dark matter, a mass density which interacts only very weakly with visible, baryonic matter. Searching for the extremely weak signals produced by this dark matter strongly motivate the development of new, ultra-sensitive detector technologies. Paradigmatic advances in the control and readout of massive mechanical systems, in both the classical and quantum regimes, have enabled unprecedented levels of sensitivity. In this white paper, we outline recent ideas in the potential use of a range of solid-state mechanical sensing technologies to aid in the search for dark matter in a number of energy scales and with a variety of coupling mechanisms. It was shown recently, building on work of Alexakis, Balehowksy, and Nachman that the geometry of (some portion of) a manifold with boundary is uniquely fixed by the areas of a foliation of two-dimensional disk-shaped surfaces anchored to the boundary. In the context of AdS/CFT, this implies that (a portion of) a four-dimensional bulk geometry can be fixed uniquely from the entanglement entropies of disk-shaped boundary regions, subject to several constraints. 
In this Note, we loosen some of these constraints, in particular allowing for the bulk foliation of extremal surfaces to be local and removing the constraint of disk topology; these generalizations ensure uniqueness of more of the deep bulk geometry by allowing for e.g. surfaces anchored on disconnected asymptotic boundaries, or HRT surfaces past a phase transition. We also explore in more depth the generality of the local foliation requirement, showing that even in a highly dynamical geometry like AdS-Vaidya it is satisfied. We propose a time-independent Hamiltonian protocol for the reversal of qubit ordering in a chain of N spins. Our protocol has an easily implementable nearest-neighbor, transverse-field Ising model Hamiltonian with time-independent, non-uniform couplings. Under appropriate normalization, we implement this state reversal three times faster than a naive approach using SWAP gates, in time comparable to a protocol of Raussendorf [Phys. Rev. A 72, 052301 (2005)] that requires dynamical control. We also prove lower bounds on state reversal by using results on the entanglement capacity of Hamiltonians and show that we are within a factor 1.502(1+1/N) of the shortest time possible. Our lower bound holds for all nearest-neighbor qubit protocols with arbitrary finite ancilla spaces and local operations and classical communication. Finally, we extend our protocol to an infinite family of nearest-neighbor, time-independent Hamiltonian protocols for state reversal. This includes chains with nearly uniform coupling that may be especially feasible for experimental implementation.\ In a recent breakthrough, Mahadev constructed an interactive protocol that enables a purely classical party to delegate any quantum computation to an untrusted quantum prover. In this work, we show that this same task can in fact be performed non-interactively and in zero-knowledge. 
Single photons coupled to atomic systems have shown to be a promising platform for developing quantum technologies. Yet a bright on-demand, highly pure and highly indistinguishable single-photon source compatible with atomic platforms is lacking. In this work, we demonstrate such a source based on a strongly interacting Rydberg system. The large optical nonlinearities in a blockaded Rydberg ensemble convert coherent light into a single-collective excitation that can be coherently retrieved as a quantum field. We observe a single-transverse-mode efficiency up to 0.18(2), g(2) = 2.0(1.5)×10^−4, and indistinguishability of 0.982(7), making this system promising for scalable quantum information applications. Accounting for losses, we infer a generation probability up to 0.40(4). Furthermore, we investigate the effects of contaminant Rydberg excitations on the source efficiency. Finally, we introduce metrics to benchmark the performance of on-demand single-photon sources. We propose a generic light cone phase diagram for chaotic long-range r^−α interacting systems, where a linear light cone appears for α ≥ d+1/2 in d dimension. Utilizing the dephasing nature of quantum chaos, we argue that the universal behavior of the squared commutator is described by a stochastic model, for which the exact phase diagram is known. We provide an interpretation in terms of the L{\'e}vy flights and show that this suffices to capture the scaling of the squared commutator. We verify these phenomena in numerical computation of a long-range spin chain with up to 200 sites. Quantum systems are promising candidates for sensing of weak signals as they can provide unrivaled performance when estimating parameters of external fields. However, when trying to detect weak signals that are hidden by background noise, the signal-to-noise-ratio is a more relevant metric than raw sensitivity. 
We identify, under modest assumptions about the statistical properties of the signal and noise, the optimal quantum control to detect an external signal in the presence of background noise using a quantum sensor. Interestingly, for white background noise, the optimal solution is the simple and well-known spin-locking control scheme. We further generalize, using numerical techniques, these results to the background noise being a correlated Lorentzian spectrum. We show that for increasing correlation time, pulse based sequences such as CPMG are also close to the optimal control for detecting the signal, with the crossover dependent on the signal frequency. These results show that an optimal detection scheme can be easily implemented in near-term quantum sensors without the need for complicated pulse shaping. \ Experiments in matter wave interferometry and optomechanics are increasing the spatial extent of wavefunctions of massive quantum systems; this gives rise to new sources of decoherence that must be characterized. Here\ we calculate the position space decoherence of a quantum particle due to interaction\ with a fluctuating classical background gas for several different force laws. We begin\ with the calculation of this effect for the Newton potential. To our knowledge, this\ calculation has not been done before. We then solve the same problem in the case\ of a Yukawa interaction, which interpolates between our long-range force result and\ the well-studied formula for collisional decoherence from a contact interaction. Unlike the contact interaction case, where the decoherence rate becomes independent\ of distance for large quantum particle separations, we observe that a long-range\ interaction leads to quadratic scaling of the decoherence rate with distance even at\ large separations. 
This work is relevant to the generation of massive superposition\ in optomechanical and atom beam experiments, and to conclude we comment on\ the use of this decoherence signal for gravitational detection of dark matter.\ Variational Quantum Circuits (VQCs), or the so-called quantum neural-networks, are predicted to be one of the most important near-term quantum applications, not only because of their similar promises as classical neural-networks, but also because of their feasibility on near-term noisy intermediate-size quantum (NISQ) machines. The need for gradient information in the training procedure of VQC applications has stimulated the development of auto-differentiation techniques for quantum circuits. We propose the first formalization of this technique, not only in the context of quantum circuits but also for imperative quantum programs (e.g., with controls), inspired by the success of differentiable programming languages in classical machine learning. In particular, we overcome a few unique difficulties caused by exotic quantum features (such as quantum no-cloning) and provide a rigorous formulation of differentiation applied to bounded-loop imperative quantum programs, its code-transformation rules, as well as a sound logic to reason about their correctness. Moreover, we have implemented our code transformation in OCaml and demonstrated the resource-efficiency of our scheme both analytically and empirically. We also conduct a case study of training a VQC instance with controls, which shows the advantage of our scheme over existing auto-differentiation for quantum circuits without controls. The seminal theoretical works of Berezinskii, Kosterlitz, and Thouless presented a new paradigm for phase transitions in condensed matter that are driven by topological excitations. These transitions have been extensively studied in the context of two-dimensional XY models -- coupled compasses -- and have generated interest in the context of quantum simulation. 
Here, we use a circuit quantum-electrodynamics architecture to study the critical behavior of engineered XY models through their dynamical response. In particular, we examine not only the unfrustrated case but also the fully-frustrated case which leads to enhanced degeneracy associated with the spin rotational [U(1)] and discrete chiral (Z2) symmetries. The nature of the transition in the frustrated case has posed a challenge for theoretical studies while direct experimental probes remain elusive. Here we identify the transition temperatures for both the unfrustrated and fully-frustrated XY models by probing a Josephson junction array close to equilibrium using weak microwave excitations and measuring the temperature dependence of the effective damping obtained from the complex reflection coefficient. We argue that our probing technique is primarily sensitive to the dynamics of the U(1) part. Drawing independent samples from high-dimensional probability distributions represents the major computational bottleneck for modern algorithms, including powerful machine learning frameworks such as deep learning. The quest for discovering larger families of distributions for which sampling can be efficiently realized has inspired an exploration beyond established computing methods and turning to novel physical devices that leverage the principles of quantum computation. Quantum annealing embodies a promising computational paradigm that is intimately related to the complexity of energy landscapes in Gibbs distributions, which relate the probabilities of system states to the energies of these states. Here, we study the sampling properties of physical realizations of quantum annealers which are implemented through programmable lattices of superconducting flux qubits. 
Comprehensive statistical analysis of the data produced by these quantum machines shows that quantum annealers behave as samplers that generate independent configurations from low-temperature noisy Gibbs distributions. We show that the structure of the output distribution probes the intrinsic physical properties of the quantum device such as effective temperature of individual qubits and magnitude of local qubit noise, which result in a non-linear response function and spurious interactions that are absent in the hardware implementation. We anticipate that our methodology will find widespread use in characterization of future generations of quantum annealers and other emerging analog computing devices. While recent work suggests that quantum computers can speed up the solution of semidefinite programs, little is known about the quantum complexity of more general convex optimization. We present a quantum algorithm that can optimize a convex function over an n-dimensional convex body using O~(n) queries to oracles that evaluate the objective function and determine membership in the convex body. This represents a quadratic improvement over the best-known classical algorithm. We also study limitations on the power of quantum computers for general convex optimization, showing that it requires Ω~(\√n) evaluation queries and Ω(\√n) membership queries. We study how efficiently a k-element set S\⊆[n] can be learned from a uniform superposition |S\〉 of its elements. One can think of |S\〉=\∑i\∈S|i\〉/\√|S| as the quantum version of a uniformly random sample over S, as in the classical analysis of the {\textquoteleft}{\textquoteleft}coupon collector problem.'' We show that if k is close to n, then we can learn S using asymptotically fewer quantum samples than random samples. In particular, if there are n\−k=O(1) missing elements then O(k) copies of |S\〉 suffice, in contrast to the Θ(k log k) random samples needed by a classical coupon collector. 
On the other hand, if n\−k=Ω(k), then Ω(klogk) quantum samples are~necessary. More generally, we give tight bounds on the number of quantum samples needed for every k and n, and we give efficient quantum learning algorithms. We also give tight bounds in the model where we can additionally reflect through |S\〉. Finally, we relate coupon collection to a known example separating proper and improper PAC learning that turns out to show no separation in the quantum case. Recently developed quantum algorithms address computational challenges in numerical analysis by performing linear algebra in Hilbert space. Such algorithms can produce a quantum state proportional to the solution of a d-dimensional system of linear equations or linear differential equations with complexity poly(logd). While several of these algorithms approximate the solution to within ε with complexity poly(log(1/ε)), no such algorithm was previously known for differential equations with time-dependent coefficients. Here we develop a quantum algorithm for linear ordinary differential equations based on so-called spectral methods, an alternative to finite difference methods that approximates the solution globally. Using this approach, we give a quantum algorithm for time-dependent initial and boundary value problems with complexity poly(logd,log(1/ε)). The quantum walk formalism is a widely used and highly successful framework for modeling quantum systems, such as simulations of the Dirac equation, different dynamics in both the low and high energy regime, and for developing a wide range of quantum algorithms. Here we present the circuit-based implementation of a discrete-time quantum walk in position space on a five-qubit trapped-ion quantum processor. We encode the space of walker positions in particular multi-qubit states and program the system to operate with different quantum walk parameters, experimentally realizing a Dirac cellular automaton with tunable mass parameter. 
The quantum walk circuits and position state mapping scale favorably to a larger model and physical systems, allowing the implementation of any algorithm based on discrete-time quantum walks and the dynamics associated with the discretized version of the Dirac equation. We present a classical algorithm that, for any 3D geometrically-local, constant-depth quantum circuit C, and any bit string x\∈{0,1}^n, can compute the quantity |\<x|C|0^{\⊗n}\>|^2 to within any inverse-polynomial additive error in quasi-polynomial time. It is known that it is $\#$P-hard to compute this same quantity to within 2^{\−n^2} additive error [Mov20]. The previous best known algorithm for this problem used O(2^{n^{1/3}}poly(1/ε)) time to compute probabilities to within additive error ε [BGM20]. Notably, the [BGM20] paper included an elegant polynomial time algorithm for the same estimation task with 2D circuits, which makes a novel use of 1D Matrix Product States carefully tailored to the 2D geometry of the circuit in question. Surprisingly, it is not clear that it is possible to extend this use of MPS to address the case of 3D circuits in polynomial time. This raises a natural question as to whether the computational complexity of the 3D problem might be drastically higher than that of the 2D problem. In this work we address this question by exhibiting a quasi-polynomial time algorithm for the 3D case. In order to surpass the technical barriers encountered by previously known techniques we are forced to pursue a novel approach: Our algorithm has a Divide-and-Conquer structure, constructing a recursive sub-division of the given 3D circuit using carefully designed block-encodings, each creating a 3D-local circuit on at most half the number of qubits as the original. This division step is then applied recursively, expressing the original quantity as a weighted sum of smaller and smaller 3D-local quantum circuits. 
A central technical challenge is to control correlations arising from the entanglement that may exist between the different circuit \"pieces\" produced this way. We present a classical algorithm that, for any 3D geometrically-local, polylogarithmic-depth quantum circuit C acting on n qubits, and any bit string x\∈{0,1}n, can compute the quantity |\<x|C|0\⊗n\>|2 to within any inverse-polynomial additive error in quasi-polynomial time. It is known that it is $\#$P-hard to compute this same quantity to within 2\−n2 additive error [Mov20, KMM21]. The previous best known algorithm for this problem used O(2n1/3poly(1/ϵ)) time to compute probabilities to within additive error ϵ [BGM20]. Notably, the [BGM20] paper included an elegant polynomial time algorithm for this estimation task restricted to 2D circuits, which makes a novel use of 1D Matrix Product States (MPS) carefully tailored to the 2D geometry of the circuit in question. Surprisingly, it is not clear that it is possible to extend this use of MPS to address the case of 3D circuits in polynomial time. This raises a natural question as to whether the computational complexity of the 3D problem might be drastically higher than that of the 2D problem. In this work we address this question by exhibiting a quasi-polynomial time algorithm for the 3D case. In order to surpass the technical barriers encountered by previously known techniques we are forced to pursue a novel approach. Quantum nano-devices are fundamental systems in quantum thermodynamics that have been the subject of profound interest in recent years. Among these, quantum batteries play a very important role. In this paper we lay down a theory of random quantum batteries and provide a systematic way of computing the average work and work fluctuations in such devices by investigating their typical behavior. 
We show that the performance of random quantum batteries exhibits typicality and depends only on the spectral properties of the time evolving operator, the initial state and the measuring Hamiltonian. At given revival times a random quantum battery features a quantum advantage over classical random batteries. Our method is particularly apt to be used both for exactly solvable models like the Jaynes-Cummings model and in perturbation theory, e.g., systems subject to harmonic perturbations. We also study the setting of quantum adiabatic random batteries. Deep learning-based blind image deblurring plays an essential role in solving image blur since all existing kernels are limited in modeling the real world blur. Thus far, researchers have focused on powerful models to handle the deblurring problem and achieve decent results. In this work, from a new perspective, we discover the great opportunity for image enhancement (e.g., deblurring) directly from RAW images and investigate novel neural network structures benefiting RAW-based learning. However, to the best of our knowledge, there is no available RAW image deblurring dataset. Therefore, we build a new dataset containing both RAW images and processed sRGB images and design a new model to utilize the unique characteristics of RAW images. The proposed deblurring model, trained solely from RAW images, achieves state-of-the-art performance and outperforms those trained on processed sRGB images. Furthermore, with fine-tuning, the proposed model, trained on our new dataset, can generalize to other sensors. Additionally, by a series of experiments, we demonstrate that existing deblurring models can also be improved by training on the RAW images in our new dataset. Ultimately, we show a new avenue for further opportunities based on the devised novel raw-based deblurring method and the brand-new Deblur-RAW dataset. 
Rydberg polaritons provide an example of a rare type of system where three-body interactions can be as strong as or even stronger than two-body interactions. The three-body interactions can be either dispersive or dissipative, with both types possibly giving rise to exotic, strongly-interacting, and topological phases of matter. Despite past theoretical and experimental studies of the regime with dispersive interaction, the dissipative regime is still mostly unexplored. Using a renormalization group technique to solve the three-body Schr{\"o}dinger equation, we show how the shape and strength of dissipative three-body forces can be universally enhanced for Rydberg polaritons. We demonstrate how these interactions relate to the transmission through a single-mode cavity, which can be used as a probe of the three-body physics in current experiments. We construct quantum error-correcting codes that embed a finite-dimensional code space in the infinite-dimensional Hilbert space of rotational states of a rigid body. These codes, which protect against both drift in the body\’s orientation and small changes in its angular momentum, may be well suited for robust storage and coherent processing of quantum information using rotational states of a polyatomic molecule. Extensions of such codes to rigid bodies with a symmetry axis are compatible with rotational states of diatomic molecules as well as nuclear states of molecules and atoms. We also describe codes associated with general non-Abelian groups and develop orthogonality relations for coset spaces, laying the groundwork for quantum information processing with exotic configuration spaces. We present an algorithmic framework for quantum-inspired classical algorithms on close-to-low-rank matrices, generalizing the series of results started by Tang's breakthrough quantum-inspired algorithm for recommendation systems [STOC'19]. 
Motivated by quantum linear algebra algorithms and the quantum singular value transformation (SVT) framework of Gily{\'e}n et al. [STOC\&$\#$39;19], we develop classical algorithms for SVT that run in time independent of input dimension, under suitable quantum-inspired sampling assumptions. Our results give compelling evidence that in the corresponding QRAM data structure input model, quantum SVT does not yield exponential quantum speedups. Since the quantum SVT framework generalizes essentially all known techniques for quantum linear algebra, our results, combined with sampling lemmas from previous work, suffice to generalize all recent results about dequantizing quantum machine learning algorithms. In particular, our classical SVT framework recovers and often improves the dequantization results on recommendation systems, principal component analysis, supervised clustering, support vector machines, low-rank regression, and semidefinite program solving. We also give additional dequantization results on low-rank Hamiltonian simulation and discriminant analysis. Our improvements come from identifying the key feature of the quantum-inspired input model that is at the core of all prior quantum-inspired results: l2-norm sampling can approximate matrix products in time independent of their dimension. We reduce all our main results to this fact, making our exposition concise, self-contained, and intuitive. Results are reported from a search for a class of composite dark matter models with feeble, long-range interactions with normal matter. We search for impulses arising from passing dark matter particles by monitoring the mechanical motion of an optically levitated nanogram mass over the course of several days. Assuming such particles constitute the dominant component of dark matter, this search places upper limits on their interaction with neutrons of αn\≤1.2\×10\−7 at 95\% confidence for dark matter masses between 1--10 TeV and mediator masses mφ\≤0.1 eV. 
Due to the large enhancement of the cross-section for dark matter to coherently scatter from a nanogram mass (\∼1029 times that for a single neutron) and the ability to detect momentum transfers as small as \∼200 MeV/c, these results provide sensitivity to certain classes of composite dark matter models that substantially exceeds existing searches, including those employing kg-scale or ton-scale targets. Extensions of these techniques can enable directionally-sensitive searches for a broad class of previously inaccessible heavy dark matter candidates.\ Secure two-party computation considers the problem of two parties computing a joint function of their private inputs without revealing anything beyond the output of the computation. In this work, we take the first steps towards understanding the setting in which the two parties want to evaluate a joint quantum functionality while using only a classical channel between them. Our first result indicates that it is in general impossible to realize a two-party quantum functionality against malicious adversaries with black-box simulation, relying only on classical channels. The negative result stems from reducing the existence of a black-box simulator to an extractor for classical proof of quantum knowledge, which in turn leads to violation of the quantum no-cloning. Next, we introduce the notion of oblivious quantum function evaluation (OQFE). An OQFE is a two-party quantum cryptographic primitive with one fully classical party (Alice) whose input is (a classical description of a) quantum unitary, U, and a quantum party (Bob) whose input is a quantum state, ψ. In particular, Alice receives a classical output corresponding to the measurement of U(ψ) while Bob receives no output. In OQFE, Bob remains oblivious to Alice\&$\#$39;s input, while Alice learns nothing about ψ more than what can be learned from the output. We present two constructions, one secure against semi-honest parties and the other against malicious parties. 
Due to the no-go result mentioned above, we consider what is arguably the best possible notion obtainable in our model concerning malicious adversaries: one-sided simulation security. Our protocol relies on the assumption of injective homomorphic trapdoor OWFs, which in turn rely on the LWE problem. As a result, we put forward a first, simple and modular, construction of one-sided quantum two-party computation and quantum oblivious transfer over classical networks. Secure delegated quantum computing allows a computationally weak client to outsource an arbitrary quantum computation to an untrusted quantum server in a privacy-preserving manner. One of the promising candidates to achieve classical delegation of quantum computation is classical-client remote state preparation (RSPCC), where a client remotely prepares a quantum state using a classical channel. However, the privacy loss incurred by employing RSPCC as a sub-module is unclear. Strongly long-range interacting quantum systems---those with interactions decaying as a power-law 1/rα in the distance r on a D-dimensional lattice for α\≤D---have received significant interest in recent years. They are present in leading experimental platforms for quantum computation and simulation, as well as in theoretical models of quantum information scrambling and fast entanglement creation. Since no notion of locality is expected in such systems, a general understanding of their dynamics is lacking. As a first step towards rectifying this problem, we prove two new Lieb-Robinson-type bounds that constrain the time for signaling and scrambling in strongly long-range interacting systems, for which no tight bounds were previously known. Our first bound applies to systems mappable to free-particle Hamiltonians with long-range hopping, and is saturable for α\≤D/2. 
Our second bound pertains to generic long-range interacting spin Hamiltonians, and leads to a tight lower bound for the signaling time to extensive subsets of the system for all α\<D. This result also lower-bounds the scrambling time, and suggests a path towards achieving a tight scrambling bound that can prove the long-standing fast scrambling conjecture.\ The National Institute of Standards and Technology is in the process of selecting one or more public-key cryptographic algorithms through a public, competition-like process. The new public-key cryptography standards will specify one or more additional digital signatures, public-key encryption, and key-establishment algorithms to augment Federal Information Processing Standard (FIPS) 186-4, Digital Signature Standard (DSS), as well as NIST Special Publication (SP) 800-56A Revision 3, Recommendation for Pair-Wise Key-Establishment Schemes Using Discrete Logarithm Cryptography, and SP 800-56B Revision 2, Recommendation for Pair-Wise Key Establishment Using Integer Factorization Cryptography. It is intended that these algorithms will be capable of protecting sensitive information well into the foreseeable future, including after the advent of quantum computers. The NIST Post-Quantum Cryptography Standardization Process began in 2017 with 69 candidate algorithms that met both the minimum acceptance criteria and submission requirements. The first round lasted until January 2019, during which candidate algorithms were evaluated based on their security, performance, and other characteristics. NIST selected 26 algorithms to advance to the second round for more analysis. This report describes the evaluation and selection process, based on public feedback and internal review, of the second-round candidates. The report summarizes the 26 second-round candidate algorithms and identifies those selected to move forward to the third round of the competition. 
The third-round finalist public-key encryption and key-establishment algorithms are Classic McEliece, CRYSTALS-KYBER, NTRU, and SABER. The third-round finalists for digital signatures are CRYSTALS-DILITHIUM, FALCON, and Rainbow. These finalists will be considered for standardization at the end of the third round. In addition, eight alternate candidate algorithms will also advance to the third round: BIKE, FrodoKEM, HQC, NTRU Prime, SIKE, GeMSS, Picnic, and SPHINCS+. These additional candidates are still being considered for standardization, although this is unlikely to occur at the end of the third round. NIST hopes that the announcement of these finalists and additional candidates will serve to focus the cryptographic community\’s attention during the next round. We investigate sublinear classical and quantum algorithms for matrix games, a fundamental problem in optimization and machine learning, with provable guarantees. Given a matrix A\∈R^{n\×d}, sublinear algorithms for the matrix game min_{x\∈X} max_{y\∈Y} y^⊤Ax were previously known only for two special cases: (1) Y being the l1-norm unit ball, and (2) X being either the l1- or the l2-norm unit ball. We give a sublinear classical algorithm that can interpolate smoothly between these two cases: for any fixed q\∈(1,2], we solve the matrix game where X is an lq-norm unit ball within additive error ε in time O~((n+d)/ε^2). We also provide a corresponding sublinear quantum algorithm that solves the same task in time O~((\√n+\√d)poly(1/ε)) with a quadratic improvement in both n and d. Both our classical and quantum algorithms are optimal in the dimension parameters n and d up to poly-logarithmic factors. Finally, we propose sublinear classical and quantum algorithms for the approximate Carath{\'e}odory problem and the lq-margin support vector machines as applications. Aaronson and Ambainis (2009) and Chailloux (2018) showed that fully symmetric (partial) functions do not admit exponential quantum query speedups. 
This raises a natural question: how symmetric must a function be before it cannot exhibit a large quantum speedup? The difficulty of simulating quantum dynamics depends on the norm of the Hamiltonian. When the Hamiltonian varies with time, the simulation complexity should only depend on this quantity instantaneously. We develop quantum simulation algorithms that exploit this intuition. For the case of sparse Hamiltonian simulation, the gate complexity scales with the L1 norm \∫t0dτ||H(τ)||max, whereas the best previous results scale with tmaxτ\∈[0,t]||H(τ)||max. We also show analogous results for Hamiltonians that are linear combinations of unitaries. Our approaches thus provide an improvement over previous simulation algorithms that can be substantial when the Hamiltonian varies significantly. We introduce two new techniques: a classical sampler of time-dependent Hamiltonians and a rescaling principle for the Schr{\"o}dinger equation. The rescaled Dyson-series algorithm is nearly optimal with respect to all parameters of interest, whereas the sampling-based approach is easier to realize for near-term simulation. By leveraging the L1-norm information, we obtain polynomial speedups for semi-classical simulations of scattering processes in quantum chemistry. Quantum walks are a promising framework for developing quantum algorithms and quantum simulations. Quantum walks represent an important test case for the application of quantum computers. Here we present different forms of discrete-time quantum walks and show their equivalence for physical realizations. Using an appropriate digital mapping of the position space on which a walker evolves onto the multi-qubit states in a quantum processor, we present different configurations of quantum circuits for the implementation of discrete-time quantum walks in one-dimensional position space. With example circuits for a five qubit machine we address scalability to higher dimensions and larger quantum processors. 
We study scrambling in a model consisting of a number N of M-component quantum rotors coupled by random infinite-range interactions. This model is known to have both a paramagnetic phase and a spin glass phase separated by second order phase transition. We calculate in perturbation theory the squared commutator of rotor fields at different sites in the paramagnetic phase, to leading non-trivial order at large N and large M. This quantity diagnoses the onset of quantum chaos in this system, and we show that the squared commutator grows exponentially with time, with a Lyapunov exponent proportional to 1M. At high temperature, the Lyapunov exponent limits to a value set by the microscopic couplings, while at low temperature, the exponent exhibits a T4 dependence on temperature T.\ Quantum computer architectures impose restrictions on qubit interactions. We propose efficient circuit transformations that modify a given quantum circuit to fit an architecture, allowing for any initial and final mapping of circuit qubits to architecture qubits. To achieve this, we first consider the qubit movement subproblem and use the routing via matchings framework to prove tighter bounds on parallel routing. In practice, we only need to perform partial permutations, so we generalize routing via matchings to that setting. We give new routing procedures for common architecture graphs and for the generalized hierarchical product of graphs, which produces subgraphs of the Cartesian product. Secondly, for serial routing, we consider the token swapping framework and extend a 4-approximation algorithm for general graphs to support partial permutations. We apply these routing procedures to give several circuit transformations, using various heuristic qubit placement subroutines. We implement these transformations in software and compare their performance for large quantum circuits on grid and modular architectures, identifying strategies that work well in practice. 
Just as classical information technology rests on a foundation built of interconnected information-processing systems, quantum information technology (QIT) must do the same. A critical component of such systems is the interconnect, a device or process that allows transfer of information between disparate physical media, for example, semiconductor electronics, individual atoms, light pulses in optical fiber, or microwave fields. While interconnects have been well engineered for decades in the realm of classical information technology, quantum interconnects (QuICs) present special challenges, as they must allow the transfer of fragile quantum states between different physical parts or degrees of freedom of the system. The diversity of QIT platforms (superconducting, atomic, solid-state color center, optical, etc.) that will form a quantum internet poses additional challenges. As quantum systems scale to larger size, the quantum interconnect bottleneck is imminent, and is emerging as a grand challenge for QIT. For these reasons, it is the position of the community represented by participants of the NSF workshop on Quantum Interconnects that accelerating QuIC research is crucial for sustained development of a national quantum science and technology program. Given the diversity of QIT platforms, materials used, applications, and infrastructure required, a convergent research program including partnership between academia, industry and national laboratories is required. This document is a summary from a U.S. National Science Foundation supported workshop held on 31 October - 1 November 2019 in Alexandria, VA. Attendees were charged to identify the scientific and community needs, opportunities, and significant challenges for quantum interconnects over the next 2-5 years.\ Product formulas can be used to simulate Hamiltonian dynamics on a quantum computer by approximating the exponential of a sum of operators by a product of exponentials of the individual summands. 
This approach is both straightforward and surprisingly efficient. We show that by simply randomizing how the summands are ordered, one can prove stronger bounds on the quality of approximation and thereby give more efficient simulations. Indeed, we show that these bounds can be asymptotically better than previous bounds that exploit commutation between the summands, despite using much less information about the structure of the Hamiltonian. Numerical evidence suggests that our randomized algorithm may be advantageous even for near-term quantum simulation. Quantum computing leverages the quantum resources of superposition and entanglement to efficiently solve computational problems considered intractable for classical computers. Examples include calculating molecular and nuclear structure, simulating strongly-interacting electron systems, and modeling aspects of material function. While substantial theoretical advances have been made in mapping these problems to quantum algorithms, there remains a large gap between the resource requirements for solving such problems and the capabilities of currently available quantum hardware. Bridging this gap will require a co-design approach, where the expression of algorithms is developed in conjunction with the hardware itself to optimize execution. Here, we describe a scalable co-design framework for solving chemistry problems on a trapped ion quantum computer, and apply it to compute the ground-state energy of the water molecule. The robust operation of the trapped ion quantum computer yields energy estimates with errors approaching the chemical accuracy, which is the target threshold necessary for predicting the rates of chemical reaction dynamics. According to the holographic bound, there is only a finite density of degrees of freedom in space when gravity is taken into account. 
Conventional quantum field theory does not conform to this bound, since in this framework, infinitely many degrees of freedom may be localized to any given region of space. In this essay, we explore the viewpoint that quantum field theory may emerge from an underlying theory that is locally finite-dimensional, and we construct a locally finite-dimensional version of a Klein-Gordon scalar field using generalized Clifford algebras. Demanding that the finite-dimensional field operators obey a suitable version of the canonical commutation relations makes this construction essentially unique. We then find that enforcing local finite dimensionality in a holographically consistent way leads to a huge suppression of the quantum contribution to vacuum energy, to the point that the theoretical prediction becomes plausibly consistent with observations. The propagation of information in non-relativistic quantum systems obeys a speed limit known as a Lieb-Robinson bound. We derive a new Lieb-Robinson bound for systems with interactions that decay with distance r as a power law, 1/rα. The bound implies an effective light cone tighter than all previous bounds. Our approach is based on a technique for approximating the time evolution of a system, which was first introduced as part of a quantum simulation algorithm by Haah et al. [arXiv:1801.03922]. To bound the error of the approximation, we use a known Lieb-Robinson bound that is weaker than the bound we establish. This result brings the analysis full circle, suggesting a deep connection between Lieb-Robinson bounds and digital quantum simulation. In addition to the new Lieb-Robinson bound, our analysis also gives an error bound for the Haah et al. quantum simulation algorithm when used to simulate power-law decaying interactions. In particular, we show that the gate count of the algorithm scales with the system size better than existing algorithms when α\>3D (where D is the number of dimensions). 
Product formulas provide a straightforward yet surprisingly efficient approach to quantum simulation. We show that this algorithm can simulate an n-qubit Hamiltonian with nearest-neighbor interactions evolving for time t using only (nt)1+o(1) gates. While it is reasonable to expect this complexity---in particular, this was claimed without rigorous justification by Jordan, Lee, and Preskill---we are not aware of a straightforward proof. Our approach is based on an analysis of the local error structure of product formulas, as introduced by Descombes and Thalhammer and significantly simplified here. We prove error bounds for canonical product formulas, which include well-known constructions such as the Lie-Trotter-Suzuki formulas. We also develop a local error representation for time-dependent Hamiltonian simulation, and we discuss generalizations to periodic boundary conditions, constant-range interactions, and higher dimensions. Combined with a previous lower bound, our result implies that product formulas can simulate lattice Hamiltonians with nearly optimal gate complexity. Confinement is a ubiquitous mechanism in nature, whereby particles feel an attractive force that increases without bound as they separate. A prominent example is color confinement in particle physics, in which baryons and mesons are produced by quark confinement. Analogously, confinement can also occur in low-energy quantum many-body systems when elementary excitations are confined into bound quasiparticles. Here, we report the first observation of magnetic domain wall confinement in interacting spin chains with a trapped-ion quantum simulator. By measuring how correlations spread, we show that confinement can dramatically suppress information propagation and thermalization in such many-body systems. We are able to quantitatively determine the excitation energy of domain wall bound states from non-equilibrium quench dynamics. 
Furthermore, we study the number of domain wall excitations created for different quench parameters, in a regime that is difficult to model with classical computers. This work demonstrates the capability of quantum simulators for investigating exotic high-energy physics phenomena, such as quark collision and string breaking his whitepaper is an outcome of the workshop Intersections between Nuclear Physics and Quantum Information held at Argonne National Laboratory on 28-30 March 2018 [www.phy.anl.gov/npqi2018/]. The workshop brought together 116 national and international experts in nuclear physics and quantum information science to explore opportunities for the two fields to collaborate on topics of interest to the U.S. Department of Energy (DOE) Office of Science, Office of Nuclear Physics, and more broadly to U.S. society and industry. The workshop consisted of 22 invited and 10 contributed talks, as well as three panel discussion sessions. Topics discussed included quantum computation, quantum simulation, quantum sensing, nuclear physics detectors, nuclear many-body problem, entanglement at collider energies, and lattice gauge theories. Dissipation can usually induce detrimental decoherence in a quantum system. However, engineered dissipation can be used to prepare and stabilize coherent quantum many-body states. Here, we show that by engineering dissipators containing photon pair operators, one can stabilize an exotic dark state, which is a condensate of photon pairs with a phase-nematic order. In this system, the usual superfluid order parameter, i.e. single-photon correlation, is absent, while the photon pair correlation exhibits long-range order. Although the dark state is not unique due to multiple parity sectors, we devise an additional type of dissipators to stabilize the dark state in a particular parity sector via a diffusive annihilation process which obeys Glauber dynamics in an Ising model. 
Furthermore, we propose an implementation of these photon-pair dissipators in circuit-QED architecture.\ Laser-cooled and trapped atomic ions form an ideal standard for the simulation of interacting quantum spin models. Effective spins are represented by appropriate internal energy levels within each ion, and the spins can be measured with near-perfect efficiency using state-dependent fluorescence techniques. By applying optical fields that exert optical dipole forces on the ions, their Coulomb interaction can be modulated in ways that give rise to long-range and tunable spin-spin interactions that can be reconfigured by shaping the spectrum and pattern of the laser fields. Here we review the theoretical mapping of atomic ions to interacting spin systems, the experimental preparation of complex equilibrium states, and the study of dynamical processes of this many-body interacting quantum system. The use of such quantum simulators for studying spin models may inform our understanding of exotic quantum materials and shed light on interacting quantum systems that cannot be modeled with conventional computers.\ We present a quantum algorithm for simulating the wave equation under Dirichlet and Neumann boundary conditions. The algorithm uses Hamiltonian simulation and quantum linear system algorithms as subroutines. It relies on factorizations of discretized Laplacian operators to allow for improved scaling in truncation errors and improved scaling for state preparation relative to general purpose linear differential equation algorithms. We also consider using Hamiltonian simulation for Klein-Gordon equations and Maxwell\&$\#$39;s equations. Quantum computers and simulators may offer significant advantages over their classical counterparts, providing insights into quantum many-body systems and possibly solving exponentially hard problems, such as optimization and satisfiability. 
Here we report the first implementation of a shallow-depth Quantum Approximate Optimization Algorithm (QAOA) using an analog quantum simulator to estimate the ground state energy of the transverse field Ising model with tunable long-range interactions. First, we exhaustively search the variational control parameters to approximate the ground state energy with up to 40 trapped-ion qubits. We then interface the quantum simulator with a classical algorithm to more efficiently find the optimal set of parameters that minimizes the resulting energy of the system. We finally sample from the full probability distribution of the QAOA output with single-shot and efficient measurements of every qubit.\ The great promise of quantum computers comes with the dual challenges of building them and finding their useful applications. We argue that these two challenges should be considered together, by co-designing full stack quantum computer systems along with their applications in order to hasten their development and potential for scientific discovery. In this context, we identify scientific and community needs, opportunities, and significant challenges for the development of quantum computers for science over the next 2-10 years. This document is written by a community of university, national laboratory, and industrial researchers in the field of Quantum Information Science and Technology, and is based on a summary from a U.S. National Science Foundation workshop on Quantum Computing held on October 21-22, 2019 in Alexandria, VA. Quantum simulators are a promising technology on the spectrum of quantum devices from specialized quantum experiments to universal quantum computers. These quantum devices utilize entanglement and many-particle behaviors to explore and solve hard scientific, engineering, and computational problems. 
Rapid development over the last two decades has produced more than 300 quantum simulators in operation worldwide using a wide variety of experimental platforms. Recent advances in several physical architectures promise a golden age of quantum simulators ranging from highly optimized special purpose simulators to flexible programmable devices. These developments have enabled a convergence of ideas drawn from fundamental physics, computer science, and device engineering. They have strong potential to address problems of societal importance, ranging from understanding vital chemical processes, to enabling the design of new materials with enhanced performance, to solving complex computational problems. It is the position of the community, as represented by participants of the NSF workshop on \"Programmable Quantum Simulators,\" that investment in a national quantum simulator program is a high priority in order to accelerate the progress in this field and to result in the first practical applications of quantum machines. Such a program should address two areas of emphasis: (1) support for creating quantum simulator prototypes usable by the broader scientific community, complementary to the present universal quantum computer effort in industry; and (2) support for fundamental research carried out by a blend of multi-investigator, multi-disciplinary collaborations with resources for quantum simulator software, hardware, and education.\ The study of quantum generative models is well-motivated, not only because of its importance in quantum machine learning and quantum chemistry but also because of the perspective of its implementation on near-term quantum machines. 
Inspired by previous studies on the adversarial training of classical and quantum generative models, we propose the first design of quantum Wasserstein Generative Adversarial Networks (WGANs), which has been shown to improve the robustness and the scalability of the adversarial training of quantum generative models even on noisy quantum hardware. Specifically, we propose a definition of the Wasserstein semimetric between quantum data, which inherits a few key theoretical merits of its classical counterpart. We also demonstrate how to turn the quantum Wasserstein semimetric into a concrete design of quantum WGANs that can be efficiently implemented on quantum machines. Our numerical study, via classical simulation of quantum systems, shows the more robust and scalable numerical performance of our quantum WGANs over other quantum GAN proposals. As a surprising application, our quantum WGAN has been used to generate a 3-qubit quantum circuit of ~50 gates that well approximates a 3-qubit 1-d Hamiltonian simulation circuit that requires over 10k gates using standard techniques. Semidefinite programming (SDP) is a central topic in mathematical optimization with extensive studies on its efficient solvers. Recently, quantum algorithms with superpolynomial speedups for solving SDPs have been proposed assuming access to its constraint matrices in quantum superposition. Mutually inspired by both classical and quantum SDP solvers, in this paper we present a sublinear classical algorithm for solving low-rank SDPs which is asymptotically as good as existing quantum algorithms. Specifically, given an SDP with m constraint matrices, each of dimension n and rank poly(logn), our algorithm gives a succinct description and any entry of the solution matrix in time O(m\⋅poly(logn,1/ε)) given access to a sample-based low-overhead data structure of the constraint matrices, where ε is the precision of the solution. 
In addition, we apply our algorithm to a quantum state learning task as an application. Technically, our approach aligns with both the SDP solvers based on the matrix multiplicative weight (MMW) framework and the recent studies of quantum-inspired machine learning algorithms. The cost of solving SDPs by MMW mainly comes from the exponentiation of Hermitian matrices, and we propose two new technical ingredients (compared to previous sample-based algorithms) for this task that may be of independent interest: . Weighted sampling: assuming sampling access to each individual constraint matrix A1,\…,Aτ, we propose a procedure that gives a good approximation of A=A1+...+Aτ. . Symmetric approximation: we propose a sampling procedure that gives low-rank spectral decomposition of a Hermitian matrix A. This improves upon previous sampling procedures that only give low-rank singular value decompositions, losing the signs of eigenvalues. The multi-scale entanglement renormalization ansatz (MERA) postulates the existence of quantum circuits that renormalize entanglement in real space at different length scales. Chern insulators, however, cannot have scale-invariant discrete MERA circuits with finite bond dimension. In this Letter, we show that the continuous MERA (cMERA), a modified version of MERA adapted for field theories, possesses a fixed point wavefunction with nonzero Chern number. Additionally, it is well known that reversed MERA circuits can be used to prepare quantum states efficiently in time that scales logarithmically with the size of the system. However, state preparation via MERA typically requires the advent of a full-fledged universal quantum computer. 
In this Letter, we demonstrate that our cMERA circuit can potentially be realized in existing analog quantum computers, i.e., an ultracold atomic Fermi gas in an optical lattice with light-induced spin-orbit coupling.\ We present a general theory of quantum information propagation in chaotic quantum many-body systems. The generic expectation in such systems is that quantum information does not propagate in localized form; instead, it tends to spread out and scramble into a form that is inaccessible to local measurements. To characterize this spreading, we define an information speed via a quench-type experiment and derive a general formula for it as a function of the entanglement density of the initial state. As the entanglement density varies from zero to one, the information speed varies from the entanglement speed to the butterfly speed. We verify that the formula holds both for a quantum chaotic spin chain and in field theories with an AdS/CFT gravity dual. For the second case, we study in detail the dynamics of entanglement in two-sided Vaidya-AdS-Reissner-Nordstrom black branes. We also show that, with an appropriate decoding process, quantum information can be construed as moving at the information speed, and, in the case of AdS/CFT, we show that a locally detectable signal propagates at the information speed in a spatially local variant of the traversable wormhole setup. This paper proposes a privacy protocol for distributed average consensus algorithms on bounded real-valued inputs that guarantees statistical privacy of honest agents\&$\#$39; inputs against colluding (passive adversarial) agents, if the set of colluding agents is not a vertex cut in the underlying communication network. This implies that privacy of agents\&$\#$39; inputs is preserved against t number of arbitrary colluding agents if the connectivity of the communication network is at least (t+1). 
A similar privacy protocol has been proposed for the case of bounded integral inputs in our previous paper~\cite{gupta2018information}. However, many applications of distributed consensus concerning distributed control or state estimation deal with real-valued inputs. Thus, in this paper we propose an extension of the privacy protocol in~\cite{gupta2018information}, for bounded real-valued agents\&$\#$39; inputs, where bounds are known apriori to all the agents.\ The National Institute of Standards and Technology is in the process of selecting one or more We investigate quantum algorithms for classification, a fundamental problem in machine learning, with provable guarantees. Given n d-dimensional data points, the state-of-the-art (and optimal) classical algorithm for training classifiers with constant margin runs in O~(n+d) time. We design sublinear quantum algorithms for the same task running in O~(n\−\−\√+d\−\−\√) time, a quadratic improvement in both n and d. Moreover, our algorithms use the standard quantization of the classical input and generate the same classical output, suggesting minimal overheads when used as subroutines for end-to-end applications. We also demonstrate a tight lower bound (up to poly-log factors) and discuss the possibility of implementation on near-term quantum machines. As a side result, we also give sublinear quantum algorithms for approximating the equilibria of n-dimensional matrix zero-sum games with optimal complexity Θ~(n\−\−\√).\ The Ryu-Takayanagi and Hubeny-Rangamani-Takayanagi formulae suggest that bulk geometry emerges from the entanglement structure of the boundary theory. Using these formulae, we build on a result of Alexakis, Balehowsky, and Nachman to show that in four bulk dimensions, the entanglement entropies of boundary regions of disk topology uniquely fix the bulk metric in any region foliated by the corresponding HRT surfaces. 
More generally, for a bulk of any dimension, knowledge
We study in particular states defined via Euclidean evolution from conformal boundary states and give specific predictions for the behavior of the entanglement entropy in this case. We perform analogous calculations for the SYK model and find qualitative agreement with our expectations. A fascinating possibility is that for certain states, we might have gravity localized to the ETW brane as in the Randall-Sundrum II scenario for cosmology. In this case, the effective description of physics beyond the horizon could be a big bang/big crunch cosmology of the same dimensionality as the CFT. In this case, the d-dimensional CFT describing the black hole microstate would give a precise, microscopic description of the d-dimensional cosmological physics.\ Superconductivity provides a canonical example of a quantum phase of matter. When superconducting islands are connected by Josephson junctions in a lattice, the low temperature state of the system can map to the celebrated XY model and its associated universality classes. This has been used to experimentally implement realizations of Mott insulator and Berezinskii--Kosterlitz--Thouless (BKT) transitions to vortex dynamics analogous to those in type-II superconductors. When an external magnetic field is added, the effective spins of the XY model become frustrated, leading to the formation of topological defects (vortices). Here we observe the many-body dynamics of such an array, including frustration, via its coupling to a superconducting microwave cavity. We take the design of the transmon qubit, but replace the single junction between two antenna pads with the complete array. This allows us to probe the system at 10 mK with minimal self-heating by using weak coherent states at the single (microwave) photon level to probe the resonance frequency of the cavity. 
We observe signatures of ordered vortex lattice at rational flux fillings of the array.\ From dice to modern complex circuits, there have been many attempts to build increasingly better devices to generate random numbers. Today, randomness is fundamental to security and cryptographic systems, as well as safeguarding privacy. A key challenge with random number generators is that it is hard to ensure that their outputs are unpredictable. For a random number generator based on a physical process, such as a noisy classical system or an elementary quantum measurement, a detailed model describing the underlying physics is required to assert unpredictability. Such a model must make a number of assumptions that may not be valid, thereby compromising the integrity of the device. However, it is possible to exploit the phenomenon of quantum nonlocality with a loophole-free Bell test to build a random number generator that can produce output that is unpredictable to any adversary limited only by general physical principles. With recent technological developments, it is now possible to carry out such a loophole-free Bell test. Here we present certified randomness obtained from a photonic Bell experiment and extract 1024 random bits uniform to within 10\−12. These random bits could not have been predicted within any physical theory that prohibits superluminal signaling and allows one to make independent measurement choices. To certify and quantify the randomness, we describe a new protocol that is optimized for apparatuses characterized by a low per-trial violation of Bell inequalities. We thus enlisted an experimental result that fundamentally challenges the notion of determinism to build a system that can increase trust in random sources. In the future, random number generators based on loophole-free Bell tests may play a role in increasing the security and trust of our cryptographic systems and infrastructure. 
It is well known that correlations predicted by quantum mechanics cannot be explained by any classical (local-realistic) theory. The relative strength of quantum and classical correlations is usually studied in the context of Bell inequalities, but this tells us little about the geometry of the quantum set of correlations. In other words, we do not have good intuition about what the quantum set actually looks like. In this paper we study the geometry of the quantum set using standard tools from convex geometry. We find explicit examples of rather counter-intuitive features in the simplest non-trivial Bell scenario (two parties, two inputs and two outputs) and illustrate them using 2-dimensional slice plots. We also show that even more complex features appear in Bell scenarios with more inputs or more parties. Finally, we discuss the limitations that the geometry of the quantum set imposes on the task of self-testing. Trapped atomic ions are an ideal candidate for quantum network nodes, with long-lived identical qubit memories that can be locally entangled through their Coulomb interaction and remotely entangled through photonic channels. The integrity of this photonic interface is generally reliant on purity of single photons produced by the quantum memory. Here we demonstrate a single-photon source for quantum networking based on a trapped 138Ba+ ion with a single photon purity of g2(0)=(8.1\±2.3)\×10\−5 without background subtraction. We further optimize the tradeoff between the photonic generation rate and the memory-photon entanglement fidelity for the case of polarization photonic qubits by tailoring the spatial mode of the collected light.\ We propose an asynchronous distributed average consensus algorithm that guarantees information-theoretic privacy of honest agents\&$\#$39; inputs against colluding passive adversarial agents, as long as the set of colluding passive adversarial agents is not a vertex cut in the underlying communication network. 
This implies that a network with (t+1)-connectivity guarantees information-theoretic privacy of honest agents\&$\#$39; inputs against any t colluding agents. The proposed protocol is formed by composing a distributed privacy mechanism we provide with any (non-private) distributed average consensus algorithm. The agent\&$\#$39; inputs are bounded integers, where the bounds are apriori known to all the agents. We propose an asynchronous distributed average consensus algorithm that guarantees information-theoretic privacy of honest agents\&$\#$39; inputs against colluding semi-honest (passively adversarial) agents, as long as the set of colluding semi-honest agents is not a vertex cut in the underlying communication network. This implies that a network with\ If a measurement is made on one half of a bipartite system then, conditioned on the outcome, the other half has a new reduced state. If these reduced states defy classical explanation \— that is, if shared randomness cannot produce these reduced states for all possible measurements \— the bipartite state is said to be steerable. Determining which states are steerable is a challenging problem even for low dimensions. In the case of two-qubit systems a criterion is known for T-states (that is, those with maximally mixed marginals) under projective measurements. In the current work we introduce the concept of keyring models \— a special class of local hidden state model. When the measurements made correspond to real projectors, these allow us to study steerability beyond T-states. Using keyring models, we completely solve the steering problem for real projective measurements when the state arises from mixing a pure two-qubit state with uniform noise. We also give a partial solution in the case when the uniform noise is replaced by independent depolarizing channels. 
Our results imply that Werner states, which are a special case of the previous states, are unsteerable under real projective measurements if and only if their efficiency is at most 2/π. The problem of Oblivious RAM (ORAM) has traditionally been studied in a single-server setting, but more recently the multi-server setting has also been considered. Yet it is still unclear whether the multi-server setting has any inherent advantages, e.g., whether the multi-server setting can be used to achieve stronger security goals or provably better efficiency than is possible in the single-server case. In this work, we construct a perfectly secure 3-server ORAM scheme that outperforms the best known single-server scheme by a logarithmic factor. In the process, we also show, for the first time, that there exist specific algorithms for which multiple servers can overcome known lower bounds in the single-server setting.\ One of the applications of quantum technology is to use quantum states and measurements to communicate which offers more reliable security promises. Quantum data hiding, which gives the source party the ability of sharing data among multiple receivers and revealing it at a later time depending on his/her will, is one of the promising information sharing schemes which may address practical security issues. In this work, we propose a novel quantum data hiding protocol. By concatenating different subprotocols which apply to rather symmetric hiding scenarios, we cover a variety of more general hiding scenarios. We provide the general requirements for constructing such protocols and give explicit examples of encoding states for five parties. We also proved the security of the protocol in sense that the achievable information by unauthorized operations asymptotically goes to zero. In addition, due to the capability of the sender to manipulate his/her subsystem, the sender is able to abort the protocol remotely at any time before he/she reveals the information. 
In order to deal with IR divergences arising in QED or perturbative quantum gravity scattering processes, one can either calculate inclusive quantities or use dressed asymptotic states. We consider incoming superpositions of momentum eigenstates and show that in calculations of cross-sections these two approaches yield different answers: in the inclusive formalism no interference occurs for incoming finite superpositions and wavepackets do not scatter at all, while the dressed formalism yields the expected interference terms. This suggests that rather than Fock space states, one should use Faddeev-Kulish-type dressed states to correctly describe physical processes involving incoming superpositions. We interpret this in terms of selection rules due to large U(1) gauge symmetries and BMS supertranslations. Quantum mechanical scattering resonances for colliding particles occur when a continuum scattering state couples to a discrete bound state between them. The coupling also causes the bound state to interact with itself via the continuum and leads to a shift in the bound state energy, but, lacking knowledge of the bare bound state energy, measuring this self-energy via the resonance position has remained elusive. Here, we report on the direct observation of self-interaction by using a nano-eV atom collider to track the position of a magnetically-tunable Feshbach resonance through a parameter space spanned by energy and magnetic field. Our system of potassium and rubidium atoms displays a strongly non-monotonic resonance trajectory with an exceptionally large self-interaction energy arising from an interplay between the Feshbach bound state and a different, virtual bound state at a fixed energy near threshold. Bound states of massive particles, such as nuclei, atoms or molecules, are ubiquitous in nature and constitute the bulk of the visible world around us. 
In contrast, photons typically only weakly influence each other due to their very weak interactions and vanishing mass. We report the observation of traveling three-photon bound states in a quantum nonlinear medium where the interactions between photons are mediated by atomic Rydberg states. In particular, photon correlation and conditional phase measurements reveal the distinct features associated with three-photon and two-photon bound states. Such photonic trimers and dimers can be viewed as quantum solitons with shape-preserving wavefunctions that depend on the constituent photon number. The observed bunching and strongly nonlinear optical phase are quantitatively described by an effective field theory (EFT) of Rydberg-induced photon-photon interactions, which demonstrates the presence of a substantial effective three-body force between the photons. These observations pave the way towards the realization, studies, and control of strongly interacting quantum many-body states of light. A major application for atomic ensembles consists of a quantum memory for light, in which an optical state can be reversibly converted to a collective atomic excitation on demand. There exists a well-known fundamental bound on the storage error, when the ensemble is describable by a continuous medium governed by the Maxwell-Bloch equations. The validity of this model can break down, however, in systems such as dense, ordered atomic arrays, where strong interference in emission can give rise to phenomena such as subradiance and ``selective'' radiance. Here, we develop a general formalism that finds the maximum storage efficiency for a collection of atoms with discrete, known positions, and a given spatial mode in which an optical field is sent. As an example, we apply this technique to study a finite two-dimensional square array of atoms. 
We show that such a system enables a storage error that scales with atom number $N_a$ like $\sim(\log N_a)^2/N_a^2$, and that, remarkably, an array of just $4\times 4$ atoms in principle allows for an efficiency comparable to a disordered ensemble with optical depth of around 600. We study the dissipative propagation of quantized light in interacting Rydberg media under the conditions of electromagnetically induced transparency (EIT). Rydberg blockade physics in optically dense atomic media leads to strong dissipative interactions between single photons. The regime of high incoming photon flux constitutes a challenging many-body dissipative problem. We experimentally study in detail for the first time the pulse shapes and the second-order correlation function of the outgoing field and compare our data with simulations based on two novel theoretical approaches well-suited to treat this many-photon limit. At low incoming flux, we report good agreement between both theories and the experiment. For higher input flux, the intensity of the outgoing light is lower than that obtained from theoretical predictions. We explain this discrepancy using a simple phenomenological model taking into account pollutants, which are nearly-stationary Rydberg excitations coming from the reabsorption of scattered probe photons. At high incoming photon rates, the blockade physics results in unconventional shapes of measured correlation functions.\ How many quantum queries are required to determine the coefficients of a degree-$d$ polynomial in $n$ variables? We present and analyze quantum algorithms for this multivariate polynomial interpolation problem over the fields $\mathbb{F}_q$, $\mathbb{R}$, and $\mathbb{C}$. We show that $k_{\mathbb{C}}$ and $2k_{\mathbb{C}}$ queries suffice to achieve probability 1 for $\mathbb{C}$ and $\mathbb{R}$, respectively, where $k_{\mathbb{C}} = \left\lceil \frac{1}{n+1}\binom{n+d}{d} \right\rceil$ except for $d = 2$ and four other special cases. For $\mathbb{F}_q$, we show that $\left\lceil \frac{d}{n+d}\binom{n+d}{d} \right\rceil$ queries suffice to achieve probability approaching 1 for large field order $q$. 
The classical query complexity of this problem is $\binom{n+d}{d}$, so our result provides a speedup by a factor of $n+1$, $\frac{n+1}{2}$, and $\frac{n+d}{d}$ for $\mathbb{C}$, $\mathbb{R}$, and $\mathbb{F}_q$, respectively. Thus we find a much larger gap between classical and quantum algorithms than the univariate case, where the speedup is by a factor of 2. For the case of $\mathbb{F}_q$, we conjecture that $2k_{\mathbb{C}}$ queries also suffice to achieve probability approaching 1 for large field order $q$, although we leave this as an open problem. We illustrate the existence of single-excitation bound states for propagating photons interacting with N two-level atoms. These bound states can be calculated from an effective spin model, and their existence relies on dissipation in the system. The appearance of these bound states is in a one-to-one correspondence with zeros in the single-photon transmission and with divergent bunching in the second-order photon-photon correlation function. We also formulate a dissipative version of Levinson's theorem for this system by looking at the relation between the number of bound states and the winding number of the transmission phases. This theorem allows a direct experimental measurement of the number of bound states using the measured transmission phases. We show that Ramsey spectroscopy of fermionic alkaline-earth atoms in a square-well trap provides an efficient and accurate estimate for the eigenspectrum of a density matrix whose n copies are stored in the nuclear spins of n such atoms. This spectrum estimation is enabled by the high symmetry of the interaction Hamiltonian, dictated, in turn, by the decoupling of the nuclear spin from the electrons and by the shape of the square-well trap. Practical performance of this procedure and its potential applications to quantum computing, quantum simulation, and time-keeping with alkaline-earth atoms are discussed. We consider the general form of ``Correlated Worldline'' (CWL) theories of quantum gravity. 
We show that one can have 2 different kinds of CWL theory, in which the generating functional is written as either a sum or a product over multiple copies of the coupled matter and gravitational fields. In both versions, the paths in a functional formulation are correlated via gravity itself, causing a breakdown of the superposition principle; however, the product form survives consistency tests not satisfied by the summed form. To better understand the structure of these two theories, we show how to perform diagrammatic expansions in the gravitational coupling for each version of CWL theory, using particle propagation and scalar fields as examples. We explicitly calculate contributions to 2-point and 4-point functions, again for each version of the theory, up to 2nd-order in the gravitational coupling. Recent advances in cooling, control, and measurement of mechanical systems in the quantum regime have opened the possibility of the first direct observation of quantum gravity, at scales achievable in experiments. This paper gives a broad overview of this idea, using some matter-wave and optomechanical systems to illustrate the predictions of a variety of models of low-energy quantum gravity. We first review the treatment of perturbatively quantized general relativity as an effective quantum field theory, and consider the particular challenges of observing quantum effects in this framework. We then move on to a variety of alternative models, such as those in which gravity is classical, emergent, or responsible for a breakdown of quantum mechanics. Recent advances in cooling, control, and measurement of mechanical systems in the quantum regime have opened the possibility of the first direct observation of quantum gravity, at scales achievable in experiments. This paper gives a broad overview of this idea, using some matter-wave and optomechanical systems to illustrate the predictions of a variety of models of low-energy quantum gravity. 
We first review the treatment of perturbatively quantized general relativity as an effective quantum field theory, and consider the particular challenges of observing quantum effects in this framework. We then move on to a variety of alternative models, such as those in which gravity is classical, emergent, or responsible for a breakdown of quantum mechanics. With quantum computers of significant size now on the horizon, we should understand how to best exploit their initially limited abilities. To this end, we aim to identify a practical problem that is beyond the reach of current classical computers, but that requires the fewest resources for a quantum computer. We consider quantum simulation of spin systems, which could be applied to understand condensed matter phenomena. We synthesize explicit circuits for three leading quantum simulation algorithms, using diverse techniques to tighten error bounds and optimize circuit implementations. Quantum signal processing appears to be preferred among algorithms with rigorous performance guarantees, whereas higher-order product formulas prevail if empirical error estimates suffice. Our circuits are orders of magnitude smaller than those for the simplest classically infeasible instances of factoring and quantum chemistry, bringing practical quantum computation closer to reality. The construction of large-scale quantum computers will require modular architectures that allow physical resources to be localized in easy-to-manage packages. In this work, we examine the impact of different graph structures on the preparation of entangled states. We begin by explaining a formal framework, the hierarchical product, in which modular graphs can be easily constructed. This framework naturally leads us to suggest a class of graphs, which we dub hierarchies. 
We argue that such graphs have favorable properties for quantum information processing, such as a small diameter and small total edge weight, and use the concept of Pareto efficiency to identify promising quantum graph architectures. We present numerical and analytical results on the speed at which large entangled states can be created on nearest-neighbor grids and hierarchy graphs. We also present a scheme for performing circuit placement--the translation from circuit diagrams to machine qubits--on quantum systems whose connectivity is described by hierarchies. We initiate the study of computational entropy in the quantum setting. We investigate to what extent the classical notions of computational entropy generalize to the quantum setting, and whether quantum analogues of classical theorems hold. Our main results are as follows. (1) The classical Leakage Chain Rule for pseudoentropy can be extended to the case that the leakage information is quantum (while the source remains classical). Specifically, if the source has pseudoentropy at least $k$, then it has pseudoentropy at least $k - \ell$ conditioned on an $\ell$-qubit leakage. (2) As an application of the Leakage Chain Rule, we construct the first quantum leakage-resilient stream-cipher in the bounded-quantum-storage model, assuming the existence of a quantum-secure pseudorandom generator. (3) We show that the general form of the classical Dense Model Theorem (interpreted as the equivalence between two definitions of pseudo-relative min-entropy) does not extend to quantum states. Along the way, we develop quantum analogues of some classical techniques (e.g., the Leakage Simulation Lemma, which is proven by a Nonuniform Min-Max Theorem or Boosting). On the other hand, we also identify some classical techniques (e.g., Gap Amplification) that do not work in the quantum setting. 
Moreover, we introduce a variety of notions that combine quantum information and quantum complexity, and this raises several directions for future work. We provide results for the exponential dominating numbers and total exponential dominating numbers of a family of triangular grid graphs. We then prove inequalities for these numbers and compare them with inequalities that hold more generally for exponential dominating numbers of graphs. Quantum algorithms for simulating Hamiltonian dynamics have been extensively developed, but there has been much less work on quantum algorithms for simulating the dynamics of open quantum systems. We give the first efficient quantum algorithms for simulating Markovian quantum dynamics generated by Lindbladians that are not necessarily local. We introduce two approaches to simulating sparse Lindbladians. First, we show how to simulate Lindbladians that act within small invariant subspaces using a quantum algorithm to implement sparse Stinespring isometries. Second, we develop a method for simulating sparse Lindblad operators by concatenating a sequence of short-time evolutions. We also show limitations on Lindbladian simulation by proving a no-fast-forwarding theorem for simulating sparse Lindbladians in a black-box model. We apply and extend the theory of universal recovery channels from quantum information theory to address the problem of entanglement wedge reconstruction in AdS/CFT. It has recently been proposed that any low-energy local bulk operators in a CFT boundary region's entanglement wedge can be reconstructed on that boundary region itself. Existing work arguing for this proposal relies on algebraic consequences of the exact equivalence between bulk and boundary relative entropies, namely the theory of operator algebra quantum error correction. 
However, bulk and boundary relative entropies are only approximately equal in bulk effective field theory, and in similar situations it is known that predictions from exact entropic equalities can be qualitatively incorrect. The framework of universal recovery channels provides a robust demonstration of the entanglement wedge reconstruction conjecture in addition to new physical insights. Most notably, we find that a bulk operator acting in a given boundary region's entanglement wedge can be expressed as the response of the boundary region's modular Hamiltonian to a perturbation of the bulk state in the direction of the bulk operator. This formula can be interpreted as a noncommutative version of Bayes' rule that attempts to undo the noise induced by restricting to only a portion of the boundary, and has an integral representation in terms of modular flows. To reach these conclusions, we extend the theory of universal recovery channels to finite-dimensional operator algebras and demonstrate that recovery channels approximately preserve the multiplicative structure of the operator algebra. Random numbers are an important resource for applications such as numerical simulation and secure communication. However, it is difficult to certify whether a physical random number generator is truly unpredictable. Here, we exploit the phenomenon of quantum nonlocality in a loophole-free photonic Bell test experiment for the generation of randomness that cannot be predicted within any physical theory that allows one to make independent measurement choices and prohibits superluminal signaling. To certify and quantify the randomness, we describe a new protocol that performs well in an experimental regime characterized by low violation of Bell inequalities. Applying an extractor function to our data, we obtained 256 new random bits, uniform to within 0.001. 
With nowadays steadily growing quantum processors, it is required to develop new quantum tomography tools that are tailored for high-dimensional systems. In this work, we describe such a computational tool, based on recent ideas from non-convex optimization. The algorithm excels in the compressed-sensing-like setting, where only a few data points are measured from a low-rank or highly-pure quantum state of a high-dimensional system. We show that the algorithm can practically be used in quantum tomography problems that are beyond the reach of convex solvers, and, moreover, is faster than other state-of-the-art non-convex approaches. Crucially, we prove that, despite being a non-convex program, under mild conditions, the algorithm is guaranteed to converge to the global minimum of the problem; thus, it constitutes a provable quantum state tomography protocol. We present a quantum algorithm for systems of (possibly inhomogeneous) linear ordinary differential equations with constant coefficients. The algorithm produces a quantum state that is proportional to the solution at a desired final time. The complexity of the algorithm is polynomial in the logarithm of the inverse error, an exponential improvement over previous quantum algorithms for this problem. Our result builds upon recent advances in quantum linear systems algorithms by encoding the simulation into a sparse, well-conditioned linear system that approximates evolution according to the propagator using a Taylor series. Unlike with finite difference methods, our approach does not require additional hypotheses to ensure numerical stability. Harrow, Hassidim, and Lloyd showed that for a suitably specified $N\times N$ matrix $A$ and $N$-dimensional vector $\vec{b}$, there is a quantum algorithm that outputs a quantum state proportional to the solution of the linear system of equations $A\vec{x}=\vec{b}$. If $A$ is sparse and well-conditioned, their algorithm runs in time $\mathrm{poly}(\log N, 1/\epsilon)$, where $\epsilon$ is the desired precision in the output state. 
We improve this to an algorithm whose running time is polynomial in log(1/ϵ), exponentially improving the dependence on precision while keeping essentially the same dependence on other parameters. Our algorithm is based on a general technique for implementing any operator with a suitable Fourier or Chebyshev series representation. This allows us to bypass the quantum phase estimation algorithm, whose dependence on ϵ is prohibitive. Quantum state tomography via local measurements is an efficient tool for characterizing quantum states. However it requires that the original global state be uniquely determined (UD) by its local reduced density matrices (RDMs). In this work we demonstrate for the first time a class of states that are UD by their RDMs under the assumption that the global state is pure, but fail to be UD in the absence of that assumption. This discovery allows us to classify quantum states according to their UD properties, with the requirement that each class be treated distinctly in the practice of simplifying quantum state tomography. Additionally we experimentally test the feasibility and stability of performing quantum state tomography via the measurement of local RDMs for each class. These theoretical and experimental results advance the project of performing efficient and accurate quantum state tomography in practice. Infinite-range interactions are known to facilitate the production of highly entangled states with applications in quantum information and metrology. However, many experimental systems have interactions that decay with distance, and the achievable benefits in this context are much less clear. Combining recent exact solutions with a controlled expansion in the system size, we analyze quench dynamics in Ising models with power-law (1/r α ) interactions in D dimensions, thereby expanding the understanding of spin squeezing into a broad and experimentally relevant context. 
In spatially homogeneous systems, we show that for small α the scaling of squeezing with system size is identical to the infinite-range (α = 0) case. This indifference to the interaction range persists up to a critical value α = 2D/3, above which squeezing degrades continuously. Boundaryinduced inhomogeneities present in most experimental systems modify this picture, but it nevertheless remains qualitatively correct for finite-sized systems. We give a finite presentation by generators and relations of unitary operators expressible over the {CNOT, T, X} gate set, also known as CNOT-dihedral operators. To this end, we introduce a notion of normal form for CNOT-dihedral circuits and prove that every CNOT-dihedral operator admits a unique normal form. Moreover, we show that in the presence of certain structural rules only finitely many circuit identities are required to reduce an arbitrary CNOT-dihedral circuit to its normal form. By appropriately restricting our relations, we obtain a finite presentation of unitary operators expressible over the {CNOT, T } gate set as a corollary. Current quantum annealing (QA) hardware suffers from practical limitations such as finite\ temperature, sparse connectivity, small qubit numbers, and control error. We propose new algorithms for\ mapping Boolean constraint satisfaction problems (CSPs) onto QA hardware mitigating these limitations.\ In particular, we develop a new embedding algorithm for mapping a CSP onto a hardware Ising model with\ a fixed sparse set of interactions and propose two new decomposition algorithms for solving problems too\ large to map directly into hardware. The mapping technique is locally structured, as hardware compatible\ Ising models are generated for each problem constraint, and variables appearing in different constraints are\ chained together using ferromagnetic couplings. 
By contrast, global embedding techniques generate a\ hardware-independent Ising model for all the constraints, and then use a minor-embedding algorithm to\ generate\ a hardware compatible Ising model. We give an example of a class of CSPs for which the scaling\ performance\ of the D-Wave hardware using the local mapping technique is significantly better than global\ embedding. We validate\ the approach by applying D- Wave\’s QA hardware to circuit-based fault diagnosis.\ For circuits that embed directly, we\ find that the hardware is typically able to find all solutions from a\ min-fault diagnosis set of size N using 1000 N samples,\ using an annealing rate that is 25 times faster than\ a leading SAT-based sampling method. Furthermore, we apply\ decomposition algorithms to find min-cardinality\ faults for circuits that are up to 5 times larger than can be solved directly on current hardware. We consider the number of quantum queries required to determine the coefficients of a degree-d polynomial over GF(q). A lower bound shown independently by Kane and Kutin and by Meyer and Pommersheim shows that d/2+1/2 quantum queries are needed to solve this problem with bounded error, whereas an algorithm of Boneh and Zhandry shows that d quantum queries are sufficient. We show that the lower bound is achievable: d/2+1/2 quantum queries suffice to determine the polynomial with bounded error. Furthermore, we show that d/2+1 queries suffice to achieve probability approaching 1 for large q. These upper bounds improve results of Boneh and Zhandry on the insecurity of cryptographic protocols against quantum attacks. We also show that our algorithm\&$\#$39;s success probability as a function of the number of queries is precisely optimal. Furthermore, the algorithm can be implemented with gate complexity poly(log q) with negligible decrease in the success probability. 
We examine the problem of finding the minimum number of Pauli measurements needed to uniquely determine an arbitrary n-qubit pure state among all quantum states. We show that only 11 Pauli measurements are needed to determine an arbitrary two-qubit pure state compared to the full quantum state tomography with 16 measurements, and only 31 Pauli measurements are needed to determine an arbitrary three-qubit pure state compared to the full quantum state tomography with 64 measurements. We demonstrate that our protocol is robust under depolarizing error with simulated random pure states. We experimentally test the protocol on two- and three-qubit systems with nuclear magnetic resonance techniques. We show that the pure state tomography protocol saves us a number of measurements without considerable loss of fidelity. We compare our protocol with same-size sets of randomly selected Pauli operators and find that our selected set of Pauli measurements significantly outperforms those random sampling sets. As a direct application, our scheme can also be used to reduce the number of settings needed for pure-state tomography in quantum optical systems. In recent years, several measures have been proposed for characterizing the coherence of a given quantum state. We derive several results that illuminate how these measures behave when restricted to pure states. Notably, we present an explicit characterization of the closest incoherent state to a given pure state under the trace distance measure of coherence, and we affirm a recent conjecture that the l1 measure of coherence of a pure state is never smaller than its relative entropy of coherence. We then use our result to show that the states maximizing the trace distance of coherence are exactly the maximally coherent states, and we derive a new inequality relating the negativity and distillable entanglement of pure states. 
Tightly confined modes of light, as in optical nanofibers or photonic crystal waveguides, can lead to large optical coupling in atomic systems, which mediates long-range interactions between atoms. These one-dimensional systems can naturally possess couplings that are asymmetric between modes propagating in different directions. Strong long-range interaction among atoms via these modes can drive them to a self-organized periodic distribution. In this paper, we examine the self-organizing behavior of atoms in one dimension coupled to a chiral reservoir. We determine the solution to the equations of motion in different parameter regimes, relative to both the detuning of the pump laser that initializes the atomic dipole-dipole interactions and the degree of reservoir chirality. In addition, we calculate possible experimental signatures such as reflectivity from self-organized atoms and motional sidebands. We propose a method for creating far-field optical barrier potentials for ultracold atoms with widths that are narrower than the diffraction limit and can approach tens of nanometers. The reduced widths stem from the nonlinear atomic response to control fields that create spatially varying dark resonances. The subwavelength barrier is the result of the geometric scalar potential experienced by an atom prepared in such a spatially varying dark state. The performance of this technique, as well as its applications to the study of many-body physics and to the implementation of quantum-information protocols with ultracold atoms, are discussed, with a focus on the implementation of tunnel junctions. Using cold atoms to simulate strongly interacting quantum systems represents an exciting frontier of physics. However, achieving tunable, coherent long-range interactions between atoms is an outstanding challenge, which currently leaves a large class of models inaccessible to quantum simulation. 
Here, we propose a solution exploiting the powerful new platform of cold atoms trapped near nano-photonic systems. We show that the dielectric contrast of an atom trapped near a photonic crystal can seed a localized cavity mode around the atomic position. In a dynamic form of ``all-atomic'' cavity QED, the length of these cavity modes can be tuned, and atoms separated by the order of the effective cavity length can interact coherently with each other. Considering realistic conditions such as fabrication disorder and photon losses, coherent long-range potentials or spin interactions can be dominant in the system over length scales up to hundreds of wavelengths. We present an optomechanical accelerometer with high dynamic range, high bandwidth and read-out noise levels below 8 ${\mu}$g/$\sqrt{\mathrm{Hz}}$. The straightforward assembly and low cost of our device make it a prime candidate for on-site reference calibrations and autonomous navigation. We present experimental data taken with a vacuum sealed, portable prototype and deduce the achieved bias stability and scale factor accuracy. Additionally, we present a comprehensive model of the device physics that we use to analyze the fundamental noise sources and accuracy limitations of such devices. In this brief report, we consider the equivalence between two sets of $m+1$ bipartite quantum states under local unitary transformations. For pure states, this problem corresponds to the matrix algebra question of whether two degree-$m$ matrix polynomials are unitarily equivalent; i.e.\ UA Quantum entanglement plays a central role in quantum information processing. A main objective of the theory is to classify different types of entanglement according to their interconvertibility through manipulations that do not require additional entanglement to perform. While bipartite entanglement is well understood in this framework, the classification of entanglements among three or more subsystems is inherently much more difficult. 
In this paper, we study pure state entanglement in systems of dimension $2\otimes m\otimes n$. Two states are considered equivalent if they can be reversibly converted from one to the other with a nonzero probability using only local quantum resources and classical communication (SLOCC). We introduce a connection between entanglement manipulations in these systems and the well-studied theory of matrix pencils. All previous attempts to study general SLOCC equivalence in such systems have relied on somewhat contrived techniques which fail to reveal the elegant structure of the problem that can be seen from the matrix pencil approach. Based on this method, we report the first polynomial-time algorithm for deciding when two $2\otimes m\otimes n$ states are SLOCC equivalent. We then proceed to present a canonical form for all $2\otimes m\otimes n$ states based on the matrix pencil construction such that two states are equivalent if and only if they have the same canonical form. Besides recovering the previously known 26 distinct SLOCC equivalence classes in $2\otimes 3\otimes n$ systems, we also determine the hierarchy between these classes.
Our analysis also suggests that the conventional product input states assumed by magic distillation protocols are extremely atypical among general states with distillable magic. It further justifies the need for studying a diverse range of entangled inputs that yield magic states with high probability.
a common problem: How can they be sure that their code does what they intend it to do? This
paper presents encouraging results in the application of mechanized proof to the domain of quantum
programming in the context of the sqir development. It verifies the correctness of a range of a
quantum algorithms including Grover's algorithm and quantum phase estimation, a key component
of Shor's algorithm. In doing so, it aims to highlight both the successes and challenges of formal
verification in the quantum context and motivate the theorem proving community to target quantum
computing as an application domain.
We first define and investigate the basic complexity-theoretic properties of minimum quantum circuit size problems for three natural objects: Boolean functions, unitaries, and quantum states. We show that these problems are not trivially in NP but in QCMA (or have QCMA protocols). Next, we explore the relations between the three quantum MCSPs and their variants. We discover that some reductions that are not known for classical MCSP exist for quantum MCSPs for unitaries and states, e.g., search-to-decision reduction and self-reduction. Finally, we systematically generalize results known for classical MCSP to the quantum setting (including quantum cryptography, quantum learning theory, quantum circuit lower bounds, and quantum fine-grained complexity) and also find new connections to tomography and quantum gravity. Due to the fundamental differences between classical and quantum circuits, most of our results require extra care and reveal properties and phenomena unique to the quantum setting. Our findings could be of interest for future studies, and we post several open problems for further exploration along this direction.
Our protocols result from a sequence of significant improvements to the original four-message protocol of Mahadev. We begin by making the first message instance-independent and moving it to an offline setup phase. We then establish a parallel repetition theorem for the resulting three-message protocol, with an asymptotically optimal rate. This, in turn, enables an application of the Fiat-Shamir heuristic, eliminating the second message and giving a non-interactive protocol. Finally, we employ classical non-interactive zero-knowledge (NIZK) arguments and classical fully homomorphic encryption (FHE) to give a zero-knowledge variant of this construction. This yields the first purely classical NIZK argument system for QMA, a quantum analogue of NP.
We establish the security of our protocols under standard assumptions in quantum-secure cryptography. Specifically, our protocols are secure in the Quantum Random Oracle Model, under the assumption that Learning with Errors is quantumly hard. The NIZK construction also requires circuit-private FHE.
Our algorithm has a Divide-and-Conquer structure, demonstrating how to approximate the desired quantity via several instantiations of the same problem type, each involving 3D-local circuits on about half the number of qubits as the original. This division step is then applied recursively, expressing the original quantity as a weighted combination of smaller and smaller 3D-local quantum circuits. A central technical challenge is to control correlations arising from entanglement that may exist between the different circuit {\textquoteleft}{\textquoteleft}pieces{\textquoteright}{\textquoteright} produced this way.
In this work, we investigate this question using the Constructive Cryptography framework by Maurer and Renner (ICS '11). We first identify the goal of RSPCC as the construction of ideal RSP resources from classical channels and then reveal the security limitations of using RSPCC. First, we uncover a fundamental relationship between constructing ideal RSP resources (from classical channels) and the task of cloning quantum states. Any classically constructed ideal RSP resource must leak to the server the full classical description (possibly in an encoded form) of the generated quantum state, even if we target computational security only. As a consequence, we find that the realization of common RSP resources, without weakening their guarantees drastically, is impossible due to the no-cloning theorem. Second, the above result does not rule out that a specific RSPCC protocol can replace the quantum channel at least in some contexts, such as the Universal Blind Quantum Computing (UBQC) protocol of Broadbent et al. (FOCS '09). However, we show that the resulting UBQC protocol cannot maintain its proven composable security as soon as RSPCC is used as a subroutine. Third, we show that replacing the quantum channel of the above UBQC protocol by the RSPCC protocol QFactory of Cojocaru et al. (Asiacrypt '19), preserves the weaker, game-based security of UBQC.
In this work, we prove that hypergraph symmetries in the adjacency matrix model allow at most a polynomial separation between randomized and quantum query complexities. We also show that, remarkably, permutation groups constructed out of these symmetries are essentially the only permutation groups that prevent super-polynomial quantum speedups. We prove this by fully characterizing the primitive permutation groups that allow super-polynomial quantum speedups.
In contrast, in the adjacency list model for bounded-degree graphs (where graph symmetry is manifested differently), we exhibit a property testing problem that shows an exponential quantum speedup. These results resolve open questions posed by Ambainis, Childs, and Liu (2010) and Montanaro and de Wolf (2013).
public-key cryptographic algorithms through a public competition-like process. The new public-key cryptography standards will specify one or more additional digital signature, public-key
encryption, and key-establishment algorithms to augment FIPS 186-4, Digital Signature Standard
(DSS), as well as special publications SP 800-56A Revision 2, Recommendation for Pair-Wise
Key Establishment Schemes Using Discrete Logarithm Cryptography, and SP 800-56B,
Recommendation for Pair-Wise Key-Establishment Schemes Using Integer Factorization. It is
intended that these algorithms will be capable of protecting sensitive information well into the
foreseeable future, including after the advent of quantum computers.
In November 2017, 82 candidate algorithms were submitted to NIST for consideration. Among
these, 69 met both the minimum acceptance criteria and our submission requirements, and were
accepted as First-Round Candidates on Dec. 20, 2017, marking the beginning of the First Round
of the NIST Post-Quantum Cryptography Standardization Process. This report describes the
evaluation criteria and selection process, based on public feedback and internal review of the
first-round candidates, and summarizes the 26 candidate algorithms announced on January 30,
2019 for moving forward to the second round of the competition. The 17 Second-Round
Candidate public-key encryption and key-establishment algorithms are BIKE, Classic McEliece,
CRYSTALS-KYBER, FrodoKEM, HQC, LAC, LEDAcrypt (merger of LEDAkem/LEDApkc),
NewHope, NTRU (merger of NTRUEncrypt/NTRU-HRSS-KEM), NTRU Prime, NTS-KEM,
ROLLO (merger of LAKE/LOCKER/Ouroboros-R), Round5 (merger of Hila5/Round2), RQC,
SABER, SIKE, and Three Bears. The 9 Second-Round Candidates for digital signatures are
CRYSTALS-DILITHIUM, FALCON, GeMSS, LUOV, MQDSS, Picnic, qTESLA, Rainbow,
and SPHINCS+.