pax_global_header00006660000000000000000000000064135246034360014520gustar00rootroot0000000000000052 comment=18e71981f63f27f9b22e9413647b5e55b11035e3 qutip-4.4.1/000077500000000000000000000000001352460343600126705ustar00rootroot00000000000000qutip-4.4.1/.codeclimate.yml000066400000000000000000000010671352460343600157460ustar00rootroot00000000000000version: "2" checks: argument-count: config: threshold: 8 complex-logic: config: threshold: 10 file-lines: enabled: false method-complexity: config: threshold: 10 method-count: config: threshold: 50 method-lines: config: threshold: 70 nested-control-flow: config: threshold: 4 return-statements: config: threshold: 4 similar-code: config: threshold: 32 identical-code: config: threshold: 32 plugins: fixme: enabled: true pep8: enabled: true qutip-4.4.1/.coveragerc000066400000000000000000000000531352460343600150070ustar00rootroot00000000000000[run] include = */qutip/* omit = */tests/* qutip-4.4.1/.gitignore000066400000000000000000000004751352460343600146660ustar00rootroot00000000000000*~ *.py[cod] *.so .DS_Store .f2py_f2cmap # Packages *.egg *.egg-info dist build eggs parts bin var sdist develop-eggs .installed.cfg lib lib64 qutip/version.py qutip/__config__.py rhs*.pyx qutip/cy/*.c qutip/cy/*.cpp qutip/control/*.cpp qutip/cy/openmp/*.cpp *.dat *.qo benchmark/benchmark_data.js *-tasks.txt qutip-4.4.1/.mailmap000066400000000000000000000011701352460343600143100ustar00rootroot00000000000000Paul Nation Paul Nation Paul Nation Paul Nation Anubhav Vardhan Anubhav Christopher Granade Chris Granade Markus Baden Markus Baden kafischer kevinf Alexander Pitchford ajgpitch Alexander Pitchford Alexander James Pitchford qutip-4.4.1/.travis.yml000066400000000000000000000066701352460343600150120ustar00rootroot00000000000000sudo: required stage_generic_linux: &stage_generic_linux os: linux language: python # change dir before_script: - mkdir qutip_testing - cd qutip_testing # command to run tests script: - python -m qutip.about - nosetests 
--verbosity=2 --with-coverage --cover-package=qutip qutip install: - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh - bash miniconda.sh -b -p $HOME/miniconda - export PATH="$HOME/miniconda/bin:$PATH" - hash -r - conda config --set always_yes yes --set changeps1 no - conda info -a - conda create -q -n test-environment python=$TRAVIS_PYTHON_VERSION - source activate test-environment - conda install mkl blas=*=mkl numpy scipy nose cython coveralls - python setup.py install stage_linux_36: &stage_linux_36 <<: *stage_generic_linux name: "Python 3.6" dist: trusty python: 3.6 stage_linux_37: &stage_linux_37 <<: *stage_generic_linux name: "Python 3.7" dist: xenial python: 3.7 stage_linux_37_openblas: &stage_linux_37_openblas <<: *stage_generic_linux name: "Python 3.7 OpenBLAS" dist: xenial python: 3.7 install: - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh - bash miniconda.sh -b -p $HOME/miniconda - export PATH="$HOME/miniconda/bin:$PATH" - hash -r - conda config --set always_yes yes --set changeps1 no - conda info -a - conda create -q -n test-environment python=$TRAVIS_PYTHON_VERSION - source activate test-environment - conda install nomkl blas=*=openblas numpy scipy nose cython - python setup.py install stage_linux_37_omp: &stage_linux_37_omp <<: *stage_generic_linux name: "Python 3.7 OpenMP" dist: xenial python: 3.7 install: - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh - bash miniconda.sh -b -p $HOME/miniconda - export PATH="$HOME/miniconda/bin:$PATH" - hash -r - conda config --set always_yes yes --set changeps1 no - conda info -a - conda create -q -n test-environment python=$TRAVIS_PYTHON_VERSION - source activate test-environment - conda install mkl blas=*=mkl numpy scipy nose cython coveralls - python setup.py install --with-openmp after_success: - coveralls stage_osx: &stage_osx os: osx name: "Python 3.7, OSX 10.13, XCode 10" 
osx_image: xcode10 language: generic # change dir before_script: - mkdir qutip_testing - cd qutip_testing # command to run tests script: - python -m qutip.about - nosetests --verbosity=2 --with-coverage --cover-package=qutip qutip install: - wget https://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -O miniconda.sh - bash miniconda.sh -b -p $HOME/miniconda - export PATH="$HOME/miniconda/bin:$PATH" - hash -r - conda config --set always_yes yes --set changeps1 no - conda info -a - conda create -q -n test-environment python=$TRAVIS_PYTHON_VERSION - source activate test-environment # - conda install mkl blas=*=mkl numpy scipy nose cython coveralls - conda install nomkl blas=*=openblas numpy scipy nose cython - python setup.py install stages: - test jobs: include: - stage: test <<: *stage_linux_36 - stage: test <<: *stage_linux_37 - stage: test <<: *stage_linux_37_omp - stage: test <<: *stage_linux_37_openblas - stage: test <<: *stage_osx qutip-4.4.1/CODE_OF_CONDUCT.md000066400000000000000000000037621352460343600154770ustar00rootroot00000000000000# Contributor Covenant Code of Conduct As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities. We are committed to making participation in this project a harassment-free experience for everyone, regardless of level of experience, gender, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, or nationality. 
Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery * Personal attacks * Trolling or insulting/derogatory comments * Public or private harassment * Publishing other's private information, such as physical or electronic addresses, without explicit permission * Other unethical or unprofessional conduct Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers commit themselves to fairly and consistently applying these principles to every aspect of managing this project. Project maintainers who do not follow or enforce the Code of Conduct may be permanently removed from the project team. This code of conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by opening an issue or contacting one or more of the project maintainers. This Code of Conduct is adapted from the Contributor Covenant , version 1.2.0, available at https://www.contributor-covenant.org/version/1/2/0/code-of-conduct.html [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/2/ qutip-4.4.1/LICENSE.txt000066400000000000000000000030511352460343600145120ustar00rootroot00000000000000 Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.qutip-4.4.1/MANIFEST.in000066400000000000000000000003541352460343600144300ustar00rootroot00000000000000include README.md include LICENSE.txt include requirements.txt include qutip.bib recursive-include qutip/ *.pyx recursive-include qutip/ *.pxi recursive-include qutip/ *.hpp recursive-include qutip/ *.pxd recursive-include qutip/ *.ini qutip-4.4.1/README.md000066400000000000000000000074341352460343600141570ustar00rootroot00000000000000QuTiP: Quantum Toolbox in Python ================================ [A. Pitchford](http://github.com/ajgpitch), [C. Granade](http://github.com/cgranade), [A. Grimsmo](http://github.com/arnelg), [P. D. Nation](http://github.com/nonhermitian), and [J. R. Johansson](http://github.com/jrjohansson) QuTiP is open-source software for simulating the dynamics of closed and open quantum systems. 
The QuTiP library uses the excellent Numpy, Scipy, and Cython packages as numerical backend, and graphical output is provided by Matplotlib. QuTiP aims to provide user-friendly and efficient numerical simulations of a wide variety of quantum mechanical problems, including those with Hamiltonians and/or collapse operators with arbitrary time-dependence, commonly found in a wide range of physics applications. QuTiP is freely available for use and/or modification, and it can be used on all Unix-based platforms and on Windows. Being free of any licensing fees, QuTiP is ideal for exploring quantum mechanics in research as well as in the classroom. Build status and test coverage ------------------------------ [![build-status](https://secure.travis-ci.org/qutip/qutip.svg?branch=master)](http://travis-ci.org/qutip/qutip) [![Coverage Status](https://img.shields.io/coveralls/qutip/qutip.svg)](https://coveralls.io/r/qutip/qutip) [![Maintainability](https://api.codeclimate.com/v1/badges/df502674f1dfa1f1b67a/maintainability)](https://codeclimate.com/github/qutip/qutip/maintainability) Download -------- [![Anaconda-Server Badge](https://anaconda.org/conda-forge/qutip/badges/downloads.svg)](https://anaconda.org/conda-forge/qutip) The official releases of QuTiP can be downloaded at: [http://qutip.org/download.html](http://qutip.org/download.html) Installation ------------ For instructions on how to install QuTiP, see: [http://qutip.org/docs/latest/installation.html](http://qutip.org/docs/latest/installation.html) Demos ----- A selection of demonstration notebooks is available here: [![Binder](http://img.shields.io/badge/launch-binder-ff69b4.svg?style=flat)](http://mybinder.org/repo/qutip/qutip-notebooks) or may be found at: [github.com/qutip/qutip-notebooks](http://github.com/qutip/qutip-notebooks). 
Documentation ------------- The documentation for official releases, in HTML and PDF formats, are available at: [http://qutip.org/documentation.html](http://qutip.org/documentation.html) and the development documentation is available at [github.com/qutip/qutip-doc](http://github.com/qutip/qutip-doc). Contribute ---------- You are most welcome to contribute to QuTiP development by forking this repository and sending pull requests, or filing bug reports at the [issues page](http://github.com/qutip/qutip/issues), or send us bug reports, questions, or your proposed changes to our [QuTiP discussion group](http://groups.google.com/group/qutip). All contributions are acknowledged in the [contributors](http://github.com/qutip/qutip-doc/blob/master/contributors.rst) section in the documentation. Note that all contributions must adhere to the [PEP 8 -- Style Guide for Python Code](https://www.python.org/dev/peps/pep-0008/). For more information, including technical advice, please see [Contributing to QuTiP development](https://github.com/qutip/qutip-doc/blob/master/qutip_dev_contrib.md). Changelog --------- For release notes and a change log, see the [changelog](http://github.com/qutip/qutip-doc/blob/master/changelog.rst) section in the documentation. License ------- [![license](https://img.shields.io/badge/license-New%20BSD-blue.svg)](http://en.wikipedia.org/wiki/BSD_licenses#3-clause_license_.28.22Revised_BSD_License.22.2C_.22New_BSD_License.22.2C_or_.22Modified_BSD_License.22.29) You are free to use this software, with or without modification, provided that the conditions listed in the LICENSE.txt file are satisfied. qutip-4.4.1/qutip.bib000066400000000000000000000014521352460343600145120ustar00rootroot00000000000000@article{qutip2, doi = {10.1016/j.cpc.2012.11.019}, url = {https://doi.org/10.1016/j.cpc.2012.11.019}, year = {2013}, month = {apr}, publisher = {Elsevier {BV}}, volume = {184}, number = {4}, pages = {1234--1240}, author = {J.R. Johansson and P.D. 
Nation and F. Nori}, title = {{QuTiP} 2: A {P}ython framework for the dynamics of open quantum systems}, journal = {Computer Physics Communications} } @article{qutip1, doi = {10.1016/j.cpc.2012.02.021}, url = {https://doi.org/10.1016/j.cpc.2012.02.021}, year = {2012}, month = {aug}, publisher = {Elsevier {BV}}, volume = {183}, number = {8}, pages = {1760--1772}, author = {J.R. Johansson and P.D. Nation and F. Nori}, title = {{QuTiP}: An open-source {P}ython framework for the dynamics of open quantum systems}, journal = {Computer Physics Communications} }qutip-4.4.1/qutip/000077500000000000000000000000001352460343600140325ustar00rootroot00000000000000qutip-4.4.1/qutip/__init__.py000066400000000000000000000217441352460343600161530ustar00rootroot00000000000000# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### from __future__ import division, print_function, absolute_import import os import sys import warnings import qutip.settings import qutip.version from qutip.version import version as __version__ from qutip.utilities import _version2int # ----------------------------------------------------------------------------- # Check if we're in IPython. try: __IPYTHON__ qutip.settings.ipython = True except: qutip.settings.ipython = False # ----------------------------------------------------------------------------- # Check for minimum requirements of dependencies, give the user a warning # if the requirements aren't fulfilled # numpy_requirement = "1.8.0" try: import numpy if _version2int(numpy.__version__) < _version2int(numpy_requirement): print("QuTiP warning: old version of numpy detected " + ("(%s), requiring %s." % (numpy.__version__, numpy_requirement))) except: warnings.warn("numpy not found.") scipy_requirement = "0.15.0" try: import scipy if _version2int(scipy.__version__) < _version2int(scipy_requirement): print("QuTiP warning: old version of scipy detected " + ("(%s), requiring %s." % (scipy.__version__, scipy_requirement))) except: warnings.warn("scipy not found.") # ----------------------------------------------------------------------------- # check to see if running from install directory for released versions. 
# top_path = os.path.dirname(os.path.dirname(__file__)) try: setup_file = open(top_path + '/setup.py', 'r') except: pass else: if ('QuTiP' in setup_file.readlines()[1][3:]) and qutip.version.release: print("You are in the installation directory. " + "Change directories before running QuTiP.") setup_file.close() del top_path # ----------------------------------------------------------------------------- # setup the cython environment # _cython_requirement = "0.21.0" try: import Cython if _version2int(Cython.__version__) < _version2int(_cython_requirement): print("QuTiP warning: old version of cython detected " + ("(%s), requiring %s." % (Cython.__version__, _cython_requirement))) except Exception as e: print("QuTiP warning: Cython setup failed: " + str(e)) else: del Cython # ----------------------------------------------------------------------------- # Look to see if we are running with OPENMP # # Set environ variable to determin if running in parallel mode # (i.e. in parfor or parallel_map) os.environ['QUTIP_IN_PARALLEL'] = 'FALSE' try: from qutip.cy.openmp.parfuncs import spmv_csr_openmp except: qutip.settings.has_openmp = False else: qutip.settings.has_openmp = True # See Pull #652 for why this is here. 
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' # ----------------------------------------------------------------------------- # cpu/process configuration # import multiprocessing # Check if environ flag for qutip processes is set if 'QUTIP_NUM_PROCESSES' in os.environ: qutip.settings.num_cpus = int(os.environ['QUTIP_NUM_PROCESSES']) else: os.environ['QUTIP_NUM_PROCESSES'] = str(qutip.settings.num_cpus) if qutip.settings.num_cpus == 0: # if num_cpu is 0 set it to the available number of cores import qutip.hardware_info info = qutip.hardware_info.hardware_info() if 'cpus' in info: qutip.settings.num_cpus = info['cpus'] else: try: qutip.settings.num_cpus = multiprocessing.cpu_count() except: qutip.settings.num_cpus = 1 # Find MKL library if it exists import qutip._mkl # ----------------------------------------------------------------------------- # Check that import modules are compatible with requested configuration # # Check for Matplotlib try: import matplotlib except: warnings.warn("matplotlib not found: Graphics will not work.") else: del matplotlib # ----------------------------------------------------------------------------- # Load modules # # core from qutip.qobj import * from qutip.qobjevo import * from qutip.states import * from qutip.operators import * from qutip.expect import * from qutip.tensor import * from qutip.superoperator import * from qutip.superop_reps import * from qutip.subsystem_apply import * from qutip.graph import * # graphics from qutip.bloch import * from qutip.visualization import * from qutip.orbital import * from qutip.bloch3d import * from qutip.matplotlib_utilities import * # library functions from qutip.tomography import * from qutip.wigner import * from qutip.random_objects import * from qutip.simdiag import * from qutip.entropy import * from qutip.metrics import * from qutip.partial_transpose import * from qutip.permute import * from qutip.continuous_variables import * from qutip.distributions import * from qutip.three_level_atom 
import * # evolution from qutip.solver import * from qutip.rhs_generate import * from qutip.mesolve import * from qutip.sesolve import * from qutip.mcsolve import * from qutip.stochastic import * from qutip.essolve import * from qutip.eseries import * from qutip.propagator import * from qutip.floquet import * from qutip.bloch_redfield import * from qutip.cy.br_tensor import bloch_redfield_tensor from qutip.steadystate import * from qutip.correlation import * from qutip.countstat import * from qutip.rcsolve import * from qutip.nonmarkov import * from qutip.interpolate import * from qutip.scattering import * # quantum information from qutip.qip import * # utilities from qutip.parallel import * from qutip.utilities import * from qutip.fileio import * from qutip.about import * from qutip.cite import * # Remove -Wstrict-prototypes from cflags import distutils.sysconfig cfg_vars = distutils.sysconfig.get_config_vars() if "CFLAGS" in cfg_vars: cfg_vars["CFLAGS"] = cfg_vars["CFLAGS"].replace("-Wstrict-prototypes", "") # Setup pyximport import qutip.cy.pyxbuilder as pbldr pbldr.install(setup_args={'include_dirs': [numpy.get_include()]}) del pbldr # ----------------------------------------------------------------------------- # Load user configuration if present: override defaults. 
# import qutip.configrc has_rc, rc_file = qutip.configrc.has_qutip_rc() # Make qutiprc and benchmark OPENMP if has_rc = False if qutip.settings.has_openmp and (not has_rc): from qutip.cy.openmp.bench_openmp import calculate_openmp_thresh #bench OPENMP print('Calibrating OPENMP threshold...') thrsh = calculate_openmp_thresh() qutip.configrc.generate_qutiprc() has_rc, rc_file = qutip.configrc.has_qutip_rc() if has_rc: qutip.configrc.write_rc_key(rc_file, 'openmp_thresh', thrsh) # Make OPENMP if has_rc but 'openmp_thresh' not in keys elif qutip.settings.has_openmp and has_rc: from qutip.cy.openmp.bench_openmp import calculate_openmp_thresh has_omp_key = qutip.configrc.has_rc_key(rc_file, 'openmp_thresh') if not has_omp_key: print('Calibrating OPENMP threshold...') thrsh = calculate_openmp_thresh() qutip.configrc.write_rc_key(rc_file, 'openmp_thresh', thrsh) # Load the config file if has_rc: qutip.configrc.load_rc_config(rc_file) # ----------------------------------------------------------------------------- # Clean name space # del os, sys, numpy, scipy, multiprocessing, distutils qutip-4.4.1/qutip/_mkl/000077500000000000000000000000001352460343600147545ustar00rootroot00000000000000qutip-4.4.1/qutip/_mkl/__init__.py000077500000000000000000000001221352460343600170630ustar00rootroot00000000000000import qutip.settings as qset from qutip._mkl.utilities import _set_mkl _set_mkl()qutip-4.4.1/qutip/_mkl/spmv.py000066400000000000000000000056771352460343600163320ustar00rootroot00000000000000# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. 
Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### import numpy as np import scipy.sparse as sp import ctypes from ctypes import POINTER,c_int,c_char,c_double, byref from numpy import ctypeslib import qutip.settings as qset zcsrgemv = qset.mkl_lib.mkl_cspblas_zcsrgemv def mkl_spmv(A, x): """ sparse csr_spmv using MKL """ (m,n) = A.shape # Pointers to data of the matrix data = A.data.ctypes.data_as(ctypeslib.ndpointer(np.complex128, ndim=1, flags='C')) indptr = A.indptr.ctypes.data_as(POINTER(c_int)) indices = A.indices.ctypes.data_as(POINTER(c_int)) # Allocate output, using same conventions as input if x.ndim is 1: y = np.empty(m,dtype=np.complex,order='C') elif x.ndim==2 and x.shape[1]==1: y = np.empty((m,1),dtype=np.complex,order='C') else: raise Exception('Input vector must be 1D row or 2D column vector') np_x = x.ctypes.data_as(ctypeslib.ndpointer(np.complex128, ndim=1, flags='C')) np_y = y.ctypes.data_as(ctypeslib.ndpointer(np.complex128, ndim=1, flags='C')) # now call MKL. This returns the answer in np_y, which points to y zcsrgemv(byref(c_char(bytes(b'N'))), byref(c_int(m)), data ,indptr, indices, np_x, np_y ) return y qutip-4.4.1/qutip/_mkl/spsolve.py000066400000000000000000000362741352460343600170350ustar00rootroot00000000000000# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. 
Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### from __future__ import print_function, division import sys import numpy as np import scipy.sparse as sp import ctypes from ctypes import POINTER, c_int, c_char, c_char_p, c_double, byref from numpy import ctypeslib import time import qutip.settings as qset # Load solver functions from mkl_lib pardiso = qset.mkl_lib.pardiso pardiso_delete = qset.mkl_lib.pardiso_handle_delete if sys.maxsize > 2**32: #Running 64-bit pardiso_64 = qset.mkl_lib.pardiso_64 pardiso_delete_64 = qset.mkl_lib.pardiso_handle_delete_64 def _pardiso_parameters(hermitian, has_perm, max_iter_refine, scaling_vectors, weighted_matching): iparm = np.zeros(64, dtype=np.int32) iparm[0] = 1 # Do not use default values iparm[1] = 3 # Use openmp nested dissection if has_perm: iparm[4] = 1 iparm[7] = max_iter_refine # Max number of iterative refinements if hermitian: iparm[9] = 8 else: iparm[9] = 13 if not hermitian: iparm[10] = int(scaling_vectors) # Scaling 
vectors iparm[12] = int(weighted_matching) # Use non-symmetric weighted matching iparm[17] = -1 iparm[20] = 1 iparm[23] = 1 # Parallel factorization iparm[26] = 0 # Check matrix structure iparm[34] = 1 # Use zero-based indexing return iparm # Set error messages pardiso_error_msgs = {'-1': 'Input inconsistant', '-2': 'Out of memory', '-3': 'Reordering problem', '-4' : 'Zero pivot, numerical factorization or iterative refinement problem', '-5': 'Unclassified internal error', '-6': 'Reordering failed', '-7': 'Diagonal matrix is singular', '-8': '32-bit integer overflow', '-9': 'Not enough memory for OOC', '-10': 'Error opening OOC files', '-11': 'Read/write error with OOC files', '-12': 'Pardiso-64 called from 32-bit library'} def _default_solver_args(): def_args = {'hermitian': False, 'posdef': False, 'max_iter_refine': 10, 'scaling_vectors': True, 'weighted_matching': True, 'return_info': False} return def_args class mkl_lu(object): """ Object pointing to LU factorization of a sparse matrix generated by mkl_splu. Methods ------- solve(b, verbose=False) Solve system of equations using given RHS vector 'b'. Returns solution ndarray with same shape as input. info() Returns the statistics of the factorization and solution in the lu.info attribute. delete() Deletes the allocated solver memory. 
""" def __init__(self, np_pt=None, dim=None, is_complex=None, data=None, indptr=None, indices=None, iparm=None, np_iparm=None, mtype=None, perm=None, np_perm=None, factor_time=None): self._np_pt = np_pt self._dim = dim self._is_complex = is_complex self._data = data self._indptr = indptr self._indices = indices self._iparm = iparm self._np_iparm = np_iparm self._mtype = mtype self._perm = perm self._np_perm = np_perm self._factor_time = factor_time self._solve_time = None def solve(self, b, verbose = None): b_shp = b.shape if b.ndim == 2 and b.shape[1] == 1: b = b.ravel() nrhs = 1 elif b.ndim == 2 and b.shape[1] != 1: nrhs = b.shape[1] b = b.ravel(order='F') else: b = b.ravel() nrhs = 1 if self._is_complex: data_type = np.complex128 if b.dtype != np.complex128: b = b.astype(np.complex128, copy=False) else: data_type = np.float64 if b.dtype != np.float64: b = b.astype(np.float64, copy=False) # Create solution array (x) and pointers to x and b if self._is_complex: x = np.zeros(b.shape, dtype=np.complex128, order='C') else: x = np.zeros(b.shape, dtype=np.float64, order='C') np_x = x.ctypes.data_as(ctypeslib.ndpointer(data_type, ndim=1, flags='C')) np_b = b.ctypes.data_as(ctypeslib.ndpointer(data_type, ndim=1, flags='C')) error = np.zeros(1,dtype=np.int32) np_error = error.ctypes.data_as(ctypeslib.ndpointer(np.int32, ndim=1, flags='C')) #Call solver _solve_start = time.time() pardiso(self._np_pt, byref(c_int(1)), byref(c_int(1)), byref(c_int(self._mtype)), byref(c_int(33)), byref(c_int(self._dim)), self._data, self._indptr, self._indices, self._np_perm, byref(c_int(nrhs)), self._np_iparm, byref(c_int(0)), np_b, np_x, np_error) self._solve_time = time.time() -_solve_start if error[0] != 0: raise Exception(pardiso_error_msgs[str(error[0])]) if verbose: print('Solution Stage') print('--------------') print('Solution time: ',round(self._solve_time,4)) print('Solution memory (Mb): ',round(self._iparm[16]/1024.,4)) print('Number of iterative refinements:',self._iparm[6]) 
print('Total memory (Mb): ',round(sum(self._iparm[15:17])/1024.,4)) print() # Return solution vector x if nrhs==1: if x.shape != b_shp: x = np.reshape(x, b_shp) return x else: return np.reshape(x, b_shp, order='F') def info(self): info = {'FactorTime': self._factor_time, 'SolveTime': self._solve_time, 'Factormem': round(self._iparm[15]/1024.,4), 'Solvemem': round(self._iparm[16]/1024.,4), 'IterRefine': self._iparm[6]} return info def delete(self): #Delete all data error = np.zeros(1,dtype=np.int32) np_error = error.ctypes.data_as(ctypeslib.ndpointer(np.int32, ndim=1, flags='C')) pardiso(self._np_pt, byref(c_int(1)), byref(c_int(1)), byref(c_int(self._mtype)), byref(c_int(-1)), byref(c_int(self._dim)), self._data, self._indptr, self._indices, self._np_perm, byref(c_int(1)), self._np_iparm, byref(c_int(0)), byref(c_int(0)), byref(c_int(0)), np_error) if error[0] == -10: raise Exception('Error freeing solver memory') def mkl_splu(A, perm=None, verbose=False, **kwargs): """ Returns the LU factorization of the sparse matrix A. Parameters ---------- A : csr_matrix Sparse input matrix. perm : ndarray (optional) User defined matrix factorization permutation. verbose : bool {False, True} Report factorization details. Returns ------- lu : mkl_lu Returns object containing LU factorization with a solve method for solving with a given RHS vector. 
""" if not sp.isspmatrix_csr(A): raise TypeError('Input matrix must be in sparse CSR format.') if A.shape[0] != A.shape[1]: raise Exception('Input matrix must be square') dim = A.shape[0] solver_args = _default_solver_args() for key in kwargs.keys(): if key in solver_args.keys(): solver_args[key] = kwargs[key] else: raise Exception( "Invalid keyword argument '"+key+"' passed to mkl_splu.") # If hermitian, then take upper-triangle of matrix only if solver_args['hermitian']: B = sp.triu(A, format='csr') A = B #This gets around making a full copy of A in triu if A.dtype == np.complex128: is_complex = 1 data_type = np.complex128 else: is_complex = 0 data_type = np.float64 if A.dtype != np.float64: A = sp.csr_matrix(A, dtype=np.float64, copy=False) #Create pointer to internal memory pt = np.zeros(64,dtype=int) np_pt = pt.ctypes.data_as(ctypeslib.ndpointer(int, ndim=1, flags='C')) # Create pointers to sparse matrix arrays data = A.data.ctypes.data_as(ctypeslib.ndpointer(data_type, ndim=1, flags='C')) indptr = A.indptr.ctypes.data_as(ctypeslib.ndpointer(np.int32, ndim=1, flags='C')) indices = A.indices.ctypes.data_as(ctypeslib.ndpointer(np.int32, ndim=1, flags='C')) nnz = A.nnz # Setup perm array if perm is None: perm = np.zeros(dim, dtype=np.int32) has_perm = 0 else: has_perm = 1 np_perm = perm.ctypes.data_as(ctypeslib.ndpointer(np.int32, ndim=1, flags='C')) # setup iparm iparm = _pardiso_parameters(solver_args['hermitian'], has_perm, solver_args['max_iter_refine'], solver_args['scaling_vectors'], solver_args['weighted_matching']) np_iparm = iparm.ctypes.data_as(ctypeslib.ndpointer(np.int32, ndim=1, flags='C')) # setup call parameters matrix_dtype = '' matrix_type = '' if data_type == np.complex128: matrix_dtype = 'Complex ' if solver_args['hermitian']: if solver_args['posdef']: mtype = 4 matrix_type = 'Hermitian postive-definite' else: mtype = -4 matrix_type = 'Hermitian indefinite' else: mtype = 13 matrix_type = 'Non-symmetric' else: matrix_dtype = 'Real ' if 
solver_args['hermitian']: if solver_args['posdef']: mtype = 2 matrix_type = 'symmetric postive-definite' else: mtype = -2 matrix_type = 'symmetric indefinite' else: mtype = 11 matrix_type = 'Non-symmetric' if verbose: print('Solver Initialization') print('---------------------') print('Input matrix type: ', matrix_dtype+matrix_type) print('Input matrix shape:', A.shape) print('Input matrix NNZ: ', A.nnz) print() b = np.zeros(1, dtype=data_type) # Input dummy RHS at this phase np_b = b.ctypes.data_as(ctypeslib.ndpointer(data_type, ndim=1, flags='C')) x = np.zeros(1, dtype=data_type) # Input dummy solution at this phase np_x = x.ctypes.data_as(ctypeslib.ndpointer(data_type, ndim=1, flags='C')) error = np.zeros(1,dtype=np.int32) np_error = error.ctypes.data_as(ctypeslib.ndpointer(np.int32, ndim=1, flags='C')) #Call solver _factor_start = time.time() pardiso(np_pt, byref(c_int(1)), byref(c_int(1)), byref(c_int(mtype)), byref(c_int(12)), byref(c_int(dim)), data, indptr, indices, np_perm, byref(c_int(1)), np_iparm, byref(c_int(0)), np_b, np_x, np_error) _factor_time = time.time() - _factor_start if error[0] != 0: raise Exception(pardiso_error_msgs[str(error[0])]) if verbose: print('Analysis and Factorization Stage') print('--------------------------------') print('Factorization time: ',round(_factor_time,4)) print('Factorization memory (Mb):',round(iparm[15]/1024.,4)) print('NNZ in LU factors: ',iparm[17]) print() return mkl_lu(np_pt, dim, is_complex, data, indptr, indices, iparm, np_iparm, mtype, perm, np_perm, _factor_time) def mkl_spsolve(A, b, perm=None, verbose=False, **kwargs): """ Solves a sparse linear system of equations using the Intel MKL Pardiso solver. Parameters ---------- A : csr_matrix Sparse matrix. b : ndarray or sparse matrix The vector or matrix representing the right hand side of the equation. If a vector, b.shape must be (n,) or (n, 1). perm : ndarray (optional) User defined matrix factorization permutation. 
Returns ------- x : ndarray or csr_matrix The solution of the sparse linear equation. If b is a vector, then x is a vector of size A.shape[1] If b is a matrix, then x is a matrix of size (A.shape[1], b.shape[1]) """ lu = mkl_splu(A, perm=perm, verbose=verbose, **kwargs) b_is_sparse = sp.isspmatrix(b) b_shp = b.shape if b_is_sparse and b.shape[1] == 1: b = b.toarray() b_is_sparse = False elif b_is_sparse and b.shape[1] != 1: nrhs = b.shape[1] if lu._is_complex: b = sp.csc_matrix(b, dtype=np.complex128, copy=False) else: b = sp.csc_matrix(b, dtype=np.float64, copy=False) # Do dense RHS solving if not b_is_sparse: x = lu.solve(b, verbose=verbose) # Solve each RHS vec individually and convert to sparse else: data_segs = [] row_segs = [] col_segs = [] for j in range(nrhs): bj = b[:, j].A.ravel() xj = lu.solve(bj) w = np.flatnonzero(xj) segment_length = w.shape[0] row_segs.append(w) col_segs.append(np.ones(segment_length, dtype=np.int32)*j) data_segs.append(np.asarray(xj[w], dtype=xj.dtype)) sp_data = np.concatenate(data_segs) sp_row = np.concatenate(row_segs) sp_col = np.concatenate(col_segs) x = sp.coo_matrix((sp_data,(sp_row,sp_col)), shape=b_shp).tocsr() info = lu.info() lu.delete() if 'return_info' in kwargs.keys() and kwargs['return_info'] == True: return x, info else: return x qutip-4.4.1/qutip/_mkl/utilities.py000066400000000000000000000064451352460343600173520ustar00rootroot00000000000000# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. 
Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### import numpy as np import os, sys from qutip.utilities import _blas_info import qutip.settings as qset from ctypes import cdll def _set_mkl(): """ Finds the MKL runtime library for the Anaconda and Intel Python distributions. 
""" if _blas_info() == 'INTEL MKL': plat = sys.platform python_dir = os.path.dirname(sys.executable) if plat in ['darwin','linux2', 'linux']: python_dir = os.path.dirname(python_dir) if plat == 'darwin': lib = '/libmkl_rt.dylib' elif plat == 'win32': lib = '\\mkl_rt.dll' elif plat in ['linux2', 'linux']: lib = '/libmkl_rt.so' else: raise Exception('Unknown platfrom.') if plat in ['darwin','linux2', 'linux']: lib_dir = '/lib' else: lib_dir = '\Library\\bin' # Try in default Anaconda location first try: qset.mkl_lib = cdll.LoadLibrary(python_dir+lib_dir+lib) qset.has_mkl = True except: pass # Look in Intel Python distro location if not qset.has_mkl: if plat in ['darwin','linux2', 'linux']: lib_dir = '/ext/lib' else: lib_dir = '\ext\\lib' try: qset.mkl_lib = cdll.LoadLibrary(python_dir+lib_dir+lib) qset.has_mkl = True except: pass else: pass if __name__ == "__main__": _set_mkl() print(qset.has_mkl) qutip-4.4.1/qutip/about.py000066400000000000000000000102511352460343600155150ustar00rootroot00000000000000# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. 
# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### """ Command line output of information on QuTiP and dependencies. """ __all__ = ['about'] import sys import os import platform import numpy import scipy import inspect from qutip.utilities import _blas_info import qutip.settings from qutip.hardware_info import hardware_info def about(): """ About box for QuTiP. Gives version numbers for QuTiP, NumPy, SciPy, Cython, and MatPlotLib. """ print("") print("QuTiP: Quantum Toolbox in Python") print("================================") print("Copyright (c) QuTiP team 2011 and later.") print("Original developers: R. J. Johansson & P. D. Nation.") print("Current admin team: Alexander Pitchford, Paul D. Nation, " "Nathan Shammah, Shahnawaz Ahmed, " "Neill Lambert, and Eric Giguère.") print("Project Manager: Franco Nori.") print("Currently developed through wide collaboration. 
" "See https://github.com/qutip for details.") print("") print("QuTiP Version: %s" % qutip.__version__) print("Numpy Version: %s" % numpy.__version__) print("Scipy Version: %s" % scipy.__version__) try: import Cython cython_ver = Cython.__version__ except: cython_ver = 'None' print("Cython Version: %s" % cython_ver) try: import matplotlib matplotlib_ver = matplotlib.__version__ except: matplotlib_ver = 'None' print("Matplotlib Version: %s" % matplotlib_ver) print("Python Version: %d.%d.%d" % sys.version_info[0:3]) print("Number of CPUs: %s" % hardware_info()['cpus']) print("BLAS Info: %s" % _blas_info()) print("OPENMP Installed: %s" % str(qutip.settings.has_openmp)) print("INTEL MKL Ext: %s" % str(qutip.settings.has_mkl)) print("Platform Info: %s (%s)" % (platform.system(), platform.machine())) qutip_install_path = os.path.dirname(inspect.getsourcefile(qutip)) print("Installation path: %s" % qutip_install_path) # citation longbar = "==============================================================" longbar += "================" cite_msg = "For your convenience a bibtex reference can be easily generated" cite_msg += " using `qutip.cite()`" print(longbar) print("Please cite QuTiP in your publication.") print(longbar) print(cite_msg) if __name__ == "__main__": about() qutip-4.4.1/qutip/bloch.py000066400000000000000000000650751352460343600155100ustar00rootroot00000000000000# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. 
Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### __all__ = ['Bloch'] import os from numpy import (ndarray, array, linspace, pi, outer, cos, sin, ones, size, sqrt, real, mod, append, ceil, arange) from qutip.qobj import Qobj from qutip.expect import expect from qutip.operators import sigmax, sigmay, sigmaz try: import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from matplotlib.patches import FancyArrowPatch from mpl_toolkits.mplot3d import proj3d class Arrow3D(FancyArrowPatch): def __init__(self, xs, ys, zs, *args, **kwargs): FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs) self._verts3d = xs, ys, zs def draw(self, renderer): xs3d, ys3d, zs3d = self._verts3d xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M) self.set_positions((xs[0], ys[0]), (xs[1], ys[1])) FancyArrowPatch.draw(self, renderer) except: pass try: from IPython.display import display except: pass class Bloch(): """Class for plotting data on the Bloch sphere. Valid data can be either points, vectors, or qobj objects. Attributes ---------- axes : instance {None} User supplied Matplotlib axes for Bloch sphere animation. fig : instance {None} User supplied Matplotlib Figure instance for plotting Bloch sphere. font_color : str {'black'} Color of font used for Bloch sphere labels. font_size : int {20} Size of font used for Bloch sphere labels. frame_alpha : float {0.1} Sets transparency of Bloch sphere frame. frame_color : str {'gray'} Color of sphere wireframe. frame_width : int {1} Width of wireframe. point_color : list {["b","r","g","#CC6600"]} List of colors for Bloch sphere point markers to cycle through. i.e. By default, points 0 and 4 will both be blue ('b'). point_marker : list {["o","s","d","^"]} List of point marker shapes to cycle through. point_size : list {[25,32,35,45]} List of point marker sizes. Note, not all point markers look the same size when plotted! 
sphere_alpha : float {0.2} Transparency of Bloch sphere itself. sphere_color : str {'#FFDDDD'} Color of Bloch sphere. figsize : list {[7,7]} Figure size of Bloch sphere plot. Best to have both numbers the same; otherwise you will have a Bloch sphere that looks like a football. vector_color : list {["g","#CC6600","b","r"]} List of vector colors to cycle through. vector_width : int {5} Width of displayed vectors. vector_style : str {'-|>', 'simple', 'fancy', ''} Vector arrowhead style (from matplotlib's arrow style). vector_mutation : int {20} Width of vectors arrowhead. view : list {[-60,30]} Azimuthal and Elevation viewing angles. xlabel : list {["$x$",""]} List of strings corresponding to +x and -x axes labels, respectively. xlpos : list {[1.1,-1.1]} Positions of +x and -x labels respectively. ylabel : list {["$y$",""]} List of strings corresponding to +y and -y axes labels, respectively. ylpos : list {[1.2,-1.2]} Positions of +y and -y labels respectively. zlabel : list {[r'$\\left|0\\right>$',r'$\\left|1\\right>$']} List of strings corresponding to +z and -z axes labels, respectively. zlpos : list {[1.2,-1.2]} Positions of +z and -z labels respectively. """ def __init__(self, fig=None, axes=None, view=None, figsize=None, background=False): # Figure and axes self.fig = fig self.axes = axes # Background axes, default = False self.background = background # The size of the figure in inches, default = [5,5]. self.figsize = figsize if figsize else [5, 5] # Azimuthal and Elvation viewing angles, default = [-60,30]. 
self.view = view if view else [-60, 30] # Color of Bloch sphere, default = #FFDDDD self.sphere_color = '#FFDDDD' # Transparency of Bloch sphere, default = 0.2 self.sphere_alpha = 0.2 # Color of wireframe, default = 'gray' self.frame_color = 'gray' # Width of wireframe, default = 1 self.frame_width = 1 # Transparency of wireframe, default = 0.2 self.frame_alpha = 0.2 # Labels for x-axis (in LaTex), default = ['$x$', ''] self.xlabel = ['$x$', ''] # Position of x-axis labels, default = [1.2, -1.2] self.xlpos = [1.2, -1.2] # Labels for y-axis (in LaTex), default = ['$y$', ''] self.ylabel = ['$y$', ''] # Position of y-axis labels, default = [1.1, -1.1] self.ylpos = [1.2, -1.2] # Labels for z-axis (in LaTex), # default = [r'$\left|0\right>$', r'$\left|1\right>$'] self.zlabel = [r'$\left|0\right>$', r'$\left|1\right>$'] # Position of z-axis labels, default = [1.2, -1.2] self.zlpos = [1.2, -1.2] # ---font options--- # Color of fonts, default = 'black' self.font_color = 'black' # Size of fonts, default = 20 self.font_size = 20 # ---vector options--- # List of colors for Bloch vectors, default = ['b','g','r','y'] self.vector_color = ['g', '#CC6600', 'b', 'r'] #: Width of Bloch vectors, default = 5 self.vector_width = 3 #: Style of Bloch vectors, default = '-|>' (or 'simple') self.vector_style = '-|>' #: Sets the width of the vectors arrowhead self.vector_mutation = 20 # ---point options--- # List of colors for Bloch point markers, default = ['b','g','r','y'] self.point_color = ['b', 'r', 'g', '#CC6600'] # Size of point markers, default = 25 self.point_size = [25, 32, 35, 45] # Shape of point markers, default = ['o','^','d','s'] self.point_marker = ['o', 's', 'd', '^'] # ---data lists--- # Data for point markers self.points = [] # Data for Bloch vectors self.vectors = [] # Data for annotations self.annotations = [] # Number of times sphere has been saved self.savenum = 0 # Style of points, 'm' for multiple colors, 's' for single color self.point_style = [] # status of 
rendering self._rendered = False # status of showing if fig is None: self._shown = False else: self._shown = True def set_label_convention(self, convention): """Set x, y and z labels according to one of conventions. Parameters ---------- convention : string One of the following: - "original" - "xyz" - "sx sy sz" - "01" - "polarization jones" - "polarization jones letters" see also: http://en.wikipedia.org/wiki/Jones_calculus - "polarization stokes" see also: http://en.wikipedia.org/wiki/Stokes_parameters """ ketex = "$\\left.|%s\\right\\rangle$" # \left.| is on purpose, so that every ket has the same size if convention == "original": self.xlabel = ['$x$', ''] self.ylabel = ['$y$', ''] self.zlabel = ['$\\left|0\\right>$', '$\\left|1\\right>$'] elif convention == "xyz": self.xlabel = ['$x$', ''] self.ylabel = ['$y$', ''] self.zlabel = ['$z$', ''] elif convention == "sx sy sz": self.xlabel = ['$s_x$', ''] self.ylabel = ['$s_y$', ''] self.zlabel = ['$s_z$', ''] elif convention == "01": self.xlabel = ['', ''] self.ylabel = ['', ''] self.zlabel = ['$\\left|0\\right>$', '$\\left|1\\right>$'] elif convention == "polarization jones": self.xlabel = [ketex % "\\nearrow\\hspace{-1.46}\\swarrow", ketex % "\\nwarrow\\hspace{-1.46}\\searrow"] self.ylabel = [ketex % "\\circlearrowleft", ketex % "\\circlearrowright"] self.zlabel = [ketex % "\\leftrightarrow", ketex % "\\updownarrow"] elif convention == "polarization jones letters": self.xlabel = [ketex % "D", ketex % "A"] self.ylabel = [ketex % "L", ketex % "R"] self.zlabel = [ketex % "H", ketex % "V"] elif convention == "polarization stokes": self.ylabel = ["$\\nearrow\\hspace{-1.46}\\swarrow$", "$\\nwarrow\\hspace{-1.46}\\searrow$"] self.zlabel = ["$\\circlearrowleft$", "$\\circlearrowright$"] self.xlabel = ["$\\leftrightarrow$", "$\\updownarrow$"] else: raise Exception("No such convention.") def __str__(self): s = "" s += "Bloch data:\n" s += "-----------\n" s += "Number of points: " + str(len(self.points)) + "\n" s += "Number 
of vectors: " + str(len(self.vectors)) + "\n" s += "\n" s += "Bloch sphere properties:\n" s += "------------------------\n" s += "font_color: " + str(self.font_color) + "\n" s += "font_size: " + str(self.font_size) + "\n" s += "frame_alpha: " + str(self.frame_alpha) + "\n" s += "frame_color: " + str(self.frame_color) + "\n" s += "frame_width: " + str(self.frame_width) + "\n" s += "point_color: " + str(self.point_color) + "\n" s += "point_marker: " + str(self.point_marker) + "\n" s += "point_size: " + str(self.point_size) + "\n" s += "sphere_alpha: " + str(self.sphere_alpha) + "\n" s += "sphere_color: " + str(self.sphere_color) + "\n" s += "figsize: " + str(self.figsize) + "\n" s += "vector_color: " + str(self.vector_color) + "\n" s += "vector_width: " + str(self.vector_width) + "\n" s += "vector_style: " + str(self.vector_style) + "\n" s += "vector_mutation: " + str(self.vector_mutation) + "\n" s += "view: " + str(self.view) + "\n" s += "xlabel: " + str(self.xlabel) + "\n" s += "xlpos: " + str(self.xlpos) + "\n" s += "ylabel: " + str(self.ylabel) + "\n" s += "ylpos: " + str(self.ylpos) + "\n" s += "zlabel: " + str(self.zlabel) + "\n" s += "zlpos: " + str(self.zlpos) + "\n" return s def _repr_png_(self): from IPython.core.pylabtools import print_figure self.render() fig_data = print_figure(self.fig, 'png') plt.close(self.fig) return fig_data def _repr_svg_(self): from IPython.core.pylabtools import print_figure self.render() fig_data = print_figure(self.fig, 'svg').decode('utf-8') plt.close(self.fig) return fig_data def clear(self): """Resets Bloch sphere data sets to empty. """ self.points = [] self.vectors = [] self.point_style = [] self.annotations = [] def add_points(self, points, meth='s'): """Add a list of data points to bloch sphere. Parameters ---------- points : array/list Collection of data points. meth : str {'s', 'm', 'l'} Type of points to plot, use 'm' for multicolored, 'l' for points connected with a line. 
""" if not isinstance(points[0], (list, ndarray)): points = [[points[0]], [points[1]], [points[2]]] points = array(points) if meth == 's': if len(points[0]) == 1: pnts = array([[points[0][0]], [points[1][0]], [points[2][0]]]) pnts = append(pnts, points, axis=1) else: pnts = points self.points.append(pnts) self.point_style.append('s') elif meth == 'l': self.points.append(points) self.point_style.append('l') else: self.points.append(points) self.point_style.append('m') def add_states(self, state, kind='vector'): """Add a state vector Qobj to Bloch sphere. Parameters ---------- state : qobj Input state vector. kind : str {'vector','point'} Type of object to plot. """ if isinstance(state, Qobj): state = [state] for st in state: vec = [expect(sigmax(), st), expect(sigmay(), st), expect(sigmaz(), st)] if kind == 'vector': self.add_vectors(vec) elif kind == 'point': self.add_points(vec) def add_vectors(self, vectors): """Add a list of vectors to Bloch sphere. Parameters ---------- vectors : array_like Array with vectors of unit length or smaller. """ if isinstance(vectors[0], (list, ndarray)): for vec in vectors: self.vectors.append(vec) else: self.vectors.append(vectors) def add_annotation(self, state_or_vector, text, **kwargs): """Add a text or LaTeX annotation to Bloch sphere, parametrized by a qubit state or a vector. Parameters ---------- state_or_vector : Qobj/array/list/tuple Position for the annotaion. Qobj of a qubit or a vector of 3 elements. text : str/unicode Annotation text. You can use LaTeX, but remember to use raw string e.g. r"$\\langle x \\rangle$" or escape backslashes e.g. "$\\\\langle x \\\\rangle$". **kwargs : Options as for mplot3d.axes3d.text, including: fontsize, color, horizontalalignment, verticalalignment. 
""" if isinstance(state_or_vector, Qobj): vec = [expect(sigmax(), state_or_vector), expect(sigmay(), state_or_vector), expect(sigmaz(), state_or_vector)] elif isinstance(state_or_vector, (list, ndarray, tuple)) \ and len(state_or_vector) == 3: vec = state_or_vector else: raise Exception("Position needs to be specified by a qubit " + "state or a 3D vector.") self.annotations.append({'position': vec, 'text': text, 'opts': kwargs}) def make_sphere(self): """ Plots Bloch sphere and data sets. """ self.render(self.fig, self.axes) def run_from_ipython(self): try: __IPYTHON__ return True except NameError: return False def render(self, fig=None, axes=None): """ Render the Bloch sphere and its data sets in on given figure and axes. """ if self._rendered: self.axes.clear() self._rendered = True # Figure instance for Bloch sphere plot if not fig: self.fig = plt.figure(figsize=self.figsize) if not axes: self.axes = Axes3D(self.fig, azim=self.view[0], elev=self.view[1]) if self.background: self.axes.clear() self.axes.set_xlim3d(-1.3, 1.3) self.axes.set_ylim3d(-1.3, 1.3) self.axes.set_zlim3d(-1.3, 1.3) else: self.plot_axes() self.axes.set_axis_off() self.axes.set_xlim3d(-0.7, 0.7) self.axes.set_ylim3d(-0.7, 0.7) self.axes.set_zlim3d(-0.7, 0.7) self.axes.grid(False) self.plot_back() self.plot_points() self.plot_vectors() self.plot_front() self.plot_axes_labels() self.plot_annotations() def plot_back(self): # back half of sphere u = linspace(0, pi, 25) v = linspace(0, pi, 25) x = outer(cos(u), sin(v)) y = outer(sin(u), sin(v)) z = outer(ones(size(u)), cos(v)) self.axes.plot_surface(x, y, z, rstride=2, cstride=2, color=self.sphere_color, linewidth=0, alpha=self.sphere_alpha) # wireframe self.axes.plot_wireframe(x, y, z, rstride=5, cstride=5, color=self.frame_color, alpha=self.frame_alpha) # equator self.axes.plot(1.0 * cos(u), 1.0 * sin(u), zs=0, zdir='z', lw=self.frame_width, color=self.frame_color) self.axes.plot(1.0 * cos(u), 1.0 * sin(u), zs=0, zdir='x', lw=self.frame_width, 
color=self.frame_color) def plot_front(self): # front half of sphere u = linspace(-pi, 0, 25) v = linspace(0, pi, 25) x = outer(cos(u), sin(v)) y = outer(sin(u), sin(v)) z = outer(ones(size(u)), cos(v)) self.axes.plot_surface(x, y, z, rstride=2, cstride=2, color=self.sphere_color, linewidth=0, alpha=self.sphere_alpha) # wireframe self.axes.plot_wireframe(x, y, z, rstride=5, cstride=5, color=self.frame_color, alpha=self.frame_alpha) # equator self.axes.plot(1.0 * cos(u), 1.0 * sin(u), zs=0, zdir='z', lw=self.frame_width, color=self.frame_color) self.axes.plot(1.0 * cos(u), 1.0 * sin(u), zs=0, zdir='x', lw=self.frame_width, color=self.frame_color) def plot_axes(self): # axes span = linspace(-1.0, 1.0, 2) self.axes.plot(span, 0 * span, zs=0, zdir='z', label='X', lw=self.frame_width, color=self.frame_color) self.axes.plot(0 * span, span, zs=0, zdir='z', label='Y', lw=self.frame_width, color=self.frame_color) self.axes.plot(0 * span, span, zs=0, zdir='y', label='Z', lw=self.frame_width, color=self.frame_color) def plot_axes_labels(self): # axes labels opts = {'fontsize': self.font_size, 'color': self.font_color, 'horizontalalignment': 'center', 'verticalalignment': 'center'} self.axes.text(0, -self.xlpos[0], 0, self.xlabel[0], **opts) self.axes.text(0, -self.xlpos[1], 0, self.xlabel[1], **opts) self.axes.text(self.ylpos[0], 0, 0, self.ylabel[0], **opts) self.axes.text(self.ylpos[1], 0, 0, self.ylabel[1], **opts) self.axes.text(0, 0, self.zlpos[0], self.zlabel[0], **opts) self.axes.text(0, 0, self.zlpos[1], self.zlabel[1], **opts) for a in (self.axes.w_xaxis.get_ticklines() + self.axes.w_xaxis.get_ticklabels()): a.set_visible(False) for a in (self.axes.w_yaxis.get_ticklines() + self.axes.w_yaxis.get_ticklabels()): a.set_visible(False) for a in (self.axes.w_zaxis.get_ticklines() + self.axes.w_zaxis.get_ticklabels()): a.set_visible(False) def plot_vectors(self): # -X and Y data are switched for plotting purposes for k in range(len(self.vectors)): xs3d = self.vectors[k][1] 
* array([0, 1]) ys3d = -self.vectors[k][0] * array([0, 1]) zs3d = self.vectors[k][2] * array([0, 1]) color = self.vector_color[mod(k, len(self.vector_color))] if self.vector_style == '': # simple line style self.axes.plot(xs3d, ys3d, zs3d, zs=0, zdir='z', label='Z', lw=self.vector_width, color=color) else: # decorated style, with arrow heads a = Arrow3D(xs3d, ys3d, zs3d, mutation_scale=self.vector_mutation, lw=self.vector_width, arrowstyle=self.vector_style, color=color) self.axes.add_artist(a) def plot_points(self): # -X and Y data are switched for plotting purposes for k in range(len(self.points)): num = len(self.points[k][0]) dist = [sqrt(self.points[k][0][j] ** 2 + self.points[k][1][j] ** 2 + self.points[k][2][j] ** 2) for j in range(num)] if any(abs(dist - dist[0]) / dist[0] > 1e-12): # combine arrays so that they can be sorted together zipped = list(zip(dist, range(num))) zipped.sort() # sort rates from lowest to highest dist, indperm = zip(*zipped) indperm = array(indperm) else: indperm = arange(num) if self.point_style[k] == 's': self.axes.scatter( real(self.points[k][1][indperm]), - real(self.points[k][0][indperm]), real(self.points[k][2][indperm]), s=self.point_size[mod(k, len(self.point_size))], alpha=1, edgecolor='none', zdir='z', color=self.point_color[mod(k, len(self.point_color))], marker=self.point_marker[mod(k, len(self.point_marker))]) elif self.point_style[k] == 'm': pnt_colors = array(self.point_color * int(ceil(num / float(len(self.point_color))))) pnt_colors = pnt_colors[0:num] pnt_colors = list(pnt_colors[indperm]) marker = self.point_marker[mod(k, len(self.point_marker))] s = self.point_size[mod(k, len(self.point_size))] self.axes.scatter(real(self.points[k][1][indperm]), -real(self.points[k][0][indperm]), real(self.points[k][2][indperm]), s=s, alpha=1, edgecolor='none', zdir='z', color=pnt_colors, marker=marker) elif self.point_style[k] == 'l': color = self.point_color[mod(k, len(self.point_color))] self.axes.plot(real(self.points[k][1]), 
-real(self.points[k][0]), real(self.points[k][2]), alpha=0.75, zdir='z', color=color) def plot_annotations(self): # -X and Y data are switched for plotting purposes for annotation in self.annotations: vec = annotation['position'] opts = {'fontsize': self.font_size, 'color': self.font_color, 'horizontalalignment': 'center', 'verticalalignment': 'center'} opts.update(annotation['opts']) self.axes.text(vec[1], -vec[0], vec[2], annotation['text'], **opts) def show(self): """ Display Bloch sphere and corresponding data sets. """ self.render(self.fig, self.axes) if self.run_from_ipython(): if self._shown: display(self.fig) else: self.fig.show() self._shown = True def save(self, name=None, format='png', dirc=None): """Saves Bloch sphere to file of type ``format`` in directory ``dirc``. Parameters ---------- name : str Name of saved image. Must include path and format as well. i.e. '/Users/Paul/Desktop/bloch.png' This overrides the 'format' and 'dirc' arguments. format : str Format of output image. dirc : str Directory for output images. Defaults to current working directory. Returns ------- File containing plot of Bloch sphere. """ self.render(self.fig, self.axes) if dirc: if not os.path.isdir(os.getcwd() + "/" + str(dirc)): os.makedirs(os.getcwd() + "/" + str(dirc)) if name is None: if dirc: self.fig.savefig(os.getcwd() + "/" + str(dirc) + '/bloch_' + str(self.savenum) + '.' + format) else: self.fig.savefig(os.getcwd() + '/bloch_' + str(self.savenum) + '.' + format) else: self.fig.savefig(name) self.savenum += 1 if self.fig: plt.close(self.fig) def _hide_tick_lines_and_labels(axis): ''' Set visible property of ticklines and ticklabels of an axis to False ''' for a in axis.get_ticklines() + axis.get_ticklabels(): a.set_visible(False) qutip-4.4.1/qutip/bloch3d.py000066400000000000000000000523441352460343600157320ustar00rootroot00000000000000# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. 
# All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### __all__ = ['Bloch3d'] import numpy as np from qutip.qobj import Qobj from qutip.expect import expect from qutip.operators import sigmax, sigmay, sigmaz class Bloch3d(): """Class for plotting data on a 3D Bloch sphere using mayavi. Valid data can be either points, vectors, or qobj objects corresponding to state vectors or density matrices. for a two-state system (or subsystem). 
Attributes ---------- fig : instance {None} User supplied Matplotlib Figure instance for plotting Bloch sphere. font_color : str {'black'} Color of font used for Bloch sphere labels. font_scale : float {0.08} Scale for font used for Bloch sphere labels. frame : bool {True} Draw frame for Bloch sphere frame_alpha : float {0.05} Sets transparency of Bloch sphere frame. frame_color : str {'gray'} Color of sphere wireframe. frame_num : int {8} Number of frame elements to draw. frame_radius : floats {0.005} Width of wireframe. point_color : list {['r', 'g', 'b', 'y']} List of colors for Bloch sphere point markers to cycle through. i.e. By default, points 0 and 4 will both be blue ('r'). point_mode : string {'sphere','cone','cube','cylinder','point'} Point marker shapes. point_size : float {0.075} Size of points on Bloch sphere. sphere_alpha : float {0.1} Transparency of Bloch sphere itself. sphere_color : str {'#808080'} Color of Bloch sphere. size : list {[500,500]} Size of Bloch sphere plot in pixels. Best to have both numbers the same otherwise you will have a Bloch sphere that looks like a football. vector_color : list {['r', 'g', 'b', 'y']} List of vector colors to cycle through. vector_width : int {3} Width of displayed vectors. view : list {[45,65]} Azimuthal and Elevation viewing angles. xlabel : list {['|x>', '']} List of strings corresponding to +x and -x axes labels, respectively. xlpos : list {[1.07,-1.07]} Positions of +x and -x labels respectively. ylabel : list {['|y>', '']} List of strings corresponding to +y and -y axes labels, respectively. ylpos : list {[1.07,-1.07]} Positions of +y and -y labels respectively. zlabel : list {['|0>', '|1>']} List of strings corresponding to +z and -z axes labels, respectively. zlpos : list {[1.07,-1.07]} Positions of +z and -z labels respectively. Notes ----- The use of mayavi for 3D rendering of the Bloch sphere comes with a few limitations: I) You can not embed a Bloch3d figure into a matplotlib window. 
II) The use of LaTex is not supported by the mayavi rendering engine. Therefore all labels must be defined using standard text. Of course you can post-process the generated figures later to add LaTeX using other software if needed. """ def __init__(self, fig=None): # ----check for mayavi----- try: from mayavi import mlab except: raise Exception("This function requires the mayavi module.") # ---Image options--- self.fig = None self.user_fig = None # check if user specified figure or axes. if fig: self.user_fig = fig # The size of the figure in inches, default = [500,500]. self.size = [500, 500] # Azimuthal and Elvation viewing angles, default = [45,65]. self.view = [45, 65] # Image background color self.bgcolor = 'white' # Image foreground color. Other options can override. self.fgcolor = 'black' # ---Sphere options--- # Color of Bloch sphere, default = #808080 self.sphere_color = '#808080' # Transparency of Bloch sphere, default = 0.1 self.sphere_alpha = 0.1 # ---Frame options--- # Draw frame? 
self.frame = True # number of lines to draw for frame self.frame_num = 8 # Color of wireframe, default = 'gray' self.frame_color = 'black' # Transparency of wireframe, default = 0.2 self.frame_alpha = 0.05 # Radius of frame lines self.frame_radius = 0.005 # --Axes--- # Axes color self.axes_color = 'black' # Transparency of axes self.axes_alpha = 0.4 # Radius of axes lines self.axes_radius = 0.005 # ---Labels--- # Labels for x-axis (in LaTex), default = ['$x$',''] self.xlabel = ['|x>', ''] # Position of x-axis labels, default = [1.2,-1.2] self.xlpos = [1.07, -1.07] # Labels for y-axis (in LaTex), default = ['$y$',''] self.ylabel = ['|y>', ''] # Position of y-axis labels, default = [1.1,-1.1] self.ylpos = [1.07, -1.07] # Labels for z-axis self.zlabel = ['|0>', '|1>'] # Position of z-axis labels, default = [1.05,-1.05] self.zlpos = [1.07, -1.07] # ---Font options--- # Color of fonts, default = 'black' self.font_color = 'black' # Size of fonts, default = 20 self.font_scale = 0.08 # ---Vector options--- # Object used for representing vectors on Bloch sphere. # List of colors for Bloch vectors, default = ['b','g','r','y'] self.vector_color = ['r', 'g', 'b', 'y'] # Transparency of vectors self.vector_alpha = 1.0 # Width of Bloch vectors, default = 2 self.vector_width = 2.0 # Height of vector head self.vector_head_height = 0.15 # Radius of vector head self.vector_head_radius = 0.075 # ---Point options--- # List of colors for Bloch point markers, default = ['b','g','r','y'] self.point_color = ['r', 'g', 'b', 'y'] # Size of point markers self.point_size = 0.06 # Shape of point markers # Options: 'cone' or 'cube' or 'cylinder' or 'point' or 'sphere'. 
# Default = 'sphere' self.point_mode = 'sphere' # ---Data lists--- # Data for point markers self.points = [] # Data for Bloch vectors self.vectors = [] # Number of times sphere has been saved self.savenum = 0 # Style of points, 'm' for multiple colors, 's' for single color self.point_style = [] def __str__(self): s = "" s += "Bloch3D data:\n" s += "-----------\n" s += "Number of points: " + str(len(self.points)) + "\n" s += "Number of vectors: " + str(len(self.vectors)) + "\n" s += "\n" s += "Bloch3D sphere properties:\n" s += "--------------------------\n" s += "axes_alpha: " + str(self.axes_alpha) + "\n" s += "axes_color: " + str(self.axes_color) + "\n" s += "axes_radius: " + str(self.axes_radius) + "\n" s += "bgcolor: " + str(self.bgcolor) + "\n" s += "fgcolor: " + str(self.fgcolor) + "\n" s += "font_color: " + str(self.font_color) + "\n" s += "font_scale: " + str(self.font_scale) + "\n" s += "frame: " + str(self.frame) + "\n" s += "frame_alpha: " + str(self.frame_alpha) + "\n" s += "frame_color: " + str(self.frame_color) + "\n" s += "frame_num: " + str(self.frame_num) + "\n" s += "frame_radius: " + str(self.frame_radius) + "\n" s += "point_color: " + str(self.point_color) + "\n" s += "point_mode: " + str(self.point_mode) + "\n" s += "point_size: " + str(self.point_size) + "\n" s += "sphere_alpha: " + str(self.sphere_alpha) + "\n" s += "sphere_color: " + str(self.sphere_color) + "\n" s += "size: " + str(self.size) + "\n" s += "vector_alpha: " + str(self.vector_alpha) + "\n" s += "vector_color: " + str(self.vector_color) + "\n" s += "vector_width: " + str(self.vector_width) + "\n" s += "vector_head_height: " + str(self.vector_head_height) + "\n" s += "vector_head_radius: " + str(self.vector_head_radius) + "\n" s += "view: " + str(self.view) + "\n" s += "xlabel: " + str(self.xlabel) + "\n" s += "xlpos: " + str(self.xlpos) + "\n" s += "ylabel: " + str(self.ylabel) + "\n" s += "ylpos: " + str(self.ylpos) + "\n" s += "zlabel: " + str(self.zlabel) + "\n" s += "zlpos: 
" + str(self.zlpos) + "\n" return s def clear(self): """Resets the Bloch sphere data sets to empty. """ self.points = [] self.vectors = [] self.point_style = [] def add_points(self, points, meth='s'): """Add a list of data points to bloch sphere. Parameters ---------- points : array/list Collection of data points. meth : str {'s','m'} Type of points to plot, use 'm' for multicolored. """ if not isinstance(points[0], (list, np.ndarray)): points = [[points[0]], [points[1]], [points[2]]] points = np.array(points) if meth == 's': if len(points[0]) == 1: pnts = np.array( [[points[0][0]], [points[1][0]], [points[2][0]]]) pnts = np.append(pnts, points, axis=1) else: pnts = points self.points.append(pnts) self.point_style.append('s') else: self.points.append(points) self.point_style.append('m') def add_states(self, state, kind='vector'): """Add a state vector Qobj to Bloch sphere. Parameters ---------- state : qobj Input state vector. kind : str {'vector','point'} Type of object to plot. """ if isinstance(state, Qobj): state = [state] for st in state: if kind == 'vector': vec = [expect(sigmax(), st), expect(sigmay(), st), expect(sigmaz(), st)] self.add_vectors(vec) elif kind == 'point': pnt = [expect(sigmax(), st), expect(sigmay(), st), expect(sigmaz(), st)] self.add_points(pnt) def add_vectors(self, vectors): """Add a list of vectors to Bloch sphere. Parameters ---------- vectors : array/list Array with vectors of unit length or smaller. """ if isinstance(vectors[0], (list, np.ndarray)): for vec in vectors: self.vectors.append(vec) else: self.vectors.append(vectors) def plot_vectors(self): """ Plots vectors on the Bloch sphere. 
""" from mayavi import mlab from tvtk.api import tvtk import matplotlib.colors as colors ii = 0 for k in range(len(self.vectors)): vec = np.array(self.vectors[k]) norm = np.linalg.norm(vec) theta = np.arccos(vec[2] / norm) phi = np.arctan2(vec[1], vec[0]) vec -= 0.5 * self.vector_head_height * \ np.array([np.sin(theta) * np.cos(phi), np.sin(theta) * np.sin(phi), np.cos(theta)]) color = colors.colorConverter.to_rgb( self.vector_color[np.mod(k, len(self.vector_color))]) mlab.plot3d([0, vec[0]], [0, vec[1]], [0, vec[2]], name='vector' + str(ii), tube_sides=100, line_width=self.vector_width, opacity=self.vector_alpha, color=color) cone = tvtk.ConeSource(height=self.vector_head_height, radius=self.vector_head_radius, resolution=100) cone_mapper = tvtk.PolyDataMapper(input=cone.output) prop = tvtk.Property(opacity=self.vector_alpha, color=color) cc = tvtk.Actor(mapper=cone_mapper, property=prop) cc.rotate_z(np.degrees(phi)) cc.rotate_y(-90 + np.degrees(theta)) cc.position = vec self.fig.scene.add_actor(cc) def plot_points(self): """ Plots points on the Bloch sphere. 
""" from mayavi import mlab import matplotlib.colors as colors for k in range(len(self.points)): num = len(self.points[k][0]) dist = [np.sqrt(self.points[k][0][j] ** 2 + self.points[k][1][j] ** 2 + self.points[k][2][j] ** 2) for j in range(num)] if any(abs(dist - dist[0]) / dist[0] > 1e-12): # combine arrays so that they can be sorted together zipped = zip(dist, range(num)) zipped.sort() # sort rates from lowest to highest dist, indperm = zip(*zipped) indperm = np.array(indperm) else: indperm = range(num) if self.point_style[k] == 's': color = colors.colorConverter.to_rgb( self.point_color[np.mod(k, len(self.point_color))]) mlab.points3d( self.points[k][0][indperm], self.points[k][1][indperm], self.points[k][2][indperm], figure=self.fig, resolution=100, scale_factor=self.point_size, mode=self.point_mode, color=color) elif self.point_style[k] == 'm': pnt_colors = np.array(self.point_color * np.ceil( num / float(len(self.point_color)))) pnt_colors = pnt_colors[0:num] pnt_colors = list(pnt_colors[indperm]) for kk in range(num): mlab.points3d( self.points[k][0][ indperm[kk]], self.points[k][1][indperm[kk]], self.points[k][2][ indperm[kk]], figure=self.fig, resolution=100, scale_factor=self.point_size, mode=self.point_mode, color=colors.colorConverter.to_rgb(pnt_colors[kk])) def make_sphere(self): """ Plots Bloch sphere and data sets. 
""" # setup plot # Figure instance for Bloch sphere plot from mayavi import mlab import matplotlib.colors as colors if self.user_fig: self.fig = self.user_fig else: self.fig = mlab.figure( 1, size=self.size, bgcolor=colors.colorConverter.to_rgb(self.bgcolor), fgcolor=colors.colorConverter.to_rgb(self.fgcolor)) sphere = mlab.points3d( 0, 0, 0, figure=self.fig, scale_mode='none', scale_factor=2, color=colors.colorConverter.to_rgb(self.sphere_color), resolution=100, opacity=self.sphere_alpha, name='bloch_sphere') # Thse commands make the sphere look better sphere.actor.property.specular = 0.45 sphere.actor.property.specular_power = 5 sphere.actor.property.backface_culling = True # make frame for sphere surface if self.frame: theta = np.linspace(0, 2 * np.pi, 100) for angle in np.linspace(-np.pi, np.pi, self.frame_num): xlat = np.cos(theta) * np.cos(angle) ylat = np.sin(theta) * np.cos(angle) zlat = np.ones_like(theta) * np.sin(angle) xlon = np.sin(angle) * np.sin(theta) ylon = np.cos(angle) * np.sin(theta) zlon = np.cos(theta) mlab.plot3d( xlat, ylat, zlat, color=colors.colorConverter.to_rgb(self.frame_color), opacity=self.frame_alpha, tube_radius=self.frame_radius) mlab.plot3d( xlon, ylon, zlon, color=colors.colorConverter.to_rgb(self.frame_color), opacity=self.frame_alpha, tube_radius=self.frame_radius) # add axes axis = np.linspace(-1.0, 1.0, 10) other = np.zeros_like(axis) mlab.plot3d( axis, other, other, color=colors.colorConverter.to_rgb(self.axes_color), tube_radius=self.axes_radius, opacity=self.axes_alpha) mlab.plot3d( other, axis, other, color=colors.colorConverter.to_rgb(self.axes_color), tube_radius=self.axes_radius, opacity=self.axes_alpha) mlab.plot3d( other, other, axis, color=colors.colorConverter.to_rgb(self.axes_color), tube_radius=self.axes_radius, opacity=self.axes_alpha) # add data to sphere self.plot_points() self.plot_vectors() # #add labels mlab.text3d(0, 0, self.zlpos[0], self.zlabel[0], color=colors.colorConverter.to_rgb(self.font_color), 
scale=self.font_scale) mlab.text3d(0, 0, self.zlpos[1], self.zlabel[1], color=colors.colorConverter.to_rgb(self.font_color), scale=self.font_scale) mlab.text3d(self.xlpos[0], 0, 0, self.xlabel[0], color=colors.colorConverter.to_rgb(self.font_color), scale=self.font_scale) mlab.text3d(self.xlpos[1], 0, 0, self.xlabel[1], color=colors.colorConverter.to_rgb(self.font_color), scale=self.font_scale) mlab.text3d(0, self.ylpos[0], 0, self.ylabel[0], color=colors.colorConverter.to_rgb(self.font_color), scale=self.font_scale) mlab.text3d(0, self.ylpos[1], 0, self.ylabel[1], color=colors.colorConverter.to_rgb(self.font_color), scale=self.font_scale) def show(self): """ Display the Bloch sphere and corresponding data sets. """ from mayavi import mlab self.make_sphere() mlab.view(azimuth=self.view[0], elevation=self.view[1], distance=5) if self.fig: mlab.show() def save(self, name=None, format='png', dirc=None): """Saves Bloch sphere to file of type ``format`` in directory ``dirc``. Parameters ---------- name : str Name of saved image. Must include path and format as well. i.e. '/Users/Paul/Desktop/bloch.png' This overrides the 'format' and 'dirc' arguments. format : str Format of output image. Default is 'png'. dirc : str Directory for output images. Defaults to current working directory. Returns ------- File containing plot of Bloch sphere. """ from mayavi import mlab import os self.make_sphere() mlab.view(azimuth=self.view[0], elevation=self.view[1], distance=5) if dirc: if not os.path.isdir(os.getcwd() + "/" + str(dirc)): os.makedirs(os.getcwd() + "/" + str(dirc)) if name is None: if dirc: mlab.savefig(os.getcwd() + "/" + str(dirc) + '/bloch_' + str(self.savenum) + '.' + format) else: mlab.savefig(os.getcwd() + '/bloch_' + str(self.savenum) + '.' 
+ format) else: mlab.savefig(name) self.savenum += 1 if self.fig: mlab.close(self.fig) qutip-4.4.1/qutip/bloch_redfield.py000066400000000000000000000525561352460343600173460ustar00rootroot00000000000000# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, QuSTaR # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### __all__ = ['brmesolve', 'bloch_redfield_solve'] import numpy as np import os import time import types import warnings from functools import partial import scipy.integrate import scipy.sparse as sp from qutip.qobj import Qobj, isket from qutip.states import ket2dm from qutip.operators import qdiags from qutip.superoperator import spre, spost, vec2mat, mat2vec, vec2mat_index from qutip.expect import expect from qutip.solver import Options, Result, config, _solver_safety_check from qutip.cy.spmatfuncs import cy_ode_rhs from qutip.cy.spconvert import dense2D_to_fastcsr_fmode from qutip.superoperator import liouvillian from qutip.interpolate import Cubic_Spline from qutip.cy.spconvert import arr_coo2fast from qutip.cy.br_codegen import BR_Codegen from qutip.ui.progressbar import BaseProgressBar, TextProgressBar from qutip.cy.utilities import _cython_build_cleanup from qutip.expect import expect_rho_vec from qutip.rhs_generate import _td_format_check from qutip.cy.openmp.utilities import check_use_openmp import qutip.settings as qset from qutip.cy.br_tensor import bloch_redfield_tensor # ----------------------------------------------------------------------------- # Solve the Bloch-Redfield master equation # def brmesolve(H, psi0, tlist, a_ops=[], e_ops=[], c_ops=[], args={}, use_secular=True, sec_cutoff = 0.1, tol=qset.atol, spectra_cb=None, options=None, progress_bar=None, _safe_mode=True, verbose=False): """ Solves for the dynamics of a system using the Bloch-Redfield master equation, given an input Hamiltonian, Hermitian bath-coupling terms and their associated spectrum functions, as well as possible Lindblad collapse operators. For time-independent systems, the Hamiltonian must be given as a Qobj, whereas the bath-coupling terms (a_ops), must be written as a nested list of operator - spectrum function pairs, where the frequency is specified by the `w` variable. 
*Example* a_ops = [[a+a.dag(),lambda w: 0.2*(w>=0)]] For time-dependent systems, the Hamiltonian, a_ops, and Lindblad collapse operators (c_ops), can be specified in the QuTiP string-based time-dependent format. For the a_op spectra, the frequency variable must be `w`, and the string cannot contain any other variables other than the possibility of having a time-dependence through the time variable `t`: *Example* a_ops = [[a+a.dag(), '0.2*exp(-t)*(w>=0)']] It is also possible to use Cubic_Spline objects for time-dependence. In the case of a_ops, Cubic_Splines must be passed as a tuple: *Example* a_ops = [ [a+a.dag(), ( f(w), g(t)] ] where f(w) and g(t) are strings or Cubic_spline objects for the bath spectrum and time-dependence, respectively. Finally, if one has bath-couplimg terms of the form H = f(t)*a + conj[f(t)]*a.dag(), then the correct input format is *Example* a_ops = [ [(a,a.dag()), (f(w), g1(t), g2(t))],... ] where f(w) is the spectrum of the operators while g1(t) and g2(t) are the time-dependence of the operators `a` and `a.dag()`, respectively Parameters ---------- H : Qobj / list System Hamiltonian given as a Qobj or nested list in string-based format. psi0: Qobj Initial density matrix or state vector (ket). tlist : array_like List of times for evaluating evolution a_ops : list Nested list of Hermitian system operators that couple to the bath degrees of freedom, along with their associated spectra. e_ops : list List of operators for which to evaluate expectation values. c_ops : list List of system collapse operators, or nested list in string-based format. args : dict Placeholder for future implementation, kept for API consistency. use_secular : bool {True} Use secular approximation when evaluating bath-coupling terms. sec_cutoff : float {0.1} Cutoff for secular approximation. tol : float {qutip.setttings.atol} Tolerance used for removing small values after basis transformation. spectra_cb : list DEPRECIATED. Do not use. 
options : :class:`qutip.solver.Options` Options for the solver. progress_bar : BaseProgressBar Optional instance of BaseProgressBar, or a subclass thereof, for showing the progress of the simulation. Returns ------- result: :class:`qutip.solver.Result` An instance of the class :class:`qutip.solver.Result`, which contains either an array of expectation values, for operators given in e_ops, or a list of states for the times specified by `tlist`. """ _prep_time = time.time() #This allows for passing a list of time-independent Qobj #as allowed by mesolve if isinstance(H, list): if np.all([isinstance(h,Qobj) for h in H]): H = sum(H) if isinstance(c_ops, Qobj): c_ops = [c_ops] if isinstance(e_ops, Qobj): e_ops = [e_ops] if isinstance(e_ops, dict): e_ops_dict = e_ops e_ops = [e for e in e_ops.values()] else: e_ops_dict = None if not (spectra_cb is None): warnings.warn("The use of spectra_cb is depreciated.", DeprecationWarning) _a_ops = [] for kk, a in enumerate(a_ops): _a_ops.append([a,spectra_cb[kk]]) a_ops = _a_ops if _safe_mode: _solver_safety_check(H, psi0, a_ops+c_ops, e_ops, args) # check for type (if any) of time-dependent inputs _, n_func, n_str = _td_format_check(H, a_ops+c_ops) if progress_bar is None: progress_bar = BaseProgressBar() elif progress_bar is True: progress_bar = TextProgressBar() if options is None: options = Options() if (not options.rhs_reuse) or (not config.tdfunc): # reset config collapse and time-dependence flags to default values config.reset() #check if should use OPENMP check_use_openmp(options) if n_str == 0: R, ekets = bloch_redfield_tensor(H, a_ops, spectra_cb=None, c_ops=c_ops, use_secular=use_secular, sec_cutoff=sec_cutoff) output = Result() output.solver = "brmesolve" output.times = tlist results = bloch_redfield_solve(R, ekets, psi0, tlist, e_ops, options, progress_bar=progress_bar) if e_ops: output.expect = results else: output.states = results return output elif n_str != 0 and n_func == 0: output = _td_brmesolve(H, psi0, tlist, 
a_ops=a_ops, e_ops=e_ops, c_ops=c_ops, args=args, use_secular=use_secular, sec_cutoff=sec_cutoff, tol=tol, options=options, progress_bar=progress_bar, _safe_mode=_safe_mode, verbose=verbose, _prep_time=_prep_time) return output else: raise Exception('Cannot mix func and str formats.') # ----------------------------------------------------------------------------- # Evolution of the Bloch-Redfield master equation given the Bloch-Redfield # tensor. # def bloch_redfield_solve(R, ekets, rho0, tlist, e_ops=[], options=None, progress_bar=None): """ Evolve the ODEs defined by Bloch-Redfield master equation. The Bloch-Redfield tensor can be calculated by the function :func:`bloch_redfield_tensor`. Parameters ---------- R : :class:`qutip.qobj` Bloch-Redfield tensor. ekets : array of :class:`qutip.qobj` Array of kets that make up a basis tranformation for the eigenbasis. rho0 : :class:`qutip.qobj` Initial density matrix. tlist : *list* / *array* List of times for :math:`t`. e_ops : list of :class:`qutip.qobj` / callback function List of operators for which to evaluate expectation values. options : :class:`qutip.Qdeoptions` Options for the ODE solver. Returns ------- output: :class:`qutip.solver` An instance of the class :class:`qutip.solver`, which contains either an *array* of expectation values for the times specified by `tlist`. """ if options is None: options = Options() if options.tidy: R.tidyup() if progress_bar is None: progress_bar = BaseProgressBar() elif progress_bar is True: progress_bar = TextProgressBar() # # check initial state # if isket(rho0): # Got a wave function as initial state: convert to density matrix. 
rho0 = rho0 * rho0.dag() # # prepare output array # n_tsteps = len(tlist) dt = tlist[1] - tlist[0] result_list = [] # # transform the initial density matrix and the e_ops opterators to the # eigenbasis # rho_eb = rho0.transform(ekets) e_eb_ops = [e.transform(ekets) for e in e_ops] for e_eb in e_eb_ops: if e_eb.isherm: result_list.append(np.zeros(n_tsteps, dtype=float)) else: result_list.append(np.zeros(n_tsteps, dtype=complex)) # # setup integrator # initial_vector = mat2vec(rho_eb.full()) r = scipy.integrate.ode(cy_ode_rhs) r.set_f_params(R.data.data, R.data.indices, R.data.indptr) r.set_integrator('zvode', method=options.method, order=options.order, atol=options.atol, rtol=options.rtol, nsteps=options.nsteps, first_step=options.first_step, min_step=options.min_step, max_step=options.max_step) r.set_initial_value(initial_vector, tlist[0]) # # start evolution # dt = np.diff(tlist) progress_bar.start(n_tsteps) for t_idx, _ in enumerate(tlist): progress_bar.update(t_idx) if not r.successful(): break rho_eb.data = dense2D_to_fastcsr_fmode(vec2mat(r.y), rho0.shape[0], rho0.shape[1]) # calculate all the expectation values, or output rho_eb if no # expectation value operators are given if e_ops: rho_eb_tmp = Qobj(rho_eb) for m, e in enumerate(e_eb_ops): result_list[m][t_idx] = expect(e, rho_eb_tmp) else: result_list.append(rho_eb.transform(ekets, True)) if t_idx < n_tsteps - 1: r.integrate(r.t + dt[t_idx]) progress_bar.finished() return result_list def _td_brmesolve(H, psi0, tlist, a_ops=[], e_ops=[], c_ops=[], args={}, use_secular=True, sec_cutoff=0.1, tol=qset.atol, options=None, progress_bar=None,_safe_mode=True, verbose=False, _prep_time=0): if isket(psi0): rho0 = ket2dm(psi0) else: rho0 = psi0 nrows = rho0.shape[0] H_terms = [] H_td_terms = [] H_obj = [] A_terms = [] A_td_terms = [] C_terms = [] C_td_terms = [] CA_obj = [] spline_count = [0,0] coupled_ops = [] coupled_lengths = [] coupled_spectra = [] if isinstance(H, Qobj): H_terms.append(H.full('f')) 
H_td_terms.append('1') else: for kk, h in enumerate(H): if isinstance(h, Qobj): H_terms.append(h.full('f')) H_td_terms.append('1') elif isinstance(h, list): H_terms.append(h[0].full('f')) if isinstance(h[1], Cubic_Spline): H_obj.append(h[1].coeffs) spline_count[0] += 1 H_td_terms.append(h[1]) else: raise Exception('Invalid Hamiltonian specification.') for kk, c in enumerate(c_ops): if isinstance(c, Qobj): C_terms.append(c.full('f')) C_td_terms.append('1') elif isinstance(c, list): C_terms.append(c[0].full('f')) if isinstance(c[1], Cubic_Spline): CA_obj.append(c[1].coeffs) spline_count[0] += 1 C_td_terms.append(c[1]) else: raise Exception('Invalid collapse operator specification.') coupled_offset = 0 for kk, a in enumerate(a_ops): if isinstance(a, list): if isinstance(a[0], Qobj): A_terms.append(a[0].full('f')) A_td_terms.append(a[1]) if isinstance(a[1], tuple): if not len(a[1])==2: raise Exception('Tuple must be len=2.') if isinstance(a[1][0],Cubic_Spline): spline_count[1] += 1 if isinstance(a[1][1],Cubic_Spline): spline_count[1] += 1 elif isinstance(a[0], tuple): if not isinstance(a[1], tuple): raise Exception('Invalid bath-coupling specification.') if (len(a[0])+1) != len(a[1]): raise Exception('BR a_ops tuple lengths not compatible.') coupled_ops.append(kk+coupled_offset) coupled_lengths.append(len(a[0])) coupled_spectra.append(a[1][0]) coupled_offset += len(a[0])-1 if isinstance(a[1][0],Cubic_Spline): spline_count[1] += 1 for nn, _a in enumerate(a[0]): A_terms.append(_a.full('f')) A_td_terms.append(a[1][nn+1]) if isinstance(a[1][nn+1],Cubic_Spline): CA_obj.append(a[1][nn+1].coeffs) spline_count[1] += 1 else: raise Exception('Invalid bath-coupling specification.') string_list = [] for kk,_ in enumerate(H_td_terms): string_list.append("H_terms[{0}]".format(kk)) for kk,_ in enumerate(H_obj): string_list.append("H_obj[{0}]".format(kk)) for kk,_ in enumerate(C_td_terms): string_list.append("C_terms[{0}]".format(kk)) for kk,_ in enumerate(CA_obj): 
string_list.append("CA_obj[{0}]".format(kk)) for kk,_ in enumerate(A_td_terms): string_list.append("A_terms[{0}]".format(kk)) #Add nrows to parameters string_list.append('nrows') for name, value in args.items(): if isinstance(value, np.ndarray): raise TypeError('NumPy arrays not valid args for BR solver.') else: string_list.append(str(value)) parameter_string = ",".join(string_list) if verbose: print('BR prep time:', time.time()-_prep_time) # # generate and compile new cython code if necessary # if not options.rhs_reuse or config.tdfunc is None: if options.rhs_filename is None: config.tdname = "rhs" + str(os.getpid()) + str(config.cgen_num) else: config.tdname = opt.rhs_filename if verbose: _st = time.time() cgen = BR_Codegen(h_terms=len(H_terms), h_td_terms=H_td_terms, h_obj=H_obj, c_terms=len(C_terms), c_td_terms=C_td_terms, c_obj=CA_obj, a_terms=len(A_terms), a_td_terms=A_td_terms, spline_count=spline_count, coupled_ops = coupled_ops, coupled_lengths = coupled_lengths, coupled_spectra = coupled_spectra, config=config, sparse=False, use_secular = use_secular, sec_cutoff = sec_cutoff, args=args, use_openmp=options.use_openmp, omp_thresh=qset.openmp_thresh if qset.has_openmp else None, omp_threads=options.num_cpus, atol=tol) cgen.generate(config.tdname + ".pyx") code = compile('from ' + config.tdname + ' import cy_td_ode_rhs', '', 'exec') exec(code, globals()) config.tdfunc = cy_td_ode_rhs if verbose: print('BR compile time:', time.time()-_st) initial_vector = mat2vec(rho0.full()).ravel() _ode = scipy.integrate.ode(config.tdfunc) code = compile('_ode.set_f_params(' + parameter_string + ')', '', 'exec') _ode.set_integrator('zvode', method=options.method, order=options.order, atol=options.atol, rtol=options.rtol, nsteps=options.nsteps, first_step=options.first_step, min_step=options.min_step, max_step=options.max_step) _ode.set_initial_value(initial_vector, tlist[0]) exec(code, locals()) # # prepare output array # n_tsteps = len(tlist) e_sops_data = [] output = 
Result() output.solver = "brmesolve" output.times = tlist if options.store_states: output.states = [] if isinstance(e_ops, types.FunctionType): n_expt_op = 0 expt_callback = True elif isinstance(e_ops, list): n_expt_op = len(e_ops) expt_callback = False if n_expt_op == 0: # fall back on storing states output.states = [] options.store_states = True else: output.expect = [] output.num_expect = n_expt_op for op in e_ops: e_sops_data.append(spre(op).data) if op.isherm: output.expect.append(np.zeros(n_tsteps)) else: output.expect.append(np.zeros(n_tsteps, dtype=complex)) else: raise TypeError("Expectation parameter must be a list or a function") # # start evolution # if type(progress_bar)==BaseProgressBar and verbose: _run_time = time.time() progress_bar.start(n_tsteps) rho = Qobj(rho0) dt = np.diff(tlist) for t_idx, t in enumerate(tlist): progress_bar.update(t_idx) if not _ode.successful(): raise Exception("ODE integration error: Try to increase " "the allowed number of substeps by increasing " "the nsteps parameter in the Options class.") if options.store_states or expt_callback: rho.data = dense2D_to_fastcsr_fmode(vec2mat(_ode.y), rho.shape[0], rho.shape[1]) if options.store_states: output.states.append(Qobj(rho, isherm=True)) if expt_callback: # use callback method e_ops(t, rho) for m in range(n_expt_op): if output.expect[m].dtype == complex: output.expect[m][t_idx] = expect_rho_vec(e_sops_data[m], _ode.y, 0) else: output.expect[m][t_idx] = expect_rho_vec(e_sops_data[m], _ode.y, 1) if t_idx < n_tsteps - 1: _ode.integrate(_ode.t + dt[t_idx]) progress_bar.finished() if type(progress_bar)==BaseProgressBar and verbose: print('BR runtime:', time.time()-_run_time) if (not options.rhs_reuse) and (config.tdname is not None): _cython_build_cleanup(config.tdname) if options.store_final_state: rho.data = dense2D_to_fastcsr_fmode(vec2mat(_ode.y), rho.shape[0], rho.shape[1]) output.final_state = Qobj(rho, dims=rho0.dims, isherm=True) return 
outputqutip-4.4.1/qutip/cite.py000066400000000000000000000073531352460343600153400ustar00rootroot00000000000000# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### """ Citation generator for QuTiP """ import sys import os __all__ = ['cite'] def cite(save=False, path=None): """ Citation information and bibtex generator for QuTiP Parameters ---------- save: bool The flag specifying whether to save the .bib file. path: str The complete directory path to generate the bibtex file. If not specified then the citation will be generated in cwd """ citation = ["@article{qutip2,", "doi = {10.1016/j.cpc.2012.11.019},", "url = {https://doi.org/10.1016/j.cpc.2012.11.019},", "year = {2013},", "month = {apr},", "publisher = {Elsevier {BV}},", "volume = {184},", "number = {4},", "pages = {1234--1240},", "author = {J.R. Johansson and P.D. Nation and F. Nori},", "title = {{QuTiP} 2: A {P}ython framework for the dynamics of open quantum systems},", "journal = {Computer Physics Communications}", "}", "@article{qutip1,", "doi = {10.1016/j.cpc.2012.02.021},", "url = {https://doi.org/10.1016/j.cpc.2012.02.021},", "year = {2012},", "month = {aug},", "publisher = {Elsevier {BV}},", "volume = {183},", "number = {8},", "pages = {1760--1772},", "author = {J.R. Johansson and P.D. Nation and F. Nori},", "title = {{QuTiP}: An open-source {P}ython framework for the dynamics of open quantum systems},", "journal = {Computer Physics Communications}", "}"] print("\n".join(citation)) if not path: path = os.getcwd() if save: filename = "qutip.bib" with open(os.path.join(path, filename), 'w') as f: f.write("\n".join(citation)) if __name__ == "__main__": cite() qutip-4.4.1/qutip/configrc.py000066400000000000000000000127011352460343600161770ustar00rootroot00000000000000# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, QuSTaR. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. 
Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### import os import qutip import qutip.settings as qset import warnings try: import ConfigParser as configparser #py27 except: import configparser #py3x def has_qutip_rc(): """ Checks to see if the qutiprc file exists in the default location, i.e. 
HOME/.qutip/qutiprc """ qutip_conf_dir = os.path.join(os.path.expanduser("~"), '.qutip') if os.path.exists(qutip_conf_dir): qutip_rc_file = os.path.join(qutip_conf_dir,'qutiprc') qrc_exists = os.path.isfile(qutip_rc_file) if qrc_exists: return True, qutip_rc_file else: return False, '' else: return False, '' def generate_qutiprc(): """ Generate a blank qutiprc file. """ # Check for write access to home dir if not os.access(os.path.expanduser("~"), os.W_OK): return False qutip_conf_dir = os.path.join(os.path.expanduser("~"), '.qutip') if not os.path.exists(qutip_conf_dir): try: os.mkdir(qutip_conf_dir) except: warnings.warn('Cannot write config file to user home dir.') return False qutip_rc_file = os.path.join(qutip_conf_dir,'qutiprc') qrc_exists = os.path.isfile(qutip_rc_file) if qrc_exists: #Do not overwrite return False else: #Write a basic file with qutip section cfgfile = open(qutip_rc_file,'w') config = configparser.ConfigParser() config.add_section('qutip') config.write(cfgfile) cfgfile.close() return True def load_rc_config(rc_file): """ Loads the configuration data from the qutiprc file """ config = configparser.ConfigParser() _valid_keys ={'auto_tidyup' : config.getboolean, 'auto_herm' : config.getboolean, 'atol': config.getfloat, 'auto_tidyup_atol' : config.getfloat, 'num_cpus' : config.getint, 'debug' : config.getboolean, 'log_handler' : config.getboolean, 'colorblind_safe' : config.getboolean, 'openmp_thresh': config.getint} config.read(rc_file) if config.has_section('qutip'): opts = config.options('qutip') for op in opts: if op in _valid_keys.keys(): setattr(qset, op, _valid_keys[op]('qutip',op)) else: raise Exception('Invalid config variable in qutiprc.') else: raise configparser.NoSectionError('qutip') if config.has_section('compiler'): _valid_keys = ['CC', 'CXX'] opts = config.options('compiler') for op in opts: up_op = op.upper() if up_op in _valid_keys: os.environ[up_op] = config.get('compiler', op) else: raise Exception('Invalid config variable 
in qutiprc.') def has_rc_key(rc_file, key): config = configparser.ConfigParser() config.read(rc_file) if config.has_section('qutip'): opts = config.options('qutip') if key in opts: return True else: return False else: raise configparser.NoSectionError('qutip') def write_rc_key(rc_file, key, value): """ Writes a single key value to the qutiprc file Parameters ---------- rc_file : str String specifying file location. key : str The key name to be written. value : int/float/bool Value corresponding to given key. """ if not os.access(os.path.expanduser("~"), os.W_OK): return cfgfile = open(rc_file,'w') config = configparser.ConfigParser() if not config.has_section('qutip'): config.add_section('qutip') config.set('qutip',key,str(value)) config.write(cfgfile) cfgfile.close() qutip-4.4.1/qutip/configspec.ini000066400000000000000000000005411352460343600166530ustar00rootroot00000000000000auto_tidyup = boolean(default=True) auto_herm = boolean(default=True) atol = float(default=1e-12) auto_tidyup_atol = float(default=1e-12) # num_cpus is set at import, but we allow it # to be overriden here, too. num_cpus = integer(default=0) debug = boolean(default=False) log_handler = string(default=default) colorblind_safe = boolean(default=False) qutip-4.4.1/qutip/continuous_variables.py000066400000000000000000000222261352460343600206460ustar00rootroot00000000000000# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. 
Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### """ This module contains a collection functions for calculating continuous variable quantities from fock-basis representation of the state of multi-mode fields. """ __all__ = ['correlation_matrix', 'covariance_matrix', 'correlation_matrix_field', 'correlation_matrix_quadrature', 'wigner_covariance_matrix', 'logarithmic_negativity'] from qutip.expect import expect import numpy as np def correlation_matrix(basis, rho=None): """ Given a basis set of operators :math:`\\{a\\}_n`, calculate the correlation matrix: .. math:: C_{mn} = \\langle a_m a_n \\rangle Parameters ---------- basis : list List of operators that defines the basis for the correlation matrix. 
rho : Qobj Density matrix for which to calculate the correlation matrix. If `rho` is `None`, then a matrix of correlation matrix operators is returned instead of expectation values of those operators. Returns ------- corr_mat : ndarray A 2-dimensional *array* of correlation values or operators. """ if rho is None: # return array of operators return np.array([[op1 * op2 for op1 in basis] for op2 in basis], dtype=object) else: # return array of expectation values return np.array([[expect(op1 * op2, rho) for op1 in basis] for op2 in basis], dtype=object) def covariance_matrix(basis, rho, symmetrized=True): """ Given a basis set of operators :math:`\{a\}_n`, calculate the covariance matrix: .. math:: V_{mn} = \\frac{1}{2}\\langle a_m a_n + a_n a_m \\rangle - \\langle a_m \\rangle \\langle a_n\\rangle or, if of the optional argument `symmetrized=False`, .. math:: V_{mn} = \\langle a_m a_n\\rangle - \\langle a_m \\rangle \\langle a_n\\rangle Parameters ---------- basis : list List of operators that defines the basis for the covariance matrix. rho : Qobj Density matrix for which to calculate the covariance matrix. symmetrized : bool {True, False} Flag indicating whether the symmetrized (default) or non-symmetrized correlation matrix is to be calculated. Returns ------- corr_mat : ndarray A 2-dimensional array of covariance values. """ if symmetrized: return np.array([[0.5 * expect(op1 * op2 + op2 * op1, rho) - expect(op1, rho) * expect(op2, rho) for op1 in basis] for op2 in basis], dtype=object) else: return np.array([[expect(op1 * op2, rho) - expect(op1, rho) * expect(op2, rho) for op1 in basis] for op2 in basis], dtype=object) def correlation_matrix_field(a1, a2, rho=None): """ Calculates the correlation matrix for given field operators :math:`a_1` and :math:`a_2`. If a density matrix is given the expectation values are calculated, otherwise a matrix with operators is returned. Parameters ---------- a1 : Qobj Field operator for mode 1. 
a2 : Qobj Field operator for mode 2. rho : Qobj Density matrix for which to calculate the covariance matrix. Returns ------- cov_mat : ndarray Array of complex numbers or Qobj's A 2-dimensional *array* of covariance values, or, if rho=0, a matrix of operators. """ basis = [a1, a1.dag(), a2, a2.dag()] return correlation_matrix(basis, rho) def correlation_matrix_quadrature(a1, a2, rho=None): """ Calculate the quadrature correlation matrix with given field operators :math:`a_1` and :math:`a_2`. If a density matrix is given the expectation values are calculated, otherwise a matrix with operators is returned. Parameters ---------- a1 : Qobj Field operator for mode 1. a2 : Qobj Field operator for mode 2. rho : Qobj Density matrix for which to calculate the covariance matrix. Returns ------- corr_mat : ndarray Array of complex numbers or Qobj's A 2-dimensional *array* of covariance values for the field quadratures, or, if rho=0, a matrix of operators. """ x1 = (a1 + a1.dag()) / np.sqrt(2) p1 = -1j * (a1 - a1.dag()) / np.sqrt(2) x2 = (a2 + a2.dag()) / np.sqrt(2) p2 = -1j * (a2 - a2.dag()) / np.sqrt(2) basis = [x1, p1, x2, p2] return correlation_matrix(basis, rho) def wigner_covariance_matrix(a1=None, a2=None, R=None, rho=None): """ Calculates the Wigner covariance matrix :math:`V_{ij} = \\frac{1}{2}(R_{ij} + R_{ji})`, given the quadrature correlation matrix :math:`R_{ij} = \\langle R_{i} R_{j}\\rangle - \\langle R_{i}\\rangle \\langle R_{j}\\rangle`, where :math:`R = (q_1, p_1, q_2, p_2)^T` is the vector with quadrature operators for the two modes. Alternatively, if `R = None`, and if annihilation operators `a1` and `a2` for the two modes are supplied instead, the quadrature correlation matrix is constructed from the annihilation operators before then the covariance matrix is calculated. Parameters ---------- a1 : Qobj Field operator for mode 1. a2 : Qobj Field operator for mode 2. R : ndarray The quadrature correlation matrix. 
rho : Qobj Density matrix for which to calculate the covariance matrix. Returns ------- cov_mat : ndarray A 2-dimensional array of covariance values. """ if R is not None: if rho is None: return np.array([[0.5 * np.real(R[i, j] + R[j, i]) for i in range(4)] for j in range(4)], dtype=object) else: return np.array([[0.5 * np.real(expect(R[i, j] + R[j, i], rho)) for i in range(4)] for j in range(4)], dtype=object) elif a1 is not None and a2 is not None: if rho is not None: x1 = (a1 + a1.dag()) / np.sqrt(2) p1 = -1j * (a1 - a1.dag()) / np.sqrt(2) x2 = (a2 + a2.dag()) / np.sqrt(2) p2 = -1j * (a2 - a2.dag()) / np.sqrt(2) return covariance_matrix([x1, p1, x2, p2], rho) else: raise ValueError("Must give rho if using field operators " + "(a1 and a2)") else: raise ValueError("Must give either field operators (a1 and a2) " + "or a precomputed correlation matrix (R)") def logarithmic_negativity(V): """ Calculates the logarithmic negativity given a symmetrized covariance matrix, see :func:`qutip.continous_variables.covariance_matrix`. Note that the two-mode field state that is described by `V` must be Gaussian for this function to applicable. Parameters ---------- V : *2d array* The covariance matrix. Returns ------- N : float The logarithmic negativity for the two-mode Gaussian state that is described by the the Wigner covariance matrix V. 
""" A = V[0:2, 0:2] B = V[2:4, 2:4] C = V[0:2, 2:4] sigma = np.linalg.det(A) + np.linalg.det(B) - 2 * np.linalg.det(C) nu_ = sigma / 2 - np.sqrt(sigma ** 2 - 4 * np.linalg.det(V)) / 2 if nu_ < 0.0: return 0.0 nu = np.sqrt(nu_) lognu = -np.log(2 * nu) logneg = max(0, lognu) return logneg qutip-4.4.1/qutip/control/000077500000000000000000000000001352460343600155125ustar00rootroot00000000000000qutip-4.4.1/qutip/control/__init__.py000066400000000000000000000000421352460343600176170ustar00rootroot00000000000000from qutip.control.grape import * qutip-4.4.1/qutip/control/cy_grape.pyx000066400000000000000000000110531352460343600200450ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### import numpy as np cimport numpy as np cimport cython cimport libc.math DTYPE = np.float64 ctypedef np.float64_t DTYPE_t ITYPE = np.int32 ctypedef np.int32_t ITYPE_t CTYPE = np.complex128 ctypedef np.complex128_t CTYPE_t CTYPE = np.int64 ctypedef np.int64_t LTYPE_t @cython.boundscheck(False) @cython.wraparound(False) cpdef CTYPE_t cy_overlap(object op1, object op2): cdef Py_ssize_t row cdef CTYPE_t tr = 0.0 op1 = op1.T.tocsr() cdef int col1, row1_idx_start, row1_idx_end cdef np.ndarray[CTYPE_t, ndim=1, mode="c"] data1 = op1.data.conj() cdef np.ndarray[ITYPE_t, ndim=1, mode="c"] idx1 = op1.indices cdef np.ndarray[ITYPE_t, ndim=1, mode="c"] ptr1 = op1.indptr cdef int col2, row2_idx_start, row2_idx_end cdef np.ndarray[CTYPE_t, ndim=1, mode="c"] data2 = op2.data cdef np.ndarray[ITYPE_t, ndim=1, mode="c"] idx2 = op2.indices cdef np.ndarray[ITYPE_t, ndim=1, mode="c"] ptr2 = op2.indptr cdef int num_rows = ptr1.shape[0]-1 for row in range(num_rows): row1_idx_start = ptr1[row] row1_idx_end = ptr1[row + 1] for row1_idx from row1_idx_start <= row1_idx < row1_idx_end: col1 = idx1[row1_idx] row2_idx_start = ptr2[col1] row2_idx_end = ptr2[col1 + 1] for row2_idx from row2_idx_start <= row2_idx < row2_idx_end: col2 = idx2[row2_idx] if col2 == row: tr += data1[row1_idx] * data2[row2_idx] return tr / op1.shape[0] @cython.boundscheck(False) @cython.wraparound(False) cpdef 
cy_grape_inner(U, np.ndarray[DTYPE_t, ndim=3, mode="c"] u, int r, int J, int M, U_b_list, U_f_list, H_ops, float dt, float eps, float alpha, float beta, int phase_sensitive, int use_u_limits, float u_min, float u_max): cdef int j, k for m in range(M-1): P = U_b_list[m] * U for j in range(J): Q = 1j * dt * H_ops[j] * U_f_list[m] if phase_sensitive: du = - cy_overlap(P, Q) else: du = - 2 * cy_overlap(P, Q) * cy_overlap(U_f_list[m], P) if alpha > 0.0: # penalty term for high power control signals u du += -2 * alpha * u[r, j, m] * dt if beta: # penalty term for late control signals u du += -2 * beta * m ** 2 * u[r, j, m] * dt u[r + 1, j, m] = u[r, j, m] + eps * du.real if use_u_limits: if u[r + 1, j, m] < u_min: u[r + 1, j, m] = u_min elif u[r + 1, j, m] > u_max: u[r + 1, j, m] = u_max for j in range(J): u[r + 1, j, M-1] = u[r + 1, j, M-2] qutip-4.4.1/qutip/control/dump.py000066400000000000000000001023221352460343600170310ustar00rootroot00000000000000# -*- coding: utf-8 -*- # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2016 and later, Alexander J G Pitchford # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. 
# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### """ Classes that enable the storing of historical objects created during the pulse optimisation. These are intented for debugging. See the optimizer and dynamics objects for instrutcions on how to enable data dumping. """ import os import numpy as np import copy # QuTiP logging import qutip.logging_utils logger = qutip.logging_utils.get_logger() # QuTiP control modules import qutip.control.io as qtrlio from numpy.compat import asbytes DUMP_DIR = "~/.qtrl_dump" def _is_string(var): try: if isinstance(var, basestring): return True except NameError: try: if isinstance(var, str): return True except: return False except: return False return False class Dump(object): """ A container for dump items. The lists for dump items is depends on the type Note: abstract class Attributes ---------- parent : some control object (Dynamics or Optimizer) aka the host. Object that generates the data that is dumped and is host to this dump object. 
dump_dir : str directory where files (if any) will be written out the path and be relative or absolute use ~/ to specify user home directory Note: files are only written when write_to_file is True of writeout is called explicitly Defaults to ~/.qtrl_dump level : string level of data dumping: SUMMARY, FULL or CUSTOM See property docstring for details Set automatically if dump is created by the setting host dumping attrib write_to_file : bool When set True data and summaries (as configured) will be written interactively to file during the processing Set during instantiation by the host based on its dump_to_file attrib dump_file_ext : str Default file extension for any file names that are auto generated fname_base : str First part of any auto generated file names. This is usually overridden in the subclass dump_summary : bool If True a summary is recorded each time a new item is added to the the dump. Default is True summary_sep : str delimiter for the summary file. default is a space data_sep : str delimiter for the data files (arrays saved to file). default is a space summary_file : str File path for summary file. Automatically generated. 
Can be set specifically """ def __init__(self): self.reset() def reset(self): if self.parent: self.log_level = self.parent.log_level self.write_to_file = self.parent.dump_to_file else: self.write_to_file = False self._dump_dir = None self.dump_file_ext = "txt" self._fname_base = 'dump' self.dump_summary = True self.summary_sep = ' ' self.data_sep = ' ' self._summary_file_path = None self._summary_file_specified = False @property def log_level(self): return logger.level @log_level.setter def log_level(self, lvl): """ Set the log_level attribute and set the level of the logger that is call logger.setLevel(lvl) """ logger.setLevel(lvl) @property def level(self): """ The level of data dumping that will occur - SUMMARY : A summary will be recorded - FULL : All possible dumping - CUSTOM : Some customised level of dumping When first set to CUSTOM this is equivalent to SUMMARY. It is then up to the user to specify what specifically is dumped """ lvl = 'CUSTOM' if (self.dump_summary and not self.dump_any): lvl = 'SUMMARY' elif (self.dump_summary and self.dump_all): lvl = 'FULL' return lvl @level.setter def level(self, value): self._level = value self._apply_level() @property def dump_any(self): raise NotImplemented("This is an abstract class, " "use subclass such as DynamicsDump or OptimDump") @property def dump_all(self): raise NotImplemented("This is an abstract class, " "use subclass such as DynamicsDump or OptimDump") @property def dump_dir(self): if self._dump_dir is None: self.create_dump_dir() return self._dump_dir @dump_dir.setter def dump_dir(self, value): self._dump_dir = value if not self.create_dump_dir(): self._dump_dir = None def create_dump_dir(self): """ Checks dump directory exists, creates it if not """ if self._dump_dir is None or len(self._dump_dir) == 0: self._dump_dir = DUMP_DIR dir_ok, self._dump_dir, msg = qtrlio.create_dir( self._dump_dir, desc='dump') if not dir_ok: self.write_to_file = False msg += "\ndump file output will be suppressed." 
logger.error(msg) return dir_ok @property def fname_base(self): return self._fname_base @fname_base.setter def fname_base(self, value): if not _is_string(value): raise ValueError("File name base must be a string") self._fname_base = value self._summary_file_path = None @property def summary_file(self): if self._summary_file_path is None: fname = "{}-summary.{}".format(self._fname_base, self.dump_file_ext) self._summary_file_path = os.path.join(self.dump_dir, fname) return self._summary_file_path @summary_file.setter def summary_file(self, value): if not _is_string(value): raise ValueError("File path must be a string") self._summary_file_specified = True if os.path.abspath(value): self._summary_file_path = value elif '~' in value: self._summary_file_path = os.path.expanduser(value) else: self._summary_file_path = os.path.join(self.dump_dir, value) class OptimDump(Dump): """ A container for dumps of optimisation data generated during the pulse optimisation. Attributes ---------- dump_summary : bool When True summary items are appended to the iter_summary iter_summary : list of :class:`optimizer.OptimIterSummary` Summary at each iteration dump_fid_err : bool When True values are appended to the fid_err_log fid_err_log : list of float Fidelity error at each call of the fid_err_func dump_grad_norm : bool When True values are appended to the fid_err_log grad_norm_log : list of float Gradient norm at each call of the grad_norm_log dump_grad : bool When True values are appended to the grad_log grad_log : list of ndarray Gradients at each call of the fid_grad_func """ def __init__(self, optim, level='SUMMARY'): from qutip.control.optimizer import Optimizer if not isinstance(optim, Optimizer): raise TypeError("Must instantiate with {} type".format( Optimizer)) self.parent = optim self._level = level self.reset() def reset(self): Dump.reset(self) self._apply_level() self.iter_summary = [] self.fid_err_log = [] self.grad_norm_log = [] self.grad_log = [] self._fname_base = 
'optimdump' self._fid_err_file = None self._grad_norm_file = None def clear(self): del self.iter_summary[:] self.fid_err_log[:] self.grad_norm_log[:] self.grad_log[:] @property def dump_any(self): """True if anything other than the summary is to be dumped""" if (self.dump_fid_err or self.dump_grad_norm or self.dump_grad): return True else: return False @property def dump_all(self): """True if everything (ignoring the summary) is to be dumped""" if (self.dump_fid_err and self.dump_grad_norm and self.dump_grad): return True else: return False def _apply_level(self, level=None): if level is None: level = self._level if not _is_string(level): raise ValueError("Dump level must be a string") level = level.upper() if level == 'CUSTOM': if self._level == 'CUSTOM': # dumping level has not changed keep the same specific config pass else: # Switching to custom, start from SUMMARY level = 'SUMMARY' if level == 'SUMMARY': self.dump_summary = True self.dump_fid_err = False self.dump_grad_norm = False self.dump_grad = False elif level == 'FULL': self.dump_summary = True self.dump_fid_err = True self.dump_grad_norm = True self.dump_grad = True else: raise ValueError("No option for dumping level '{}'".format(level)) def add_iter_summary(self): """add copy of current optimizer iteration summary""" optim = self.parent if optim.iter_summary is None: raise RuntimeError("Cannot add iter_summary as not available") ois = copy.copy(optim.iter_summary) ois.idx = len(self.iter_summary) self.iter_summary.append(ois) if self.write_to_file: if ois.idx == 0: f = open(self.summary_file, 'w') f.write("{}\n{}\n".format( ois.get_header_line(self.summary_sep), ois.get_value_line(self.summary_sep))) else: f = open(self.summary_file, 'a') f.write("{}\n".format( ois.get_value_line(self.summary_sep))) f.close() return ois @property def fid_err_file(self): if self._fid_err_file is None: fname = "{}-fid_err_log.{}".format(self.fname_base, self.dump_file_ext) self._fid_err_file = os.path.join(self.dump_dir, 
fname) return self._fid_err_file def update_fid_err_log(self, fid_err): """add an entry to the fid_err log""" self.fid_err_log.append(fid_err) if self.write_to_file: if len(self.fid_err_log) == 1: mode = 'w' else: mode = 'a' f = open(self.fid_err_file, mode) f.write("{}\n".format(fid_err)) f.close() @property def grad_norm_file(self): if self._grad_norm_file is None: fname = "{}-grad_norm_log.{}".format(self.fname_base, self.dump_file_ext) self._grad_norm_file = os.path.join(self.dump_dir, fname) return self._grad_norm_file def update_grad_norm_log(self, grad_norm): """add an entry to the grad_norm log""" self.grad_norm_log.append(grad_norm) if self.write_to_file: if len(self.grad_norm_log) == 1: mode = 'w' else: mode = 'a' f = open(self.grad_norm_file, mode) f.write("{}\n".format(grad_norm)) f.close() def update_grad_log(self, grad): """add an entry to the grad log""" self.grad_log.append(grad) if self.write_to_file: fname = "{}-fid_err_gradients{}.{}".format(self.fname_base, len(self.grad_log), self.dump_file_ext) fpath = os.path.join(self.dump_dir, fname) np.savetxt(fpath, grad, delimiter=self.data_sep) def writeout(self, f=None): """write all the logs and the summary out to file(s) Parameters ---------- f : filename or filehandle If specified then all summary and object data will go in one file. If None is specified then type specific files will be generated in the dump_dir If a filehandle is specified then it must be a byte mode file as numpy.savetxt is used, and requires this. 
""" fall = None # If specific file given then write everything to it if hasattr(f, 'write'): if not 'b' in f.mode: raise RuntimeError("File stream must be in binary mode") # write all to this stream fall = f fs = f closefall = False closefs = False elif f: # Assume f is a filename fall = open(f, 'wb') fs = fall closefs = False closefall = True else: self.create_dump_dir() closefall = False if self.dump_summary: fs = open(self.summary_file, 'wb') closefs = True if self.dump_summary: for ois in self.iter_summary: if ois.idx == 0: fs.write(asbytes("{}\n{}\n".format( ois.get_header_line(self.summary_sep), ois.get_value_line(self.summary_sep)))) else: fs.write(asbytes("{}\n".format( ois.get_value_line(self.summary_sep)))) if closefs: fs.close() logger.info("Optim dump summary saved to {}".format( self.summary_file)) if self.dump_fid_err: if fall: fall.write(asbytes("Fidelity errors:\n")) np.savetxt(fall, self.fid_err_log) else: np.savetxt(self.fid_err_file, self.fid_err_log) if self.dump_grad_norm: if fall: fall.write(asbytes("gradients norms:\n")) np.savetxt(fall, self.grad_norm_log) else: np.savetxt(self.grad_norm_file, self.grad_norm_log) if self.dump_grad: g_num = 0 for grad in self.grad_log: g_num += 1 if fall: fall.write(asbytes("gradients (call {}):\n".format(g_num))) np.savetxt(fall, grad) else: fname = "{}-fid_err_gradients{}.{}".format(self.fname_base, g_num, self.dump_file_ext) fpath = os.path.join(self.dump_dir, fname) np.savetxt(fpath, grad, delimiter=self.data_sep) if closefall: fall.close() logger.info("Optim dump saved to {}".format(f)) else: if fall: logger.info("Optim dump saved to specified stream") else: logger.info("Optim dump saved to {}".format(self.dump_dir)) class DynamicsDump(Dump): """ A container for dumps of dynamics data. 
Mainly time evolution calculations Attributes ---------- dump_summary : bool If True a summary is recorded evo_summary : list of :class:`tslotcomp.EvoCompSummary' Summary items are appended if dump_summary is True at each recomputation of the evolution. dump_amps : bool If True control amplitudes are dumped dump_dyn_gen : bool If True the dynamics generators (Hamiltonians) are dumped dump_prop : bool If True propagators are dumped dump_prop_grad : bool If True propagator gradients are dumped dump_fwd_evo : bool If True forward evolution operators are dumped dump_onwd_evo : bool If True onward evolution operators are dumped dump_onto_evo : bool If True onto (or backward) evolution operators are dumped evo_dumps : list of :class:`EvoCompDumpItem` A new dump item is appended at each recomputation of the evolution. That is if any of the calculation objects are to be dumped. """ def __init__(self, dynamics, level='SUMMARY'): from qutip.control.dynamics import Dynamics if not isinstance(dynamics, Dynamics): raise TypeError("Must instantiate with {} type".format( Dynamics)) self.parent = dynamics self._level = level self.reset() def reset(self): Dump.reset(self) self._apply_level() self.evo_dumps = [] self.evo_summary = [] self._fname_base = 'dyndump' def clear(self): del self.evo_dumps[:] del self.evo_summary[:] @property def dump_any(self): """True if any of the calculation objects are to be dumped""" if (self.dump_amps or self.dump_dyn_gen or self.dump_prop or self.dump_prop_grad or self.dump_fwd_evo or self.dump_onwd_evo or self.dump_onto_evo): return True else: return False @property def dump_all(self): """True if all of the calculation objects are to be dumped""" dyn = self.parent if (self.dump_amps and self.dump_dyn_gen and self.dump_prop and self.dump_prop_grad and self.dump_fwd_evo and (self.dump_onwd_evo) or (self.dump_onwd_evo == dyn.fid_computer.uses_onwd_evo) and (self.dump_onto_evo or (self.dump_onto_evo == dyn.fid_computer.uses_onto_evo))): return True 
else: return False def _apply_level(self, level=None): dyn = self.parent if level is None: level = self._level if not _is_string(level): raise ValueError("Dump level must be a string") level = level.upper() if level == 'CUSTOM': if self._level == 'CUSTOM': # dumping level has not changed keep the same specific config pass else: # Switching to custom, start from SUMMARY level = 'SUMMARY' if level == 'SUMMARY': self.dump_summary = True self.dump_amps = False self.dump_dyn_gen = False self.dump_prop = False self.dump_prop_grad = False self.dump_fwd_evo = False self.dump_onwd_evo = False self.dump_onto_evo = False elif level == 'FULL': self.dump_summary = True self.dump_amps = True self.dump_dyn_gen = True self.dump_prop = True self.dump_prop_grad = True self.dump_fwd_evo = True self.dump_onwd_evo = dyn.fid_computer.uses_onwd_evo self.dump_onto_evo = dyn.fid_computer.uses_onto_evo else: raise ValueError("No option for dumping level '{}'".format(level)) def add_evo_dump(self): """Add dump of current time evolution generating objects""" dyn = self.parent item = EvoCompDumpItem(self) item.idx = len(self.evo_dumps) self.evo_dumps.append(item) if self.dump_amps: item.ctrl_amps = copy.deepcopy(dyn.ctrl_amps) if self.dump_dyn_gen: item.dyn_gen = copy.deepcopy(dyn._dyn_gen) if self.dump_prop: item.prop = copy.deepcopy(dyn._prop) if self.dump_prop_grad: item.prop_grad = copy.deepcopy(dyn._prop_grad) if self.dump_fwd_evo: item.fwd_evo = copy.deepcopy(dyn._fwd_evo) if self.dump_onwd_evo: item.onwd_evo = copy.deepcopy(dyn._onwd_evo) if self.dump_onto_evo: item.onto_evo = copy.deepcopy(dyn._onto_evo) if self.write_to_file: item.writeout() return item def add_evo_comp_summary(self, dump_item_idx=None): """add copy of current evo comp summary""" dyn = self.parent if dyn.tslot_computer.evo_comp_summary is None: raise RuntimeError("Cannot add evo_comp_summary as not available") ecs = copy.copy(dyn.tslot_computer.evo_comp_summary) ecs.idx = len(self.evo_summary) ecs.evo_dump_idx = 
dump_item_idx if dyn.stats: ecs.iter_num = dyn.stats.num_iter ecs.fid_func_call_num = dyn.stats.num_fidelity_func_calls ecs.grad_func_call_num = dyn.stats.num_grad_func_calls self.evo_summary.append(ecs) if self.write_to_file: if ecs.idx == 0: f = open(self.summary_file, 'w') f.write("{}\n{}\n".format( ecs.get_header_line(self.summary_sep), ecs.get_value_line(self.summary_sep))) else: f = open(self.summary_file, 'a') f.write("{}\n".format(ecs.get_value_line(self.summary_sep))) f.close() return ecs def writeout(self, f=None): """write all the dump items and the summary out to file(s) Parameters ---------- f : filename or filehandle If specified then all summary and object data will go in one file. If None is specified then type specific files will be generated in the dump_dir If a filehandle is specified then it must be a byte mode file as numpy.savetxt is used, and requires this. """ fall = None # If specific file given then write everything to it if hasattr(f, 'write'): if not 'b' in f.mode: raise RuntimeError("File stream must be in binary mode") # write all to this stream fall = f fs = f closefall = False closefs = False elif f: # Assume f is a filename fall = open(f, 'wb') fs = fall closefs = False closefall = True else: self.create_dump_dir() closefall = False if self.dump_summary: fs = open(self.summary_file, 'wb') closefs = True if self.dump_summary: for ecs in self.evo_summary: if ecs.idx == 0: fs.write(asbytes("{}\n{}\n".format( ecs.get_header_line(self.summary_sep), ecs.get_value_line(self.summary_sep)))) else: fs.write(asbytes("{}\n".format( ecs.get_value_line(self.summary_sep)))) if closefs: fs.close() logger.info("Dynamics dump summary saved to {}".format( self.summary_file)) for di in self.evo_dumps: di.writeout(fall) if closefall: fall.close() logger.info("Dynamics dump saved to {}".format(f)) else: if fall: logger.info("Dynamics dump saved to specified stream") else: logger.info("Dynamics dump saved to {}".format(self.dump_dir)) class 
DumpItem(object): """ An item in a dump list """ def __init__(self): pass class EvoCompDumpItem(DumpItem): """ A copy of all objects generated to calculate one time evolution Note the attributes are only set if the corresponding :class:`DynamicsDump` dump_ attribute is set. """ def __init__(self, dump): if not isinstance(dump, DynamicsDump): raise TypeError("Must instantiate with {} type".format( DynamicsDump)) self.parent = dump self.reset() def reset(self): self.idx = None # self.num_ctrls = None # self.num_tslots = None self.ctrl_amps = None self.dyn_gen = None self.prop = None self.prop_grad = None self.fwd_evo = None self.onwd_evo = None self.onto_evo = None def writeout(self, f=None): """ write all the objects out to files Parameters ---------- f : filename or filehandle If specified then all object data will go in one file. If None is specified then type specific files will be generated in the dump_dir If a filehandle is specified then it must be a byte mode file as numpy.savetxt is used, and requires this. 
""" dump = self.parent fall = None closefall = True closef = False # If specific file given then write everything to it if hasattr(f, 'write'): if not 'b' in f.mode: raise RuntimeError("File stream must be in binary mode") # write all to this stream fall = f closefall = False f.write(asbytes("EVOLUTION COMPUTATION {}\n".format(self.idx))) elif f: fall = open(f, 'wb') else: # otherwise files for each type will be created fnbase = "{}-evo{}".format(dump._fname_base, self.idx) closefall = False #ctrl amps if not self.ctrl_amps is None: if fall: f = fall f.write(asbytes("Ctrl amps\n")) else: fname = "{}-ctrl_amps.{}".format(fnbase, dump.dump_file_ext) f = open(os.path.join(dump.dump_dir, fname), 'wb') closef = True np.savetxt(f, self.ctrl_amps, fmt='%14.6g', delimiter=dump.data_sep) if closef: f.close() # dynamics generators if not self.dyn_gen is None: k = 0 if fall: f = fall f.write(asbytes("Dynamics Generators\n")) else: fname = "{}-dyn_gen.{}".format(fnbase, dump.dump_file_ext) f = open(os.path.join(dump.dump_dir, fname), 'wb') closef = True for dg in self.dyn_gen: f.write(asbytes( "dynamics generator for timeslot {}\n".format(k))) np.savetxt(f, self.dyn_gen[k], delimiter=dump.data_sep) k += 1 if closef: f.close() # Propagators if not self.prop is None: k = 0 if fall: f = fall f.write(asbytes("Propagators\n")) else: fname = "{}-prop.{}".format(fnbase, dump.dump_file_ext) f = open(os.path.join(dump.dump_dir, fname), 'wb') closef = True for dg in self.dyn_gen: f.write(asbytes("Propagator for timeslot {}\n".format(k))) np.savetxt(f, self.prop[k], delimiter=dump.data_sep) k += 1 if closef: f.close() # Propagator gradient if not self.prop_grad is None: k = 0 if fall: f = fall f.write(asbytes("Propagator gradients\n")) else: fname = "{}-prop_grad.{}".format(fnbase, dump.dump_file_ext) f = open(os.path.join(dump.dump_dir, fname), 'wb') closef = True for k in range(self.prop_grad.shape[0]): for j in range(self.prop_grad.shape[1]): f.write(asbytes("Propagator gradient for 
timeslot {} " "control {}\n".format(k, j))) np.savetxt(f, self.prop_grad[k, j], delimiter=dump.data_sep) if closef: f.close() # forward evolution if not self.fwd_evo is None: k = 0 if fall: f = fall f.write(asbytes("Forward evolution\n")) else: fname = "{}-fwd_evo.{}".format(fnbase, dump.dump_file_ext) f = open(os.path.join(dump.dump_dir, fname), 'wb') closef = True for dg in self.dyn_gen: f.write(asbytes("Evolution from 0 to {}\n".format(k))) np.savetxt(f, self.fwd_evo[k], delimiter=dump.data_sep) k += 1 if closef: f.close() # onward evolution if not self.onwd_evo is None: k = 0 if fall: f = fall f.write(asbytes("Onward evolution\n")) else: fname = "{}-onwd_evo.{}".format(fnbase, dump.dump_file_ext) f = open(os.path.join(dump.dump_dir, fname), 'wb') closef = True for dg in self.dyn_gen: f.write(asbytes("Evolution from {} to end\n".format(k))) np.savetxt(f, self.fwd_evo[k], delimiter=dump.data_sep) k += 1 if closef: f.close() # onto evolution if not self.onto_evo is None: k = 0 if fall: f = fall f.write(asbytes("Onto evolution\n")) else: fname = "{}-onto_evo.{}".format(fnbase, dump.dump_file_ext) f = open(os.path.join(dump.dump_dir, fname), 'wb') closef = True for dg in self.dyn_gen: f.write(asbytes("Evolution from {} onto target\n".format(k))) np.savetxt(f, self.fwd_evo[k], delimiter=dump.data_sep) k += 1 if closef: f.close() if closefall: fall.close() class DumpSummaryItem(object): """A summary of the most recent iteration Abstract class only Attributes: idx : int Index in the summary list in which this is stored """ min_col_width = 11 summary_property_names = () summary_property_fmt_type = () summary_property_fmt_prec = () @classmethod def get_header_line(cls, sep=' '): if sep == ' ': line = '' i = 0 for a in cls.summary_property_names: if i > 0: line += sep i += 1 line += format(a, str(max(len(a), cls.min_col_width)) + 's') else: line = sep.join(cls.summary_property_names) return line def reset(self): self.idx = 0 def get_value_line(self, sep=' '): line = "" i 
= 0 for a in zip(self.summary_property_names, self.summary_property_fmt_type, self.summary_property_fmt_prec): if i > 0: line += sep i += 1 v = getattr(self, a[0]) w = max(len(a[0]), self.min_col_width) if v is not None: fmt = '' if sep == ' ': fmt += str(w) else: fmt += '0' if a[2] > 0: fmt += '.' + str(a[2]) fmt += a[1] line += format(v, fmt) else: if sep == ' ': line += format('None', str(w) + 's') else: line += 'None' return linequtip-4.4.1/qutip/control/dynamics.py000066400000000000000000002044601352460343600177010ustar00rootroot00000000000000# -*- coding: utf-8 -*- # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2014 and later, Alexander J G Pitchford # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### # @author: Alexander Pitchford # @email1: agp1@aber.ac.uk # @email2: alex.pitchford@gmail.com # @organization: Aberystwyth University # @supervisor: Daniel Burgarth """ Classes that define the dynamics of the (quantum) system and target evolution to be optimised. The contols are also defined here, i.e. the dynamics generators (Hamiltonians, Limbladians etc). The dynamics for the time slices are calculated here, along with the evolution as determined by the control amplitudes. See the subclass descriptions and choose the appropriate class for the application. The choice depends on the type of matrix used to define the dynamics. 
These class implement functions for getting the dynamics generators for the combined (drift + ctrls) dynamics with the approriate operator applied Note the methods in these classes were inspired by: DYNAMO - Dynamic Framework for Quantum Optimal Control See Machnes et.al., arXiv.1011.4874 """ import os import warnings import numpy as np import scipy.linalg as la import scipy.sparse as sp # QuTiP from qutip import Qobj from qutip.sparse import sp_eigs, _dense_eigs import qutip.settings as settings # QuTiP logging import qutip.logging_utils as logging logger = logging.get_logger() # QuTiP control modules import qutip.control.errors as errors import qutip.control.tslotcomp as tslotcomp import qutip.control.fidcomp as fidcomp import qutip.control.propcomp as propcomp import qutip.control.symplectic as sympl import qutip.control.dump as qtrldump DEF_NUM_TSLOTS = 10 DEF_EVO_TIME = 1.0 def _is_string(var): try: if isinstance(var, basestring): return True except NameError: try: if isinstance(var, str): return True except: return False except: return False return False def _check_ctrls_container(ctrls): """ Check through the controls container. 
Convert to an array if its a list of lists return the processed container raise type error if the container structure is invalid """ if isinstance(ctrls, (list, tuple)): # Check to see if list of lists try: if isinstance(ctrls[0], (list, tuple)): ctrls = np.array(ctrls, dtype=object) except: pass if isinstance(ctrls, np.ndarray): if len(ctrls.shape) != 2: raise TypeError("Incorrect shape for ctrl dyn gen array") for k in range(ctrls.shape[0]): for j in range(ctrls.shape[1]): if not isinstance(ctrls[k, j], Qobj): raise TypeError("All control dyn gen must be Qobj") elif isinstance(ctrls, (list, tuple)): for ctrl in ctrls: if not isinstance(ctrl, Qobj): raise TypeError("All control dyn gen must be Qobj") else: raise TypeError("Controls list or array not set correctly") return ctrls def _check_drift_dyn_gen(drift): if not isinstance(drift, Qobj): if not isinstance(drift, (list, tuple)): raise TypeError("drift should be a Qobj or a list of Qobj") else: for d in drift: if not isinstance(d, Qobj): raise TypeError( "drift should be a Qobj or a list of Qobj") warnings.simplefilter('always', DeprecationWarning) #turn off filter def _attrib_deprecation(message, stacklevel=3): """ Issue deprecation warning Using stacklevel=3 will ensure message refers the function calling with the deprecated parameter, """ warnings.warn(message, DeprecationWarning, stacklevel=stacklevel) def _func_deprecation(message, stacklevel=3): """ Issue deprecation warning Using stacklevel=3 will ensure message refers the function calling with the deprecated parameter, """ warnings.warn(message, DeprecationWarning, stacklevel=stacklevel) class Dynamics(object): """ This is a base class only. See subclass descriptions and choose an appropriate one for the application. Note that initialize_controls must be called before most of the methods can be used. 
init_timeslots can be called sometimes earlier in order to access timeslot related attributes This acts as a container for the operators that are used to calculate time evolution of the system under study. That is the dynamics generators (Hamiltonians, Lindbladians etc), the propagators from one timeslot to the next, and the evolution operators. Due to the large number of matrix additions and multiplications, for small systems at least, the optimisation performance is much better using ndarrays to represent these operators. However Attributes ---------- log_level : integer level of messaging output from the logger. Options are attributes of qutip.logging_utils, in decreasing levels of messaging, are: DEBUG_INTENSE, DEBUG_VERBOSE, DEBUG, INFO, WARN, ERROR, CRITICAL Anything WARN or above is effectively 'quiet' execution, assuming everything runs as expected. The default NOTSET implies that the level will be taken from the QuTiP settings file, which by default is WARN params: Dictionary The key value pairs are the attribute name and value Note: attributes are created if they do not exist already, and are overwritten if they do. stats : Stats Attributes of which give performance stats for the optimisation set to None to reduce overhead of calculating stats. Note it is (usually) shared with the Optimizer object tslot_computer : TimeslotComputer (subclass instance) Used to manage when the timeslot dynamics generators, propagators, gradients etc are updated prop_computer : PropagatorComputer (subclass instance) Used to compute the propagators and their gradients fid_computer : FidelityComputer (subclass instance) Used to computer the fidelity error and the fidelity error gradient. memory_optimization : int Level of memory optimisation. Setting to 0 (default) means that execution speed is prioritized over memory. 
Setting to 1 means that some memory prioritisation steps will be taken, for instance using Qobj (and hence sparse arrays) as the the internal operator data type, and not caching some operators Potentially further memory saving maybe made with memory_optimization > 1. The options are processed in _set_memory_optimizations, see this for more information. Individual memory saving options can be switched by settting them directly (see below) oper_dtype : type Data type for internal dynamics generators, propagators and time evolution operators. This can be ndarray or Qobj, or (in theory) any other representaion that supports typical matrix methods (e.g. dot) ndarray performs best for smaller quantum systems. Qobj may perform better for larger systems, and will also perform better when (custom) fidelity measures use Qobj methods such as partial trace. See _choose_oper_dtype for how this is chosen when not specified cache_phased_dyn_gen : bool If True then the dynamics generators will be saved with and without the propagation prefactor (if there is one) Defaults to True when memory_optimization=0, otherwise False cache_prop_grad : bool If the True then the propagator gradients (for exact gradients) will be computed when the propagator are computed and cache until the are used by the fidelity computer. If False then the fidelity computer will calculate them as needed. Defaults to True when memory_optimization=0, otherwise False cache_dyn_gen_eigenvectors_adj: bool If True then DynamicsUnitary will cached the adjoint of the Hamiltion eignvector matrix Defaults to True when memory_optimization=0, otherwise False sparse_eigen_decomp: bool If True then DynamicsUnitary will use the sparse eigenvalue decomposition. Defaults to True when memory_optimization<=1, otherwise False num_tslots : integer Number of timeslots (aka timeslices) num_ctrls : integer Number of controls. Note this is calculated as the length of ctrl_dyn_gen when first used. 
And is recalculated during initialise_controls only. evo_time : float Total time for the evolution tau : array[num_tslots] of float Duration of each timeslot Note that if this is set before initialize_controls is called then num_tslots and evo_time are calculated from tau, otherwise tau is generated from num_tslots and evo_time, that is equal size time slices time : array[num_tslots+1] of float Cumulative time for the evolution, that is the time at the start of each time slice drift_dyn_gen : Qobj or list of Qobj Drift or system dynamics generator (Hamiltonian) Matrix defining the underlying dynamics of the system Can also be a list of Qobj (length num_tslots) for time varying drift dynamics ctrl_dyn_gen : List of Qobj Control dynamics generator (Hamiltonians) List of matrices defining the control dynamics initial : Qobj Starting state / gate The matrix giving the initial state / gate, i.e. at time 0 Typically the identity for gate evolution target : Qobj Target state / gate: The matrix giving the desired state / gate for the evolution ctrl_amps : array[num_tslots, num_ctrls] of float Control amplitudes The amplitude (scale factor) for each control in each timeslot initial_ctrl_scaling : float Scale factor applied to be applied the control amplitudes when they are initialised This is used by the PulseGens rather than in any fucntions in this class initial_ctrl_offset : float Linear offset applied to be applied the control amplitudes when they are initialised This is used by the PulseGens rather than in any fucntions in this class dyn_gen : List of Qobj Dynamics generators the combined drift and control dynamics generators for each timeslot prop : list of Qobj Propagators - used to calculate time evolution from one timeslot to the next prop_grad : array[num_tslots, num_ctrls] of Qobj Propagator gradient (exact gradients only) Array of Qobj that give the gradient with respect to the control amplitudes in a timeslot Note this attribute is only created when the 
selected PropagatorComputer is an exact gradient type. fwd_evo : List of Qobj Forward evolution (or propagation) the time evolution operator from the initial state / gate to the specified timeslot as generated by the dyn_gen onwd_evo : List of Qobj Onward evolution (or propagation) the time evolution operator from the specified timeslot to end of the evolution time as generated by the dyn_gen onto_evo : List of Qobj 'Backward' List of Qobj propagation the overlap of the onward propagation with the inverse of the target. Note this is only used (so far) by the unitary dynamics fidelity evo_current : Boolean Used to flag that the dynamics used to calculate the evolution operators is current. It is set to False when the amplitudes change fact_mat_round_prec : float Rounding precision used when calculating the factor matrix to determine if two eigenvalues are equivalent Only used when the PropagatorComputer uses diagonalisation def_amps_fname : string Default name for the output used when save_amps is called unitarity_check_level : int If > 0 then unitarity of the system evolution is checked at at evolution recomputation. level 1 checks all propagators level 2 checks eigen basis as well Default is 0 unitarity_tol : Tolerance used in checking if operator is unitary Default is 1e-10 dump : :class:`dump.DynamicsDump` Store of historical calculation data. Set to None (Default) for no storing of historical data Use dumping property to set level of data dumping dumping : string level of data dumping: NONE, SUMMARY, FULL or CUSTOM See property docstring for details dump_to_file : bool If set True then data will be dumped to file during the calculations dumping will be set to SUMMARY during init_evo if dump_to_file is True and dumping not set. Default is False dump_dir : string Basically a link to dump.dump_dir. Exists so that it can be set through dyn_params. 
If dump is None then will return None or will set dumping to SUMMARY when setting a path """ def __init__(self, optimconfig, params=None): self.config = optimconfig self.params = params self.reset() def reset(self): # Link to optimiser object if self is linked to one self.parent = None # Main functional attributes self.time = None self.initial = None self.target = None self.ctrl_amps = None self.initial_ctrl_scaling = 1.0 self.initial_ctrl_offset = 0.0 self.drift_dyn_gen = None self.ctrl_dyn_gen = None self._tau = None self._evo_time = None self._num_ctrls = None self._num_tslots = None # attributes used for processing evolution self.memory_optimization = 0 self.oper_dtype = None self.cache_phased_dyn_gen = None self.cache_prop_grad = None self.cache_dyn_gen_eigenvectors_adj = None self.sparse_eigen_decomp = None self.dyn_dims = None self.dyn_shape = None self.sys_dims = None self.sys_shape = None self.time_depend_drift = False self.time_depend_ctrl_dyn_gen = False # These internal attributes will be of the internal operator data type # used to compute the evolution # Note this maybe ndarray, Qobj or some other depending on oper_dtype self._drift_dyn_gen = None self._ctrl_dyn_gen = None self._phased_ctrl_dyn_gen = None self._dyn_gen_phase = None self._phase_application = None self._initial = None self._target = None self._onto_evo_target = None self._dyn_gen = None self._phased_dyn_gen = None self._prop = None self._prop_grad = None self._fwd_evo = None self._onwd_evo = None self._onto_evo = None # The _qobj attribs are Qobj representations of the equivalent # internal attribute. 
They are only set when the extenal accessors # are used self._onto_evo_target_qobj = None self._dyn_gen_qobj = None self._prop_qobj = None self._prop_grad_qobj = None self._fwd_evo_qobj = None self._onwd_evo_qobj = None self._onto_evo_qobj = None # Atrributes used in diagonalisation # again in internal operator data type (see above) self._decomp_curr = None self._prop_eigen = None self._dyn_gen_eigenvectors = None self._dyn_gen_eigenvectors_adj = None self._dyn_gen_factormatrix = None self.fact_mat_round_prec = 1e-10 # Debug and information attribs self.stats = None self.id_text = 'DYN_BASE' self.def_amps_fname = "ctrl_amps.txt" self.log_level = self.config.log_level # Internal flags self._dyn_gen_mapped = False self._evo_initialized = False self._timeslots_initialized = False self._ctrls_initialized = False self._ctrl_dyn_gen_checked = False self._drift_dyn_gen_checked = False # Unitary checking self.unitarity_check_level = 0 self.unitarity_tol = 1e-10 # Data dumping self.dump = None self.dump_to_file = False self.apply_params() # Create the computing objects self._create_computers() self.clear() def apply_params(self, params=None): """ Set object attributes based on the dictionary (if any) passed in the instantiation, or passed as a parameter This is called during the instantiation automatically. The key value pairs are the attribute name and value Note: attributes are created if they do not exist already, and are overwritten if they do. """ if not params: params = self.params if isinstance(params, dict): self.params = params for key in params: setattr(self, key, params[key]) @property def log_level(self): return logger.level @log_level.setter def log_level(self, lvl): """ Set the log_level attribute and set the level of the logger that is call logger.setLevel(lvl) """ logger.setLevel(lvl) @property def dumping(self): """ The level of data dumping that will occur during the time evolution calculation. 
- NONE : No processing data dumped (Default) - SUMMARY : A summary of each time evolution will be recorded - FULL : All operators used or created in the calculation dumped - CUSTOM : Some customised level of dumping When first set to CUSTOM this is equivalent to SUMMARY. It is then up to the user to specify which operators are dumped WARNING: FULL could consume a lot of memory! """ if self.dump is None: lvl = 'NONE' else: lvl = self.dump.level return lvl @dumping.setter def dumping(self, value): if value is None: self.dump = None else: if not _is_string(value): raise TypeError("Value must be string value") lvl = value.upper() if lvl == 'NONE': self.dump = None else: if not isinstance(self.dump, qtrldump.DynamicsDump): self.dump = qtrldump.DynamicsDump(self, level=lvl) else: self.dump.level = lvl @property def dump_dir(self): if self.dump: return self.dump.dump_dir else: return None @dump_dir.setter def dump_dir(self, value): if not self.dump: self.dumping = 'SUMMARY' self.dump.dump_dir = value def _create_computers(self): """ Create the default timeslot, fidelity and propagator computers """ # The time slot computer. 
By default it is set to UpdateAll # can be set to DynUpdate in the configuration # (see class file for details) if self.config.tslot_type == 'DYNAMIC': self.tslot_computer = tslotcomp.TSlotCompDynUpdate(self) else: self.tslot_computer = tslotcomp.TSlotCompUpdateAll(self) self.prop_computer = propcomp.PropCompFrechet(self) self.fid_computer = fidcomp.FidCompTraceDiff(self) def clear(self): self.ctrl_amps = None self.evo_current = False if self.fid_computer is not None: self.fid_computer.clear() @property def num_tslots(self): if not self._timeslots_initialized: self.init_timeslots() return self._num_tslots @num_tslots.setter def num_tslots(self, value): self._num_tslots = value if self._timeslots_initialized: self._tau = None self.init_timeslots() @property def evo_time(self): if not self._timeslots_initialized: self.init_timeslots() return self._evo_time @evo_time.setter def evo_time(self, value): self._evo_time = value if self._timeslots_initialized: self._tau = None self.init_timeslots() @property def tau(self): if not self._timeslots_initialized: self.init_timeslots() return self._tau @tau.setter def tau(self, value): self._tau = value self.init_timeslots() def init_timeslots(self): """ Generate the timeslot duration array 'tau' based on the evo_time and num_tslots attributes, unless the tau attribute is already set in which case this step in ignored Generate the cumulative time array 'time' based on the tau values """ # set the time intervals to be equal timeslices of the total if # the have not been set already (as part of user config) if self._num_tslots is None: self._num_tslots = DEF_NUM_TSLOTS if self._evo_time is None: self._evo_time = DEF_EVO_TIME if self._tau is None: self._tau = np.ones(self._num_tslots, dtype='f') * \ self._evo_time/self._num_tslots else: self._num_tslots = len(self._tau) self._evo_time = np.sum(self._tau) self.time = np.zeros(self._num_tslots+1, dtype=float) # set the cumulative time by summing the time intervals for t in 
range(self._num_tslots): self.time[t+1] = self.time[t] + self._tau[t] self._timeslots_initialized = True def _set_memory_optimizations(self): """ Set various memory optimisation attributes based on the memory_optimization attribute If they have been set already, e.g. in apply_params then they will not be overridden here """ logger.info("Setting memory optimisations for level {}".format( self.memory_optimization)) if self.oper_dtype is None: self._choose_oper_dtype() logger.info("Internal operator data type choosen to be {}".format( self.oper_dtype)) else: logger.info("Using operator data type {}".format( self.oper_dtype)) if self.cache_phased_dyn_gen is None: if self.memory_optimization > 0: self.cache_phased_dyn_gen = False else: self.cache_phased_dyn_gen = True logger.info("phased dynamics generator caching {}".format( self.cache_phased_dyn_gen)) if self.cache_prop_grad is None: if self.memory_optimization > 0: self.cache_prop_grad = False else: self.cache_prop_grad = True logger.info("propagator gradient caching {}".format( self.cache_prop_grad)) if self.cache_dyn_gen_eigenvectors_adj is None: if self.memory_optimization > 0: self.cache_dyn_gen_eigenvectors_adj = False else: self.cache_dyn_gen_eigenvectors_adj = True logger.info("eigenvector adjoint caching {}".format( self.cache_dyn_gen_eigenvectors_adj)) if self.sparse_eigen_decomp is None: if self.memory_optimization > 1: self.sparse_eigen_decomp = True else: self.sparse_eigen_decomp = False logger.info("use sparse eigen decomp {}".format( self.sparse_eigen_decomp)) def _choose_oper_dtype(self): """ Attempt select most efficient internal operator data type """ if self.memory_optimization > 0: self.oper_dtype = Qobj else: # Method taken from Qobj.expm() # if method is not explicitly given, try to make a good choice # between sparse and dense solvers by considering the size of the # system and the number of non-zero elements. 
if self.time_depend_drift: dg = self.drift_dyn_gen[0] else: dg = self.drift_dyn_gen if self.time_depend_ctrl_dyn_gen: ctrls = self.ctrl_dyn_gen[0, :] else: ctrls = self.ctrl_dyn_gen for c in ctrls: dg = dg + c N = dg.data.shape[0] n = dg.data.nnz if N ** 2 < 100 * n: # large number of nonzero elements, revert to dense solver self.oper_dtype = np.ndarray elif N > 400: # large system, and quite sparse -> qutips sparse method self.oper_dtype = Qobj else: # small system, but quite sparse -> qutips sparse/dense method self.oper_dtype = np.ndarray return self.oper_dtype def _init_evo(self): """ Create the container lists / arrays for the: dynamics generations, propagators, and evolutions etc Set the time slices and cumulative time """ # check evolution operators if not self._drift_dyn_gen_checked: _check_drift_dyn_gen(self.drift_dyn_gen) if not self._ctrl_dyn_gen_checked: self.ctrl_dyn_gen = _check_ctrls_container(self.ctrl_dyn_gen) if not isinstance(self.initial, Qobj): raise TypeError("initial must be a Qobj") if not isinstance(self.target, Qobj): raise TypeError("target must be a Qobj") self.refresh_drift_attribs() self.sys_dims = self.initial.dims self.sys_shape = self.initial.shape # Set the phase application method self._init_phase() self._set_memory_optimizations() n_ts = self.num_tslots n_ctrls = self.num_ctrls if self.oper_dtype == Qobj: self._initial = self.initial self._target = self.target self._drift_dyn_gen = self.drift_dyn_gen self._ctrl_dyn_gen = self.ctrl_dyn_gen elif self.oper_dtype == np.ndarray: self._initial = self.initial.full() self._target = self.target.full() if self.time_depend_drift: self._drift_dyn_gen = [d.full() for d in self.drift_dyn_gen] else: self._drift_dyn_gen = self.drift_dyn_gen.full() if self.time_depend_ctrl_dyn_gen: self._ctrl_dyn_gen = np.empty([n_ts, n_ctrls], dtype=object) for k in range(n_ts): for j in range(n_ctrls): self._ctrl_dyn_gen[k, j] = \ self.ctrl_dyn_gen[k, j].full() else: self._ctrl_dyn_gen = [ctrl.full() for ctrl 
in self.ctrl_dyn_gen] elif self.oper_dtype == sp.csr_matrix: self._initial = self.initial.data self._target = self.target.data if self.time_depend_drift: self._drift_dyn_gen = [d.data for d in self.drift_dyn_gen] else: self._drift_dyn_gen = self.drift_dyn_gen.data if self.time_depend_ctrl_dyn_gen: self._ctrl_dyn_gen = np.empty([n_ts, n_ctrls], dtype=object) for k in range(n_ts): for j in range(n_ctrls): self._ctrl_dyn_gen[k, j] = \ self.ctrl_dyn_gen[k, j].data else: self._ctrl_dyn_gen = [ctrl.data for ctrl in self.ctrl_dyn_gen] else: logger.warn("Unknown option '{}' for oper_dtype. " "Assuming that internal drift, ctrls, initial and target " "have been set correctly".format(self.oper_dtype)) if self.cache_phased_dyn_gen: if self.time_depend_ctrl_dyn_gen: self._phased_ctrl_dyn_gen = np.empty([n_ts, n_ctrls], dtype=object) for k in range(n_ts): for j in range(n_ctrls): self._phased_ctrl_dyn_gen[k, j] = self._apply_phase( self._ctrl_dyn_gen[k, j]) else: self._phased_ctrl_dyn_gen = [self._apply_phase(ctrl) for ctrl in self._ctrl_dyn_gen] self._dyn_gen = [object for x in range(self.num_tslots)] if self.cache_phased_dyn_gen: self._phased_dyn_gen = [object for x in range(self.num_tslots)] self._prop = [object for x in range(self.num_tslots)] if self.prop_computer.grad_exact and self.cache_prop_grad: self._prop_grad = np.empty([self.num_tslots, self.num_ctrls], dtype=object) # Time evolution operator (forward propagation) self._fwd_evo = [object for x in range(self.num_tslots+1)] self._fwd_evo[0] = self._initial if self.fid_computer.uses_onwd_evo: # Time evolution operator (onward propagation) self._onwd_evo = [object for x in range(self.num_tslots)] if self.fid_computer.uses_onto_evo: # Onward propagation overlap with inverse target self._onto_evo = [object for x in range(self.num_tslots+1)] self._onto_evo[self.num_tslots] = self._get_onto_evo_target() if isinstance(self.prop_computer, propcomp.PropCompDiag): self._create_decomp_lists() if (self.log_level <= logging.DEBUG 
and isinstance(self, DynamicsUnitary)): self.unitarity_check_level = 1 if self.dump_to_file: if self.dump is None: self.dumping = 'SUMMARY' self.dump.write_to_file = True self.dump.create_dump_dir() logger.info("Dynamics dump will be written to:\n{}".format( self.dump.dump_dir)) self._evo_initialized = True @property def dyn_gen_phase(self): """ Some op that is applied to the dyn_gen before expontiating to get the propagator. See `phase_application` for how this is applied """ # Note that if this returns None then _apply_phase will never be # called return self._dyn_gen_phase @dyn_gen_phase.setter def dyn_gen_phase(self, value): self._dyn_gen_phase = value @property def phase_application(self): """ phase_application : scalar(string), default='preop' Determines how the phase is applied to the dynamics generators - 'preop' : P = expm(phase*dyn_gen) - 'postop' : P = expm(dyn_gen*phase) - 'custom' : Customised phase application The 'custom' option assumes that the _apply_phase method has been set to a custom function """ return self._phase_application @phase_application.setter def phase_application(self, value): self._set_phase_application(value) def _set_phase_application(self, value): self._config_phase_application(value) self._phase_application = value def _config_phase_application(self, ph_app=None): """ Set the appropriate function for the phase application """ err_msg = ("Invalid value '{}' for phase application. 
Must be either " "'preop', 'postop' or 'custom'".format(ph_app)) if ph_app is None: ph_app = self._phase_application try: ph_app = ph_app.lower() except: raise ValueError(err_msg) if ph_app == 'preop': self._apply_phase = self._apply_phase_preop elif ph_app == 'postop': self._apply_phase = self._apply_phase_postop elif ph_app == 'custom': # Do nothing, assume _apply_phase set elsewhere pass else: raise ValueError(err_msg) def _init_phase(self): if self.dyn_gen_phase is not None: self._config_phase_application() else: self.cache_phased_dyn_gen = False def _apply_phase(self, dg): """ This default method does nothing. It will be set to another method automatically if `phase_application` is 'preop' or 'postop'. It should be overridden repointed if `phase_application` is 'custom' It will never be called if `dyn_gen_phase` is None """ return dg def _apply_phase_preop(self, dg): """ Apply phasing operator to dynamics generator. This called during the propagator calculation. In this case it will be applied as phase*dg """ if hasattr(self.dyn_gen_phase, 'dot'): phased_dg = self._dyn_gen_phase.dot(dg) else: phased_dg = self._dyn_gen_phase*dg return phased_dg def _apply_phase_postop(self, dg): """ Apply phasing operator to dynamics generator. This called during the propagator calculation. 
In this case it will be applied as dg*phase """ if hasattr(self.dyn_gen_phase, 'dot'): phased_dg = dg.dot(self._dyn_gen_phase) else: phased_dg = dg*self._dyn_gen_phase return phased_dg def _create_decomp_lists(self): """ Create lists that will hold the eigen decomposition used in calculating propagators and gradients Note: used with PropCompDiag propagator calcs """ n_ts = self.num_tslots self._decomp_curr = [False for x in range(n_ts)] self._prop_eigen = [object for x in range(n_ts)] self._dyn_gen_eigenvectors = [object for x in range(n_ts)] if self.cache_dyn_gen_eigenvectors_adj: self._dyn_gen_eigenvectors_adj = [object for x in range(n_ts)] self._dyn_gen_factormatrix = [object for x in range(n_ts)] def initialize_controls(self, amps, init_tslots=True): """ Set the initial control amplitudes and time slices Note this must be called after the configuration is complete before any dynamics can be calculated """ if not isinstance(self.prop_computer, propcomp.PropagatorComputer): raise errors.UsageError( "No prop_computer (propagator computer) " "set. A default should be assigned by the Dynamics subclass") if not isinstance(self.tslot_computer, tslotcomp.TimeslotComputer): raise errors.UsageError( "No tslot_computer (Timeslot computer)" " set. A default should be assigned by the Dynamics class") if not isinstance(self.fid_computer, fidcomp.FidelityComputer): raise errors.UsageError( "No fid_computer (Fidelity computer)" " set. A default should be assigned by the Dynamics subclass") self.ctrl_amps = None if not self._timeslots_initialized: init_tslots = True if init_tslots: self.init_timeslots() self._init_evo() self.tslot_computer.init_comp() self.fid_computer.init_comp() self._ctrls_initialized = True self.update_ctrl_amps(amps) def check_ctrls_initialized(self): if not self._ctrls_initialized: raise errors.UsageError( "Controls not initialised. 
" "Ensure Dynamics.initialize_controls has been " "executed with the initial control amplitudes.") def get_amp_times(self): return self.time[:self.num_tslots] def save_amps(self, file_name=None, times=None, amps=None, verbose=False): """ Save a file with the current control amplitudes in each timeslot The first column in the file will be the start time of the slot Parameters ---------- file_name : string Name of the file If None given the def_amps_fname attribuite will be used times : List type (or string) List / array of the start times for each slot If None given this will be retrieved through get_amp_times() If 'exclude' then times will not be saved in the file, just the amplitudes amps : Array[num_tslots, num_ctrls] Amplitudes to be saved If None given the ctrl_amps attribute will be used verbose : Boolean If True then an info message will be logged """ self.check_ctrls_initialized() inctimes = True if file_name is None: file_name = self.def_amps_fname if amps is None: amps = self.ctrl_amps if times is None: times = self.get_amp_times() else: if _is_string(times): if times.lower() == 'exclude': inctimes = False else: logger.warn("Unknown option for times '{}' " "when saving amplitudes".format(times)) times = self.get_amp_times() try: if inctimes: shp = amps.shape data = np.empty([shp[0], shp[1] + 1], dtype=float) data[:, 0] = times data[:, 1:] = amps else: data = amps np.savetxt(file_name, data, delimiter='\t', fmt='%14.6g') if verbose: logger.info("Amplitudes saved to file: " + file_name) except Exception as e: logger.error("Failed to save amplitudes due to underling " "error: {}".format(e)) def update_ctrl_amps(self, new_amps): """ Determine if any amplitudes have changed. 
If so, then mark the timeslots as needing recalculation The actual work is completed by the compare_amps method of the timeslot computer """ if self.log_level <= logging.DEBUG_INTENSE: logger.log(logging.DEBUG_INTENSE, "Updating amplitudes...\n" "Current control amplitudes:\n" + str(self.ctrl_amps) + "\n(potenially) new amplitudes:\n" + str(new_amps)) self.tslot_computer.compare_amps(new_amps) def flag_system_changed(self): """ Flag evolution, fidelity and gradients as needing recalculation """ self.evo_current = False self.fid_computer.flag_system_changed() def get_drift_dim(self): """ Returns the size of the matrix that defines the drift dynamics that is assuming the drift is NxN, then this returns N """ if self.dyn_shape is None: self.refresh_drift_attribs() return self.dyn_shape[0] def refresh_drift_attribs(self): """Reset the dyn_shape, dyn_dims and time_depend_drift attribs""" if isinstance(self.drift_dyn_gen, (list, tuple)): d0 = self.drift_dyn_gen[0] self.time_depend_drift = True else: d0 = self.drift_dyn_gen self.time_depend_drift = False if not isinstance(d0, Qobj): raise TypeError("Unable to determine drift attributes, " "because drift_dyn_gen is not Qobj (nor list of)") self.dyn_shape = d0.shape self.dyn_dims = d0.dims def get_num_ctrls(self): """ calculate the of controls from the length of the control list sets the num_ctrls property, which can be used alternatively subsequently """ _func_deprecation("'get_num_ctrls' has been replaced by " "'num_ctrls' property") return self.num_ctrls def _get_num_ctrls(self): if not self._ctrl_dyn_gen_checked: self.ctrl_dyn_gen = _check_ctrls_container(self.ctrl_dyn_gen) self._ctrl_dyn_gen_checked = True if isinstance(self.ctrl_dyn_gen, np.ndarray): self._num_ctrls = self.ctrl_dyn_gen.shape[1] self.time_depend_ctrl_dyn_gen = True else: self._num_ctrls = len(self.ctrl_dyn_gen) return self._num_ctrls @property def num_ctrls(self): """ calculate the of controls from the length of the control list sets the num_ctrls 
property, which can be used alternatively subsequently """ if self._num_ctrls is None: self._num_ctrls = self._get_num_ctrls() return self._num_ctrls @property def onto_evo_target(self): if self._onto_evo_target is None: self._get_onto_evo_target() if self._onto_evo_target_qobj is None: if isinstance(self._onto_evo_target, Qobj): self._onto_evo_target_qobj = self._onto_evo_target else: rev_dims = [self.sys_dims[1], self.sys_dims[0]] self._onto_evo_target_qobj = Qobj(self._onto_evo_target, dims=rev_dims) return self._onto_evo_target_qobj def get_owd_evo_target(self): _func_deprecation("'get_owd_evo_target' has been replaced by " "'onto_evo_target' property") return self.onto_evo_target def _get_onto_evo_target(self): """ Get the inverse of the target. Used for calculating the 'onto target' evolution This is actually only relevant for unitary dynamics where the target.dag() is what is required However, for completeness, in general the inverse of the target operator is is required For state-to-state, the bra corresponding to the is required ket """ if self.target.shape[0] == self.target.shape[1]: #Target is operator targ = la.inv(self.target.full()) if self.oper_dtype == Qobj: self._onto_evo_target = Qobj(targ) elif self.oper_dtype == np.ndarray: self._onto_evo_target = targ elif self.oper_dtype == sp.csr_matrix: self._onto_evo_target = sp.csr_matrix(targ) else: targ_cls = self._target.__class__ self._onto_evo_target = targ_cls(targ) else: if self.oper_dtype == Qobj: self._onto_evo_target = self.target.dag() elif self.oper_dtype == np.ndarray: self._onto_evo_target = self.target.dag().full() elif self.oper_dtype == sp.csr_matrix: self._onto_evo_target = self.target.dag().data else: targ_cls = self._target.__class__ self._onto_evo_target = targ_cls(self.target.dag().full()) return self._onto_evo_target def combine_dyn_gen(self, k): """ Computes the dynamics generator for a given timeslot The is the combined Hamiltion for unitary systems """ 
_func_deprecation("'combine_dyn_gen' has been replaced by " "'_combine_dyn_gen'") self._combine_dyn_gen(k) return self._dyn_gen(k) def _combine_dyn_gen(self, k): """ Computes the dynamics generator for a given timeslot The is the combined Hamiltion for unitary systems Also applies the phase (if any required by the propagation) """ if self.time_depend_drift: dg = self._drift_dyn_gen[k] else: dg = self._drift_dyn_gen for j in range(self._num_ctrls): if self.time_depend_ctrl_dyn_gen: dg = dg + self.ctrl_amps[k, j]*self._ctrl_dyn_gen[k, j] else: dg = dg + self.ctrl_amps[k, j]*self._ctrl_dyn_gen[j] self._dyn_gen[k] = dg if self.cache_phased_dyn_gen: self._phased_dyn_gen[k] = self._apply_phase(dg) def get_dyn_gen(self, k): """ Get the combined dynamics generator for the timeslot Not implemented in the base class. Choose a subclass """ _func_deprecation("'get_dyn_gen' has been replaced by " "'_get_phased_dyn_gen'") return self._get_phased_dyn_gen(k) def _get_phased_dyn_gen(self, k): if self.dyn_gen_phase is None: return self._dyn_gen[k] else: if self._phased_dyn_gen is None: return self._apply_phase(self._dyn_gen[k]) else: return self._phased_dyn_gen[k] def get_ctrl_dyn_gen(self, j): """ Get the dynamics generator for the control Not implemented in the base class. 
Choose a subclass """ _func_deprecation("'get_ctrl_dyn_gen' has been replaced by " "'_get_phased_ctrl_dyn_gen'") return self._get_phased_ctrl_dyn_gen(0, j) def _get_phased_ctrl_dyn_gen(self, k, j): if self._phased_ctrl_dyn_gen is not None: if self.time_depend_ctrl_dyn_gen: return self._phased_ctrl_dyn_gen[k, j] else: return self._phased_ctrl_dyn_gen[j] else: if self.time_depend_ctrl_dyn_gen: if self.dyn_gen_phase is None: return self._ctrl_dyn_gen[k, j] else: return self._apply_phase(self._ctrl_dyn_gen[k, j]) else: if self.dyn_gen_phase is None: return self._ctrl_dyn_gen[j] else: return self._apply_phase(self._ctrl_dyn_gen[j]) @property def dyn_gen(self): """ List of combined dynamics generators (Qobj) for each timeslot """ if self._dyn_gen is not None: if self._dyn_gen_qobj is None: if self.oper_dtype == Qobj: self._dyn_gen_qobj = self._dyn_gen else: self._dyn_gen_qobj = [Qobj(dg, dims=self.dyn_dims) for dg in self._dyn_gen] return self._dyn_gen_qobj @property def prop(self): """ List of propagators (Qobj) for each timeslot """ if self._prop is not None: if self._prop_qobj is None: if self.oper_dtype == Qobj: self._prop_qobj = self._prop else: self._prop_qobj = [Qobj(dg, dims=self.dyn_dims) for dg in self._prop] return self._prop_qobj @property def prop_grad(self): """ Array of propagator gradients (Qobj) for each timeslot, control """ if self._prop_grad is not None: if self._prop_grad_qobj is None: if self.oper_dtype == Qobj: self._prop_grad_qobj = self._prop_grad else: self._prop_grad_qobj = np.empty( [self.num_tslots, self.num_ctrls], dtype=object) for k in range(self.num_tslots): for j in range(self.num_ctrls): self._prop_grad_qobj[k, j] = Qobj( self._prop_grad[k, j], dims=self.dyn_dims) return self._prop_grad_qobj def _get_prop_grad(self, k, j): if self.cache_prop_grad: prop_grad = self._prop_grad[k, j] else: prop_grad = self.prop_computer._compute_prop_grad(k, j, compute_prop = False) return prop_grad @property def evo_init2t(self): _attrib_deprecation( 
"'evo_init2t' has been replaced by '_fwd_evo'") return self._fwd_evo @property def fwd_evo(self): """ List of evolution operators (Qobj) from the initial to the given timeslot """ if self._fwd_evo is not None: if self._fwd_evo_qobj is None: if self.oper_dtype == Qobj: self._fwd_evo_qobj = self._fwd_evo else: self._fwd_evo_qobj = [self.initial] for k in range(1, self.num_tslots+1): self._fwd_evo_qobj.append(Qobj(self._fwd_evo[k], dims=self.sys_dims)) return self._fwd_evo_qobj def _get_full_evo(self): return self._fwd_evo[self._num_tslots] @property def full_evo(self): """Full evolution - time evolution at final time slot""" return self.fwd_evo[self.num_tslots] @property def evo_t2end(self): _attrib_deprecation( "'evo_t2end' has been replaced by '_onwd_evo'") return self._onwd_evo @property def onwd_evo(self): """ List of evolution operators (Qobj) from the initial to the given timeslot """ if self._onwd_evo is not None: if self._onwd_evo_qobj is None: if self.oper_dtype == Qobj: self._onwd_evo_qobj = self._fwd_evo else: self._onwd_evo_qobj = [Qobj(dg, dims=self.sys_dims) for dg in self._onwd_evo] return self._onwd_evo_qobj @property def evo_t2targ(self): _attrib_deprecation( "'evo_t2targ' has been replaced by '_onto_evo'") return self._onto_evo @property def onto_evo(self): """ List of evolution operators (Qobj) from the initial to the given timeslot """ if self._onto_evo is not None: if self._onto_evo_qobj is None: if self.oper_dtype == Qobj: self._onto_evo_qobj = self._onto_evo else: self._onto_evo_qobj = [] for k in range(0, self.num_tslots): self._onto_evo_qobj.append(Qobj(self._onto_evo[k], dims=self.sys_dims)) self._onto_evo_qobj.append(self.onto_evo_target) return self._onto_evo_qobj def compute_evolution(self): """ Recalculate the time evolution operators Dynamics generators (e.g. 
Hamiltonian) and prop (propagators) are calculated as necessary Actual work is completed by the recompute_evolution method of the timeslot computer """ # Check if values are already current, otherwise calculate all values if not self.evo_current: if self.log_level <= logging.DEBUG_VERBOSE: logger.log(logging.DEBUG_VERBOSE, "Computing evolution") self.tslot_computer.recompute_evolution() self.evo_current = True return True else: return False def _ensure_decomp_curr(self, k): """ Checks to see if the diagonalisation has been completed since the last update of the dynamics generators (after the amplitude update) If not then the diagonlisation is completed """ if self._decomp_curr is None: raise errors.UsageError("Decomp lists have not been created") if not self._decomp_curr[k]: self._spectral_decomp(k) def _spectral_decomp(self, k): """ Calculate the diagonalization of the dynamics generator generating lists of eigenvectors, propagators in the diagonalised basis, and the 'factormatrix' used in calculating the propagator gradient Not implemented in this base class, because the method is specific to the matrix type """ raise errors.UsageError("Decomposition cannot be completed by " "this class. 
Try a(nother) subclass") def _is_unitary(self, A): """ Checks whether operator A is unitary A can be either Qobj or ndarray """ if isinstance(A, Qobj): unitary = np.allclose(np.eye(A.shape[0]), A*A.dag().full(), atol=self.unitarity_tol) else: unitary = np.allclose(np.eye(len(A)), A.dot(A.T.conj()), atol=self.unitarity_tol) return unitary def _calc_unitary_err(self, A): if isinstance(A, Qobj): err = np.sum(abs(np.eye(A.shape[0]) - A*A.dag().full())) else: err = np.sum(abs(np.eye(len(A)) - A.dot(A.T.conj()))) return err def unitarity_check(self): """ Checks whether all propagators are unitary """ for k in range(self.num_tslots): if not self._is_unitary(self._prop[k]): logger.warning( "Progator of timeslot {} is not unitary".format(k)) class DynamicsGenMat(Dynamics): """ This sub class can be used for any system where no additional operator is applied to the dynamics generator before calculating the propagator, e.g. classical dynamics, Lindbladian """ def reset(self): Dynamics.reset(self) self.id_text = 'GEN_MAT' self.apply_params() class DynamicsUnitary(Dynamics): """ This is the subclass to use for systems with dynamics described by unitary matrices. E.g. closed systems with Hermitian Hamiltonians Note a matrix diagonalisation is used to compute the exponent The eigen decomposition is also used to calculate the propagator gradient. The method is taken from DYNAMO (see file header) Attributes ---------- drift_ham : Qobj This is the drift Hamiltonian for unitary dynamics It is mapped to drift_dyn_gen during initialize_controls ctrl_ham : List of Qobj These are the control Hamiltonians for unitary dynamics It is mapped to ctrl_dyn_gen during initialize_controls H : List of Qobj The combined drift and control Hamiltonians for each timeslot These are the dynamics generators for unitary dynamics. 
It is mapped to dyn_gen during initialize_controls """ def reset(self): Dynamics.reset(self) self.id_text = 'UNIT' self.drift_ham = None self.ctrl_ham = None self.H = None self._dyn_gen_phase = -1j self._phase_application = 'preop' self.apply_params() def _create_computers(self): """ Create the default timeslot, fidelity and propagator computers """ # The time slot computer. By default it is set to _UpdateAll # can be set to _DynUpdate in the configuration # (see class file for details) if self.config.tslot_type == 'DYNAMIC': self.tslot_computer = tslotcomp.TSlotCompDynUpdate(self) else: self.tslot_computer = tslotcomp.TSlotCompUpdateAll(self) # set the default fidelity computer self.fid_computer = fidcomp.FidCompUnitary(self) # set the default propagator computer self.prop_computer = propcomp.PropCompDiag(self) def initialize_controls(self, amplitudes, init_tslots=True): # Either the _dyn_gen or _ham names can be used # This assumes that one or other has been set in the configuration self._map_dyn_gen_to_ham() Dynamics.initialize_controls(self, amplitudes, init_tslots=init_tslots) #self.H = self._dyn_gen def _map_dyn_gen_to_ham(self): if self.drift_dyn_gen is None: self.drift_dyn_gen = self.drift_ham else: self.drift_ham = self.drift_dyn_gen if self.ctrl_dyn_gen is None: self.ctrl_dyn_gen = self.ctrl_ham else: self.ctrl_ham = self.ctrl_dyn_gen self._dyn_gen_mapped = True @property def num_ctrls(self): if not self._dyn_gen_mapped: self._map_dyn_gen_to_ham() if self._num_ctrls is None: self._num_ctrls = self._get_num_ctrls() return self._num_ctrls def _get_onto_evo_target(self): """ Get the adjoint of the target. 
Used for calculating the 'backward' evolution """ if self.oper_dtype == Qobj: self._onto_evo_target = self.target.dag() else: self._onto_evo_target = self._target.T.conj() return self._onto_evo_target def _spectral_decomp(self, k): """ Calculates the diagonalization of the dynamics generator generating lists of eigenvectors, propagators in the diagonalised basis, and the 'factormatrix' used in calculating the propagator gradient """ if self.oper_dtype == Qobj: H = self._dyn_gen[k] # Returns eigenvalues as array (row) # and eigenvectors as rows of an array eig_val, eig_vec = sp_eigs(H.data, H.isherm, sparse=self.sparse_eigen_decomp) eig_vec = eig_vec.T elif self.oper_dtype == np.ndarray: H = self._dyn_gen[k] # returns row vector of eigenvals, columns with the eigenvecs eig_val, eig_vec = np.linalg.eigh(H) else: if sparse: H = self._dyn_gen[k].toarray() else: H = self._dyn_gen[k] # returns row vector of eigenvals, columns with the eigenvecs eig_val, eig_vec = la.eigh(H) # assuming H is an nxn matrix, find n n = self.get_drift_dim() # Calculate the propagator in the diagonalised basis eig_val_tau = -1j*eig_val*self.tau[k] prop_eig = np.exp(eig_val_tau) # Generate the factor matrix through the differences # between each of the eigenvectors and the exponentiations # create nxn matrix where each eigen val is repeated n times # down the columns o = np.ones([n, n]) eig_val_cols = eig_val_tau*o # calculate all the differences by subtracting it from its transpose eig_val_diffs = eig_val_cols - eig_val_cols.T # repeat for the propagator prop_eig_cols = prop_eig*o prop_eig_diffs = prop_eig_cols - prop_eig_cols.T # the factor matrix is the elementwise quotient of the # differeneces between the exponentiated eigen vals and the # differences between the eigen vals # need to avoid division by zero that would arise due to denegerate # eigenvalues and the diagonals degen_mask = np.abs(eig_val_diffs) < self.fact_mat_round_prec eig_val_diffs[degen_mask] = 1 factors = prop_eig_diffs / 
eig_val_diffs # for degenerate eigenvalues the factor is just the exponent factors[degen_mask] = prop_eig_cols[degen_mask] # Store eigenvectors, propagator and factor matric # for use in propagator computations self._decomp_curr[k] = True if isinstance(factors, np.ndarray): self._dyn_gen_factormatrix[k] = factors else: self._dyn_gen_factormatrix[k] = np.array(factors) if self.oper_dtype == Qobj: self._prop_eigen[k] = Qobj(np.diagflat(prop_eig), dims=self.dyn_dims) self._dyn_gen_eigenvectors[k] = Qobj(eig_vec, dims=self.dyn_dims) # The _dyn_gen_eigenvectors_adj list is not used in # memory optimised modes if self._dyn_gen_eigenvectors_adj is not None: self._dyn_gen_eigenvectors_adj[k] = \ self._dyn_gen_eigenvectors[k].dag() else: self._prop_eigen[k] = np.diagflat(prop_eig) self._dyn_gen_eigenvectors[k] = eig_vec # The _dyn_gen_eigenvectors_adj list is not used in # memory optimised modes if self._dyn_gen_eigenvectors_adj is not None: self._dyn_gen_eigenvectors_adj[k] = \ self._dyn_gen_eigenvectors[k].conj().T def _get_dyn_gen_eigenvectors_adj(self, k): # The _dyn_gen_eigenvectors_adj list is not used in # memory optimised modes if self._dyn_gen_eigenvectors_adj is not None: return self._dyn_gen_eigenvectors_adj[k] else: if self.oper_dtype == Qobj: return self._dyn_gen_eigenvectors[k].dag() else: return self._dyn_gen_eigenvectors[k].conj().T def check_unitarity(self): """ Checks whether all propagators are unitary For propagators found not to be unitary, the potential underlying causes are investigated. 
""" for k in range(self.num_tslots): prop_unit = self._is_unitary(self._prop[k]) if not prop_unit: logger.warning( "Progator of timeslot {} is not unitary".format(k)) if not prop_unit or self.unitarity_check_level > 1: # Check Hamiltonian H = self._dyn_gen[k] if isinstance(H, Qobj): herm = H.isherm else: diff = np.abs(H.T.conj() - H) herm = False if np.any(diff > settings.atol) else True eigval_unit = self._is_unitary(self._prop_eigen[k]) eigvec_unit = self._is_unitary(self._dyn_gen_eigenvectors[k]) if self._dyn_gen_eigenvectors_adj is not None: eigvecadj_unit = self._is_unitary( self._dyn_gen_eigenvectors_adj[k]) else: eigvecadj_unit = None msg = ("prop unit: {}; H herm: {}; " "eigval unit: {}; eigvec unit: {}; " "eigvecadj_unit: {}".format( prop_unit, herm, eigval_unit, eigvec_unit, eigvecadj_unit)) logger.info(msg) class DynamicsSymplectic(Dynamics): """ Symplectic systems This is the subclass to use for systems where the dynamics is described by symplectic matrices, e.g. coupled oscillators, quantum optics Attributes ---------- omega : array[drift_dyn_gen.shape] matrix used in the calculation of propagators (time evolution) with symplectic systems. """ def reset(self): Dynamics.reset(self) self.id_text = 'SYMPL' self._omega = None self._omega_qobj = None self._phase_application = 'postop' self.grad_exact = True self.apply_params() def _create_computers(self): """ Create the default timeslot, fidelity and propagator computers """ # The time slot computer. 
By default it is set to _UpdateAll # can be set to _DynUpdate in the configuration # (see class file for details) if self.config.tslot_type == 'DYNAMIC': self.tslot_computer = tslotcomp.TSlotCompDynUpdate(self) else: self.tslot_computer = tslotcomp.TSlotCompUpdateAll(self) self.prop_computer = propcomp.PropCompFrechet(self) self.fid_computer = fidcomp.FidCompTraceDiff(self) @property def omega(self): if self._omega is None: self._get_omega() if self._omega_qobj is None: self._omega_qobj = Qobj(self._omega, dims=self.dyn_dims) return self._omega_qobj def _get_omega(self): if self._omega is None: n = self.get_drift_dim() // 2 omg = sympl.calc_omega(n) if self.oper_dtype == Qobj: self._omega = Qobj(omg, dims=self.dyn_dims) self._omega_qobj = self._omega elif self.oper_dtype == sp.csr_matrix: self._omega = sp.csr_matrix(omg) else: self._omega = omg return self._omega def _set_phase_application(self, value): Dynamics._set_phase_application(self, value) if self._evo_initialized: phase = self._get_dyn_gen_phase() if phase is not None: self._dyn_gen_phase = phase def _get_dyn_gen_phase(self): if self._phase_application == 'postop': phase = -self._get_omega() elif self._phase_application == 'preop': phase = self._get_omega() elif self._phase_application == 'custom': phase = None # Assume phase set by user else: raise ValueError("No option for phase_application " "'{}'".format(self._phase_application)) return phase @property def dyn_gen_phase(self): """ The phasing operator for the symplectic group generators usually refered to as \Omega By default this is applied as 'postop' dyn_gen*-\Omega If phase_application is 'preop' it is applied as \Omega*dyn_gen """ # Cannot be calculated until the dyn_shape is set # that is after the drift dyn gen has been set. 
if self._dyn_gen_phase is None: self._dyn_gen_phase = self._get_dyn_gen_phase() return self._dyn_gen_phase qutip-4.4.1/qutip/control/errors.py000066400000000000000000000077571352460343600174200ustar00rootroot00000000000000# -*- coding: utf-8 -*- # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2014 and later, Alexander J G Pitchford # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### # @author: Alexander Pitchford # @email1: agp1@aber.ac.uk # @email2: alex.pitchford@gmail.com # @organization: Aberystwyth University # @supervisor: Daniel Burgarth """ Exception classes for the Quantum Control library """ class Error(Exception): """Base class for all qutip control exceptions""" def __str__(self): return repr(self.message) class UsageError(Error): """ A function has been used incorrectly. Most likely when a base class was used when a sub class should have been. funcname: function name where error occurred msg: Explanation """ def __init__(self, msg): self.message = msg class FunctionalError(Error): """ A function behaved in an unexpected way Attributes: funcname: function name where error occurred msg: Explanation """ def __init__(self, msg): self.message = msg class OptimizationTerminate(Error): """ Superclass for all early terminations from the optimisation algorithm """ pass class GoalAchievedTerminate(OptimizationTerminate): """ Exception raised to terminate execution when the goal has been reached during the optimisation algorithm """ def __init__(self, fid_err): self.reason = "Goal achieved" self.fid_err = fid_err class MaxWallTimeTerminate(OptimizationTerminate): """ Exception raised to terminate execution when the optimisation time has exceeded the maximum set in the config """ def __init__(self): self.reason = "Max wall time exceeded" class MaxFidFuncCallTerminate(OptimizationTerminate): """ Exception raised to terminate execution when the number of calls to the fidelity error function has exceeded the maximum """ def __init__(self): self.reason = "Number of fidelity error calls has exceeded the maximum" class GradMinReachedTerminate(OptimizationTerminate): """ Exception raised to terminate execution when the minimum gradient normal has been reached during the optimisation algorithm """ def __init__(self, gradient): self.reason = "Gradient normal minimum 
reached" self.gradient = gradient qutip-4.4.1/qutip/control/fidcomp.py000066400000000000000000000714261352460343600175170ustar00rootroot00000000000000# -*- coding: utf-8 -*- # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2014 and later, Alexander J G Pitchford # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### # @author: Alexander Pitchford # @email1: agp1@aber.ac.uk # @email2: alex.pitchford@gmail.com # @organization: Aberystwyth University # @supervisor: Daniel Burgarth """ Fidelity Computer These classes calculate the fidelity error - function to be minimised and fidelity error gradient, which is used to direct the optimisation They may calculate the fidelity as an intermediary step, as in some case e.g. unitary dynamics, this is more efficient The idea is that different methods for computing the fidelity can be tried and compared using simple configuration switches. Note the methods in these classes were inspired by: DYNAMO - Dynamic Framework for Quantum Optimal Control See Machnes et.al., arXiv.1011.4874 The unitary dynamics fidelity is taken directly frm DYNAMO The other fidelity measures are extensions, and the sources are given in the class descriptions. """ import os import warnings import numpy as np import scipy.sparse as sp # import scipy.linalg as la import timeit # QuTiP from qutip import Qobj # QuTiP logging import qutip.logging_utils as logging logger = logging.get_logger() # QuTiP control modules import qutip.control.errors as errors warnings.simplefilter('always', DeprecationWarning) #turn off filter def _attrib_deprecation(message, stacklevel=3): """ Issue deprecation warning Using stacklevel=3 will ensure message refers the function calling with the deprecated parameter, """ warnings.warn(message, DeprecationWarning, stacklevel=stacklevel) def _func_deprecation(message, stacklevel=3): """ Issue deprecation warning Using stacklevel=3 will ensure message refers the function calling with the deprecated parameter, """ warnings.warn(message, DeprecationWarning, stacklevel=stacklevel) def _trace(A): """wrapper for calculating the trace""" # input is an operator (Qobj, array, sparse etc), so if isinstance(A, Qobj): return A.tr() elif isinstance(A, np.ndarray): return np.trace(A) 
else: #Assume A some sparse matrix return np.sum(A.diagonal()) class FidelityComputer(object): """ Base class for all Fidelity Computers. This cannot be used directly. See subclass descriptions and choose one appropriate for the application Note: this must be instantiated with a Dynamics object, that is the container for the data that the methods operate on Attributes ---------- log_level : integer level of messaging output from the logger. Options are attributes of qutip.logging_utils, in decreasing levels of messaging, are: DEBUG_INTENSE, DEBUG_VERBOSE, DEBUG, INFO, WARN, ERROR, CRITICAL Anything WARN or above is effectively 'quiet' execution, assuming everything runs as expected. The default NOTSET implies that the level will be taken from the QuTiP settings file, which by default is WARN dimensional_norm : float Normalisation constant fid_norm_func : function Used to normalise the fidelity See SU and PSU options for the unitary dynamics grad_norm_func : function Used to normalise the fidelity gradient See SU and PSU options for the unitary dynamics uses_onwd_evo : boolean flag to specify whether the onwd_evo evolution operator (see Dynamics) is used by the FidelityComputer uses_onto_evo : boolean flag to specify whether the onto_evo evolution operator (see Dynamics) is used by the FidelityComputer fid_err : float Last computed value of the fidelity error fidelity : float Last computed value of the normalised fidelity fidelity_current : boolean flag to specify whether the fidelity / fid_err are based on the current amplitude values. Set False when amplitudes change fid_err_grad: array[num_tslot, num_ctrls] of float Last computed values for the fidelity error gradients wrt the control in the timeslot grad_norm : float Last computed value for the norm of the fidelity error gradients (sqrt of the sum of the squares) fid_err_grad_current : boolean flag to specify whether the fidelity / fid_err are based on the current amplitude values. 
Set False when amplitudes change """ def __init__(self, dynamics, params=None): self.parent = dynamics self.params = params self.reset() def reset(self): """ reset any configuration data and clear any temporarily held status data """ self.log_level = self.parent.log_level self.id_text = 'FID_COMP_BASE' self.dimensional_norm = 1.0 self.fid_norm_func = None self.grad_norm_func = None self.uses_onwd_evo = False self.uses_onto_evo = False self.apply_params() self.clear() def clear(self): """ clear any temporarily held status data """ self.fid_err = None self.fidelity = None self.fid_err_grad = None self.grad_norm = np.inf self.fidelity_current = False self.fid_err_grad_current = False self.grad_norm = 0.0 def apply_params(self, params=None): """ Set object attributes based on the dictionary (if any) passed in the instantiation, or passed as a parameter This is called during the instantiation automatically. The key value pairs are the attribute name and value Note: attributes are created if they do not exist already, and are overwritten if they do. """ if not params: params = self.params if isinstance(params, dict): self.params = params for key in params: setattr(self, key, params[key]) @property def log_level(self): return logger.level @log_level.setter def log_level(self, lvl): """ Set the log_level attribute and set the level of the logger that is call logger.setLevel(lvl) """ logger.setLevel(lvl) def init_comp(self): """ initialises the computer based on the configuration of the Dynamics """ # optionally implemented in subclass pass def get_fid_err(self): """ returns the absolute distance from the maximum achievable fidelity """ # must be implemented by subclass raise errors.UsageError( "No method defined for getting fidelity error." 
" Suspect base class was used where sub class should have been") def get_fid_err_gradient(self): """ Returns the normalised gradient of the fidelity error in a (nTimeslots x n_ctrls) array wrt the timeslot control amplitude """ # must be implemented by subclass raise errors.UsageError("No method defined for getting fidelity" " error gradient. Suspect base class was" " used where sub class should have been") def flag_system_changed(self): """ Flag fidelity and gradients as needing recalculation """ self.fidelity_current = False # Flag gradient as needing recalculating self.fid_err_grad_current = False @property def uses_evo_t2end(self): _attrib_deprecation( "'uses_evo_t2end' has been replaced by 'uses_onwd_evo'") return self.uses_onwd_evo @uses_evo_t2end.setter def uses_evo_t2end(self, value): _attrib_deprecation( "'uses_evo_t2end' has been replaced by 'uses_onwd_evo'") self.uses_onwd_evo = value @property def uses_evo_t2targ(self): _attrib_deprecation( "'uses_evo_t2targ' has been replaced by 'uses_onto_evo'") return self.uses_onto_evo @uses_evo_t2targ.setter def uses_evo_t2targ(self, value): _attrib_deprecation( "'uses_evo_t2targ' has been replaced by 'uses_onto_evo'") self.uses_onto_evo = value class FidCompUnitary(FidelityComputer): """ Computes fidelity error and gradient assuming unitary dynamics, e.g. closed qubit systems Note fidelity and gradient calculations were taken from DYNAMO (see file header) Attributes ---------- phase_option : string determines how global phase is treated in fidelity calculations: PSU - global phase ignored SU - global phase included fidelity_prenorm : complex Last computed value of the fidelity before it is normalised It is stored to use in the gradient normalisation calculation fidelity_prenorm_current : boolean flag to specify whether fidelity_prenorm are based on the current amplitude values. 
Set False when amplitudes change """ def reset(self): FidelityComputer.reset(self) self.id_text = 'UNIT' self.uses_onto_evo = True self._init_phase_option('PSU') self.apply_params() def clear(self): FidelityComputer.clear(self) self.fidelity_prenorm = None self.fidelity_prenorm_current = False def set_phase_option(self, phase_option=None): """ Deprecated - use phase_option Phase options are SU - global phase important PSU - global phase is not important """ _func_deprecation("'set_phase_option' is deprecated. " "Use phase_option property") self._init_phase_option(phase_option) @property def phase_option(self): return self._phase_option @phase_option.setter def phase_option(self, value): """ # Phase options are # SU - global phase important # PSU - global phase is not important """ self._init_phase_option(value) def _init_phase_option(self, value): self._phase_option = value if value == 'PSU': self.fid_norm_func = self.normalize_PSU self.grad_norm_func = self.normalize_gradient_PSU elif value == 'SU': self.fid_norm_func = self.normalize_SU self.grad_norm_func = self.normalize_gradient_SU elif value is None: raise errors.UsageError("phase_option cannot be set to None" " for this FidelityComputer.") else: raise errors.UsageError( "No option for phase_option '{}'".format(value)) def init_comp(self): """ Check configuration and initialise the normalisation """ if self.fid_norm_func is None or self.grad_norm_func is None: raise errors.UsageError("The phase_option must be be set" "for this fidelity computer") self.init_normalization() def flag_system_changed(self): """ Flag fidelity and gradients as needing recalculation """ FidelityComputer.flag_system_changed(self) # Flag the fidelity (prenormalisation) value as needing calculation self.fidelity_prenorm_current = False def init_normalization(self): """ Calc norm of to scale subsequent norms When considering unitary time evolution operators, this basically results in calculating the trace of the identity matrix and is 
hence equal to the size of the target matrix There may be situations where this is not the case, and hence it is not assumed to be so. The normalisation function called should be set to either the PSU - global phase ignored SU - global phase respected """ dyn = self.parent self.dimensional_norm = 1.0 self.dimensional_norm = \ self.fid_norm_func(dyn.target.dag()*dyn.target) def normalize_SU(self, A): """ """ try: if A.shape[0] == A.shape[1]: # input is an operator (Qobj, array, sparse etc), so norm = _trace(A) else: raise TypeError("Cannot compute trace (not square)") except: # assume input is already scalar and hence assumed # to be the prenormalised scalar value, e.g. fidelity norm = A return np.real(norm) / self.dimensional_norm def normalize_gradient_SU(self, grad): """ Normalise the gradient matrix passed as grad This SU version respects global phase """ grad_normalized = np.real(grad) / self.dimensional_norm return grad_normalized def normalize_PSU(self, A): """ """ try: if A.shape[0] == A.shape[1]: # input is an operator (Qobj, array, sparse etc), so norm = _trace(A) else: raise TypeError("Cannot compute trace (not square)") except: # assume input is already scalar and hence assumed # to be the prenormalised scalar value, e.g. 
fidelity norm = A return np.abs(norm) / self.dimensional_norm def normalize_gradient_PSU(self, grad): """ Normalise the gradient matrix passed as grad This PSU version is independent of global phase """ fid_pn = self.get_fidelity_prenorm() grad_normalized = np.real(grad * np.exp(-1j * np.angle(fid_pn)) / self.dimensional_norm) return grad_normalized def get_fid_err(self): """ Gets the absolute error in the fidelity """ return np.abs(1 - self.get_fidelity()) def get_fidelity(self): """ Gets the appropriately normalised fidelity value The normalisation is determined by the fid_norm_func pointer which should be set in the config """ if not self.fidelity_current: self.fidelity = \ self.fid_norm_func(self.get_fidelity_prenorm()) self.fidelity_current = True if self.log_level <= logging.DEBUG: logger.debug("Fidelity (normalised): {}".format(self.fidelity)) return self.fidelity def get_fidelity_prenorm(self): """ Gets the current fidelity value prior to normalisation Note the gradient function uses this value The value is cached, because it is used in the gradient calculation """ if not self.fidelity_prenorm_current: dyn = self.parent k = dyn.tslot_computer._get_timeslot_for_fidelity_calc() dyn.compute_evolution() if dyn.oper_dtype == Qobj: f = (dyn._onto_evo[k]*dyn._fwd_evo[k]).tr() else: f = _trace(dyn._onto_evo[k].dot(dyn._fwd_evo[k])) self.fidelity_prenorm = f self.fidelity_prenorm_current = True if dyn.stats is not None: dyn.stats.num_fidelity_computes += 1 if self.log_level <= logging.DEBUG: logger.debug("Fidelity (pre normalisation): {}".format( self.fidelity_prenorm)) return self.fidelity_prenorm def get_fid_err_gradient(self): """ Returns the normalised gradient of the fidelity error in a (nTimeslots x n_ctrls) array The gradients are cached in case they are requested mutliple times between control updates (although this is not typically found to happen) """ if not self.fid_err_grad_current: dyn = self.parent grad_prenorm = self.compute_fid_grad() if 
self.log_level <= logging.DEBUG_INTENSE: logger.log(logging.DEBUG_INTENSE, "pre-normalised fidelity " "gradients:\n{}".format(grad_prenorm)) # AJGP: Note this check should not be necessary if dynamics are # unitary. However, if they are not then this gradient # can still be used, however the interpretation is dubious if self.get_fidelity() >= 1: self.fid_err_grad = self.grad_norm_func(grad_prenorm) else: self.fid_err_grad = -self.grad_norm_func(grad_prenorm) self.fid_err_grad_current = True if dyn.stats is not None: dyn.stats.num_grad_computes += 1 self.grad_norm = np.sqrt(np.sum(self.fid_err_grad**2)) if self.log_level <= logging.DEBUG_INTENSE: logger.log(logging.DEBUG_INTENSE, "Normalised fidelity error " "gradients:\n{}".format(self.fid_err_grad)) if self.log_level <= logging.DEBUG: logger.debug("Gradient (sum sq norm): " "{} ".format(self.grad_norm)) return self.fid_err_grad def compute_fid_grad(self): """ Calculates exact gradient of function wrt to each timeslot control amplitudes. 
Note these gradients are not normalised These are returned as a (nTimeslots x n_ctrls) array """ dyn = self.parent n_ctrls = dyn.num_ctrls n_ts = dyn.num_tslots # create n_ts x n_ctrls zero array for grad start point grad = np.zeros([n_ts, n_ctrls], dtype=complex) dyn.tslot_computer.flag_all_calc_now() dyn.compute_evolution() # loop through all ctrl timeslots calculating gradients time_st = timeit.default_timer() for j in range(n_ctrls): for k in range(n_ts): fwd_evo = dyn._fwd_evo[k] onto_evo = dyn._onto_evo[k+1] if dyn.oper_dtype == Qobj: g = (onto_evo*dyn._get_prop_grad(k, j)*fwd_evo).tr() else: g = _trace(onto_evo.dot( dyn._get_prop_grad(k, j)).dot(fwd_evo)) grad[k, j] = g if dyn.stats is not None: dyn.stats.wall_time_gradient_compute += \ timeit.default_timer() - time_st return grad class FidCompTraceDiff(FidelityComputer): """ Computes fidelity error and gradient for general system dynamics by calculating the the fidelity error as the trace of the overlap of the difference between the target and evolution resulting from the pulses with the transpose of the same. This should provide a distance measure for dynamics described by matrices Note the gradient calculation is taken from: 'Robust quantum gates for open systems via optimal control: Markovian versus non-Markovian dynamics' Frederik F Floether, Pierre de Fouquieres, and Sophie G Schirmer Attributes ---------- scale_factor : float The fidelity error calculated is of some arbitary scale. This factor can be used to scale the fidelity error such that it may represent some physical measure If None is given then it is caculated as 1/2N, where N is the dimension of the drift, when the Dynamics are initialised. 
""" def reset(self): FidelityComputer.reset(self) self.id_text = 'TRACEDIFF' self.scale_factor = None self.uses_onwd_evo = True if not self.parent.prop_computer.grad_exact: raise errors.UsageError( "This FidelityComputer can only be" " used with an exact gradient PropagatorComputer.") self.apply_params() def init_comp(self): """ initialises the computer based on the configuration of the Dynamics Calculates the scale_factor is not already set """ if self.scale_factor is None: self.scale_factor = 1.0 / (2.0*self.parent.get_drift_dim()) if self.log_level <= logging.DEBUG: logger.debug("Scale factor calculated as {}".format( self.scale_factor)) def get_fid_err(self): """ Gets the absolute error in the fidelity """ if not self.fidelity_current: dyn = self.parent dyn.compute_evolution() n_ts = dyn.num_tslots evo_final = dyn._fwd_evo[n_ts] evo_f_diff = dyn._target - evo_final if self.log_level <= logging.DEBUG_VERBOSE: logger.log(logging.DEBUG_VERBOSE, "Calculating TraceDiff " "fidelity...\n Target:\n{}\n Evo final:\n{}\n" "Evo final diff:\n{}".format(dyn._target, evo_final, evo_f_diff)) # Calculate the fidelity error using the trace difference norm # Note that the value should have not imagnary part, so using # np.real, just avoids the complex casting warning if dyn.oper_dtype == Qobj: self.fid_err = self.scale_factor*np.real( (evo_f_diff.dag()*evo_f_diff).tr()) else: self.fid_err = self.scale_factor*np.real(_trace( evo_f_diff.conj().T.dot(evo_f_diff))) if np.isnan(self.fid_err): self.fid_err = np.Inf if dyn.stats is not None: dyn.stats.num_fidelity_computes += 1 self.fidelity_current = True if self.log_level <= logging.DEBUG: logger.debug("Fidelity error: {}".format(self.fid_err)) return self.fid_err def get_fid_err_gradient(self): """ Returns the normalised gradient of the fidelity error in a (nTimeslots x n_ctrls) array The gradients are cached in case they are requested mutliple times between control updates (although this is not typically found to happen) """ if not 
self.fid_err_grad_current: dyn = self.parent self.fid_err_grad = self.compute_fid_err_grad() self.fid_err_grad_current = True if dyn.stats is not None: dyn.stats.num_grad_computes += 1 self.grad_norm = np.sqrt(np.sum(self.fid_err_grad**2)) if self.log_level <= logging.DEBUG_INTENSE: logger.log(logging.DEBUG_INTENSE, "fidelity error gradients:\n" "{}".format(self.fid_err_grad)) if self.log_level <= logging.DEBUG: logger.debug("Gradient norm: " "{} ".format(self.grad_norm)) return self.fid_err_grad def compute_fid_err_grad(self): """ Calculate exact gradient of the fidelity error function wrt to each timeslot control amplitudes. Uses the trace difference norm fidelity These are returned as a (nTimeslots x n_ctrls) array """ dyn = self.parent n_ctrls = dyn.num_ctrls n_ts = dyn.num_tslots # create n_ts x n_ctrls zero array for grad start point grad = np.zeros([n_ts, n_ctrls]) dyn.tslot_computer.flag_all_calc_now() dyn.compute_evolution() # loop through all ctrl timeslots calculating gradients time_st = timeit.default_timer() evo_final = dyn._fwd_evo[n_ts] evo_f_diff = dyn._target - evo_final for j in range(n_ctrls): for k in range(n_ts): fwd_evo = dyn._fwd_evo[k] if dyn.oper_dtype == Qobj: evo_grad = dyn._get_prop_grad(k, j)*fwd_evo if k+1 < n_ts: evo_grad = dyn._onwd_evo[k+1]*evo_grad # Note that the value should have not imagnary part, so # using np.real, just avoids the complex casting warning g = -2*self.scale_factor*np.real( (evo_f_diff.dag()*evo_grad).tr()) else: evo_grad = dyn._get_prop_grad(k, j).dot(fwd_evo) if k+1 < n_ts: evo_grad = dyn._onwd_evo[k+1].dot(evo_grad) g = -2*self.scale_factor*np.real(_trace( evo_f_diff.conj().T.dot(evo_grad))) if np.isnan(g): g = np.Inf grad[k, j] = g if dyn.stats is not None: dyn.stats.wall_time_gradient_compute += \ timeit.default_timer() - time_st return grad class FidCompTraceDiffApprox(FidCompTraceDiff): """ As FidCompTraceDiff, except uses the finite difference method to compute approximate gradients Attributes ---------- 
epsilon : float control amplitude offset to use when approximating the gradient wrt a timeslot control amplitude """ def reset(self): FidelityComputer.reset(self) self.id_text = 'TDAPPROX' self.uses_onwd_evo = True self.scale_factor = None self.epsilon = 0.001 self.apply_params() def compute_fid_err_grad(self): """ Calculates gradient of function wrt to each timeslot control amplitudes. Note these gradients are not normalised They are calulated These are returned as a (nTimeslots x n_ctrls) array """ dyn = self.parent prop_comp = dyn.prop_computer n_ctrls = dyn.num_ctrls n_ts = dyn.num_tslots if self.log_level >= logging.DEBUG: logger.debug("Computing fidelity error gradient") # create n_ts x n_ctrls zero array for grad start point grad = np.zeros([n_ts, n_ctrls]) dyn.tslot_computer.flag_all_calc_now() dyn.compute_evolution() curr_fid_err = self.get_fid_err() # loop through all ctrl timeslots calculating gradients time_st = timeit.default_timer() for j in range(n_ctrls): for k in range(n_ts): fwd_evo = dyn._fwd_evo[k] prop_eps = prop_comp._compute_diff_prop(k, j, self.epsilon) if dyn.oper_dtype == Qobj: evo_final_eps = fwd_evo*prop_eps if k+1 < n_ts: evo_final_eps = evo_final_eps*dyn._onwd_evo[k+1] evo_f_diff_eps = dyn._target - evo_final_eps # Note that the value should have not imagnary part, so # using np.real, just avoids the complex casting warning fid_err_eps = self.scale_factor*np.real( (evo_f_diff_eps.dag()*evo_f_diff_eps).tr()) else: evo_final_eps = fwd_evo.dot(prop_eps) if k+1 < n_ts: evo_final_eps = evo_final_eps.dot(dyn._onwd_evo[k+1]) evo_f_diff_eps = dyn._target - evo_final_eps fid_err_eps = self.scale_factor*np.real(_trace( evo_f_diff_eps.conj().T.dot(evo_f_diff_eps))) g = (fid_err_eps - curr_fid_err)/self.epsilon if np.isnan(g): g = np.Inf grad[k, j] = g if dyn.stats is not None: dyn.stats.wall_time_gradient_compute += \ timeit.default_timer() - time_st return grad 
qutip-4.4.1/qutip/control/grape.py000066400000000000000000000470631352460343600171740ustar00rootroot00000000000000# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### """ This module contains functions that implement the GRAPE algorithm for calculating pulse sequences for quantum systems. 
""" __all__ = ['plot_grape_control_fields', 'grape_unitary', 'cy_grape_unitary', 'grape_unitary_adaptive'] import warnings import time import numpy as np from scipy.interpolate import interp1d import scipy.sparse as sp from qutip.qobj import Qobj from qutip.ui.progressbar import BaseProgressBar from qutip.control.cy_grape import cy_overlap, cy_grape_inner from qutip.qip.gates import gate_sequence_product import qutip.logging_utils logger = qutip.logging_utils.get_logger() class GRAPEResult: """ Class for representing the result of a GRAPE simulation. Attributes ---------- u : array GRAPE control pulse matrix. H_t : time-dependent Hamiltonian The time-dependent Hamiltonian that realize the GRAPE pulse sequence. U_f : Qobj The final unitary transformation that is realized by the evolution of the system with the GRAPE generated pulse sequences. """ def __init__(self, u=None, H_t=None, U_f=None): self.u = u self.H_t = H_t self.U_f = U_f def plot_grape_control_fields(times, u, labels, uniform_axes=False): """ Plot a series of plots showing the GRAPE control fields given in the given control pulse matrix u. Parameters ---------- times : array Time coordinate array. u : array Control pulse matrix. labels : list List of labels for each control pulse sequence in the control pulse matrix. uniform_axes : bool Whether or not to plot all pulse sequences using the same y-axis scale. 
""" import matplotlib.pyplot as plt R, J, M = u.shape fig, axes = plt.subplots(J, 1, figsize=(8, 2 * J), squeeze=False) y_max = abs(u).max() for r in range(R): for j in range(J): if r == R - 1: lw, lc, alpha = 2.0, 'k', 1.0 axes[j, 0].set_ylabel(labels[j], fontsize=18) axes[j, 0].set_xlabel(r'$t$', fontsize=18) axes[j, 0].set_xlim(0, times[-1]) else: lw, lc, alpha = 0.5, 'b', 0.25 axes[j, 0].step(times, u[r, j, :], lw=lw, color=lc, alpha=alpha) if uniform_axes: axes[j, 0].set_ylim(-y_max, y_max) fig.tight_layout() return fig, axes def _overlap(A, B): return (A.dag() * B).tr() / A.shape[0] # return cy_overlap(A.data, B.data) def grape_unitary(U, H0, H_ops, R, times, eps=None, u_start=None, u_limits=None, interp_kind='linear', use_interp=False, alpha=None, beta=None, phase_sensitive=True, progress_bar=BaseProgressBar()): """ Calculate control pulses for the Hamiltonian operators in H_ops so that the unitary U is realized. Experimental: Work in progress. Parameters ---------- U : Qobj Target unitary evolution operator. H0 : Qobj Static Hamiltonian (that cannot be tuned by the control fields). H_ops: list of Qobj A list of operators that can be tuned in the Hamiltonian via the control fields. R : int Number of GRAPE iterations. time : array / list Array of time coordinates for control pulse evalutation. u_start : array Optional array with initial control pulse values. Returns ------- Instance of GRAPEResult, which contains the control pulses calculated with GRAPE, a time-dependent Hamiltonian that is defined by the control pulses, as well as the resulting propagator. 
""" if eps is None: eps = 0.1 * (2 * np.pi) / (times[-1]) M = len(times) J = len(H_ops) u = np.zeros((R, J, M)) if u_limits and len(u_limits) != 2: raise ValueError("u_limits must be a list with two values") if u_limits: warnings.warn("Caution: Using experimental feature u_limits") if u_limits and u_start: # make sure that no values in u0 violates the u_limits conditions u_start = np.array(u_start) u_start[u_start < u_limits[0]] = u_limits[0] u_start[u_start > u_limits[1]] = u_limits[1] if u_start is not None: for idx, u0 in enumerate(u_start): u[0, idx, :] = u0 if beta: warnings.warn("Causion: Using experimental feature time-penalty") progress_bar.start(R) for r in range(R - 1): progress_bar.update(r) dt = times[1] - times[0] if use_interp: ip_funcs = [interp1d(times, u[r, j, :], kind=interp_kind, bounds_error=False, fill_value=u[r, j, -1]) for j in range(J)] def _H_t(t, args=None): return H0 + sum([float(ip_funcs[j](t)) * H_ops[j] for j in range(J)]) U_list = [(-1j * _H_t(times[idx]) * dt).expm() for idx in range(M-1)] else: def _H_idx(idx): return H0 + sum([u[r, j, idx] * H_ops[j] for j in range(J)]) U_list = [(-1j * _H_idx(idx) * dt).expm() for idx in range(M-1)] U_f_list = [] U_b_list = [] U_f = 1 U_b = 1 for n in range(M - 1): U_f = U_list[n] * U_f U_f_list.append(U_f) U_b_list.insert(0, U_b) U_b = U_list[M - 2 - n].dag() * U_b for j in range(J): for m in range(M-1): P = U_b_list[m] * U Q = 1j * dt * H_ops[j] * U_f_list[m] if phase_sensitive: du = - _overlap(P, Q) else: du = - 2 * _overlap(P, Q) * _overlap(U_f_list[m], P) if alpha: # penalty term for high power control signals u du += -2 * alpha * u[r, j, m] * dt if beta: # penalty term for late control signals u du += -2 * beta * m * u[r, j, m] * dt u[r + 1, j, m] = u[r, j, m] + eps * du.real if u_limits: if u[r + 1, j, m] < u_limits[0]: u[r + 1, j, m] = u_limits[0] elif u[r + 1, j, m] > u_limits[1]: u[r + 1, j, m] = u_limits[1] u[r + 1, j, -1] = u[r + 1, j, -2] if use_interp: ip_funcs = [interp1d(times, u[R 
- 1, j, :], kind=interp_kind, bounds_error=False, fill_value=u[R - 1, j, -1]) for j in range(J)] H_td_func = [H0] + [[H_ops[j], lambda t, args, j=j: ip_funcs[j](t)] for j in range(J)] else: H_td_func = [H0] + [[H_ops[j], u[-1, j, :]] for j in range(J)] progress_bar.finished() # return U_f_list[-1], H_td_func, u return GRAPEResult(u=u, U_f=U_f_list[-1], H_t=H_td_func) def cy_grape_unitary(U, H0, H_ops, R, times, eps=None, u_start=None, u_limits=None, interp_kind='linear', use_interp=False, alpha=None, beta=None, phase_sensitive=True, progress_bar=BaseProgressBar()): """ Calculate control pulses for the Hamitonian operators in H_ops so that the unitary U is realized. Experimental: Work in progress. Parameters ---------- U : Qobj Target unitary evolution operator. H0 : Qobj Static Hamiltonian (that cannot be tuned by the control fields). H_ops: list of Qobj A list of operators that can be tuned in the Hamiltonian via the control fields. R : int Number of GRAPE iterations. time : array / list Array of time coordinates for control pulse evalutation. u_start : array Optional array with initial control pulse values. Returns ------- Instance of GRAPEResult, which contains the control pulses calculated with GRAPE, a time-dependent Hamiltonian that is defined by the control pulses, as well as the resulting propagator. 
""" if eps is None: eps = 0.1 * (2 * np.pi) / (times[-1]) M = len(times) J = len(H_ops) u = np.zeros((R, J, M)) H_ops_data = [H_op.data for H_op in H_ops] if u_limits and len(u_limits) != 2: raise ValueError("u_limits must be a list with two values") if u_limits: warnings.warn("Causion: Using experimental feature u_limits") if u_limits and u_start: # make sure that no values in u0 violates the u_limits conditions u_start = np.array(u_start) u_start[u_start < u_limits[0]] = u_limits[0] u_start[u_start > u_limits[1]] = u_limits[1] if u_limits: use_u_limits = 1 u_min = u_limits[0] u_max = u_limits[1] else: use_u_limits = 0 u_min = 0.0 u_max = 0.0 if u_start is not None: for idx, u0 in enumerate(u_start): u[0, idx, :] = u0 if beta: warnings.warn("Causion: Using experimental feature time-penalty") alpha_val = alpha if alpha else 0.0 beta_val = beta if beta else 0.0 progress_bar.start(R) for r in range(R - 1): progress_bar.update(r) dt = times[1] - times[0] if use_interp: ip_funcs = [interp1d(times, u[r, j, :], kind=interp_kind, bounds_error=False, fill_value=u[r, j, -1]) for j in range(J)] def _H_t(t, args=None): return H0 + sum([float(ip_funcs[j](t)) * H_ops[j] for j in range(J)]) U_list = [(-1j * _H_t(times[idx]) * dt).expm().data for idx in range(M-1)] else: def _H_idx(idx): return H0 + sum([u[r, j, idx] * H_ops[j] for j in range(J)]) U_list = [(-1j * _H_idx(idx) * dt).expm().data for idx in range(M-1)] U_f_list = [] U_b_list = [] U_f = 1 U_b = sp.eye(*(U.shape)) for n in range(M - 1): U_f = U_list[n] * U_f U_f_list.append(U_f) U_b_list.insert(0, U_b) U_b = U_list[M - 2 - n].T.conj().tocsr() * U_b cy_grape_inner(U.data, u, r, J, M, U_b_list, U_f_list, H_ops_data, dt, eps, alpha_val, beta_val, phase_sensitive, use_u_limits, u_min, u_max) if use_interp: ip_funcs = [interp1d(times, u[R - 1, j, :], kind=interp_kind, bounds_error=False, fill_value=u[R - 1, j, -1]) for j in range(J)] H_td_func = [H0] + [[H_ops[j], lambda t, args, j=j: ip_funcs[j](t)] for j in range(J)] 
else: H_td_func = [H0] + [[H_ops[j], u[-1, j, :]] for j in range(J)] progress_bar.finished() return GRAPEResult(u=u, U_f=Qobj(U_f_list[-1], dims=U.dims), H_t=H_td_func) def grape_unitary_adaptive(U, H0, H_ops, R, times, eps=None, u_start=None, u_limits=None, interp_kind='linear', use_interp=False, alpha=None, beta=None, phase_sensitive=False, overlap_terminate=1.0, progress_bar=BaseProgressBar()): """ Calculate control pulses for the Hamiltonian operators in H_ops so that the unitary U is realized. Experimental: Work in progress. Parameters ---------- U : Qobj Target unitary evolution operator. H0 : Qobj Static Hamiltonian (that cannot be tuned by the control fields). H_ops: list of Qobj A list of operators that can be tuned in the Hamiltonian via the control fields. R : int Number of GRAPE iterations. time : array / list Array of time coordinates for control pulse evalutation. u_start : array Optional array with initial control pulse values. Returns ------- Instance of GRAPEResult, which contains the control pulses calculated with GRAPE, a time-dependent Hamiltonian that is defined by the control pulses, as well as the resulting propagator. 
""" if eps is None: eps = 0.1 * (2 * np.pi) / (times[-1]) eps_vec = np.array([eps / 2, eps, 2 * eps]) eps_log = np.zeros(R) overlap_log = np.zeros(R) best_k = 0 _k_overlap = np.array([0.0, 0.0, 0.0]) M = len(times) J = len(H_ops) K = len(eps_vec) Uf = [None for _ in range(K)] u = np.zeros((R, J, M, K)) if u_limits and len(u_limits) != 2: raise ValueError("u_limits must be a list with two values") if u_limits: warnings.warn("Causion: Using experimental feature u_limits") if u_limits and u_start: # make sure that no values in u0 violates the u_limits conditions u_start = np.array(u_start) u_start[u_start < u_limits[0]] = u_limits[0] u_start[u_start > u_limits[1]] = u_limits[1] if u_start is not None: for idx, u0 in enumerate(u_start): for k in range(K): u[0, idx, :, k] = u0 if beta: warnings.warn("Causion: Using experimental feature time-penalty") if phase_sensitive: _fidelity_function = lambda x: x else: _fidelity_function = lambda x: abs(x) ** 2 best_k = 1 _r = 0 _prev_overlap = 0 progress_bar.start(R) for r in range(R - 1): progress_bar.update(r) _r = r eps_log[r] = eps_vec[best_k] logger.debug("eps_vec: {}".format(eps_vec)) _t0 = time.time() dt = times[1] - times[0] if use_interp: ip_funcs = [interp1d(times, u[r, j, :, best_k], kind=interp_kind, bounds_error=False, fill_value=u[r, j, -1, best_k]) for j in range(J)] def _H_t(t, args=None): return H0 + sum([float(ip_funcs[j](t)) * H_ops[j] for j in range(J)]) U_list = [(-1j * _H_t(times[idx]) * dt).expm() for idx in range(M-1)] else: def _H_idx(idx): return H0 + sum([u[r, j, idx, best_k] * H_ops[j] for j in range(J)]) U_list = [(-1j * _H_idx(idx) * dt).expm() for idx in range(M-1)] logger.debug("Time 1: %fs" % (time.time() - _t0)) _t0 = time.time() U_f_list = [] U_b_list = [] U_f = 1 U_b = 1 for m in range(M - 1): U_f = U_list[m] * U_f U_f_list.append(U_f) U_b_list.insert(0, U_b) U_b = U_list[M - 2 - m].dag() * U_b logger.debug("Time 2: %fs" % (time.time() - _t0)) _t0 = time.time() for j in range(J): for m in 
range(M-1): P = U_b_list[m] * U Q = 1j * dt * H_ops[j] * U_f_list[m] if phase_sensitive: du = - cy_overlap(P.data, Q.data) else: du = (- 2 * cy_overlap(P.data, Q.data) * cy_overlap(U_f_list[m].data, P.data)) if alpha: # penalty term for high power control signals u du += -2 * alpha * u[r, j, m, best_k] * dt if beta: # penalty term for late control signals u du += -2 * beta * k ** 2 * u[r, j, k] * dt for k, eps_val in enumerate(eps_vec): u[r + 1, j, m, k] = u[r, j, m, k] + eps_val * du.real if u_limits: if u[r + 1, j, m, k] < u_limits[0]: u[r + 1, j, m, k] = u_limits[0] elif u[r + 1, j, m, k] > u_limits[1]: u[r + 1, j, m, k] = u_limits[1] u[r + 1, j, -1, :] = u[r + 1, j, -2, :] logger.debug("Time 3: %fs" % (time.time() - _t0)) _t0 = time.time() for k, eps_val in enumerate(eps_vec): def _H_idx(idx): return H0 + sum([u[r + 1, j, idx, k] * H_ops[j] for j in range(J)]) U_list = [(-1j * _H_idx(idx) * dt).expm() for idx in range(M-1)] Uf[k] = gate_sequence_product(U_list) _k_overlap[k] = _fidelity_function(cy_overlap(Uf[k].data, U.data)).real best_k = np.argmax(_k_overlap) logger.debug("k_overlap: ", _k_overlap, best_k) if _prev_overlap > _k_overlap[best_k]: logger.debug("Regression, stepping back with smaller eps.") u[r + 1, :, :, :] = u[r, :, :, :] eps_vec /= 2 else: if best_k == 0: eps_vec /= 2 elif best_k == 2: eps_vec *= 2 _prev_overlap = _k_overlap[best_k] overlap_log[r] = _k_overlap[best_k] if overlap_terminate < 1.0: if _k_overlap[best_k] > overlap_terminate: logger.info("Reached target fidelity, terminating.") break logger.debug("Time 4: %fs" % (time.time() - _t0)) _t0 = time.time() if use_interp: ip_funcs = [interp1d(times, u[_r, j, :, best_k], kind=interp_kind, bounds_error=False, fill_value=u[R - 1, j, -1]) for j in range(J)] H_td_func = [H0] + [[H_ops[j], lambda t, args, j=j: ip_funcs[j](t)] for j in range(J)] else: H_td_func = [H0] + [[H_ops[j], u[_r, j, :, best_k]] for j in range(J)] progress_bar.finished() result = GRAPEResult(u=u[:_r, :, :, best_k], 
U_f=Uf[best_k], H_t=H_td_func) result.eps = eps_log result.overlap = overlap_log return result qutip-4.4.1/qutip/control/io.py000066400000000000000000000066031352460343600165000ustar00rootroot00000000000000# -*- coding: utf-8 -*- # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2016 and later, Alexander J G Pitchford # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### import os import errno def create_dir(dir_name, desc='output'): """ Checks if the given directory exists, if not it is created Returns ------- dir_ok : boolean True if directory exists (previously or created) False if failed to create the directory dir_name : string Path to the directory, which may be been made absolute msg : string Error msg if directory creation failed """ dir_ok = True if '~' in dir_name: dir_name = os.path.expanduser(dir_name) elif not os.path.isabs(dir_name): # Assume relative path from cwd given dir_name = os.path.abspath(dir_name) msg = "{} directory is ready".format(desc) errmsg = "Failed to create {} directory:\n{}\n".format(desc, dir_name) if os.path.exists(dir_name): if os.path.isfile(dir_name): dir_ok = False errmsg += "A file already exists with the same name" else: try: os.makedirs(dir_name) msg += ("directory {} created " "(recursively)".format(dir_name)) except OSError as e: if e.errno == errno.EEXIST: msg += ("Assume directory {} created " "(recursively) by some other process. ".format(dir_name)) else: dir_ok = False errmsg += "Underling error (makedirs) :({}) {}".format( type(e).__name__, e) if dir_ok: return dir_ok, dir_name, msg else: return dir_ok, dir_name, errmsg qutip-4.4.1/qutip/control/loadparams.py000066400000000000000000000213141352460343600202100ustar00rootroot00000000000000# -*- coding: utf-8 -*- # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2014 and later, Alexander J G Pitchford # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. 
Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### # @author: Alexander Pitchford # @email1: agp1@aber.ac.uk # @email2: alex.pitchford@gmail.com # @organization: Aberystwyth University # @supervisor: Daniel Burgarth """ Loads parameters for config, termconds, dynamics and Optimiser objects from a parameter (ini) file with appropriate sections and options, these being Sections: optimconfig, termconds, dynamics, optimizer The options are assumed to be properties for these classes Note that new attributes will be created, even if they are not usually defined for that object """ import numpy as np try: # Python 3 from configparser import ConfigParser except: # Python 2 from ConfigParser import SafeConfigParser # QuTiP logging from qutip import Qobj import qutip.logging_utils as logging logger = logging.get_logger() def _is_string(var): try: if isinstance(var, basestring): return True except NameError: try: if isinstance(var, str): return True except: return False except: return False return False def load_parameters(file_name, config=None, term_conds=None, dynamics=None, optim=None, pulsegen=None, obj=None, section=None): """ Import parameters for the optimisation objects Will throw a ValueError if file_name does not exist """ try: parser = SafeConfigParser() except: parser = ConfigParser() readFiles = parser.read(str(file_name)) if len(readFiles) == 0: raise ValueError("Parameter file '{}' not found".format(file_name)) if config is not None: s = 'optimconfig' try: attr_names = parser.options(s) for a in attr_names: set_param(parser, s, a, config, a) except Exception as e: logger.warn("Unable to load {} parameters:({}) {}".format( s, type(e).__name__, e)) if term_conds is not None: s = 'termconds' try: attr_names = parser.options(s) for a in attr_names: set_param(parser, s, a, term_conds, a) except Exception as e: logger.warn("Unable to load {} parameters:({}) {}".format( s, type(e).__name__, e)) if dynamics is not None: s = 'dynamics' try: 
attr_names = parser.options(s) for a in attr_names: set_param(parser, s, a, dynamics, a) except Exception as e: logger.warn("Unable to load {} parameters:({}) {}".format( s, type(e).__name__, e)) if optim is not None: s = 'optimizer' try: attr_names = parser.options(s) for a in attr_names: set_param(parser, s, a, optim, a) except Exception as e: logger.warn("Unable to load {} parameters:({}) {}".format( s, type(e).__name__, e)) if pulsegen is not None: s = 'pulsegen' try: attr_names = parser.options(s) for a in attr_names: set_param(parser, s, a, pulsegen, a) except Exception as e: logger.warn("Unable to load {} parameters:({}) {}".format( s, type(e).__name__, e)) if obj is not None: if not _is_string(section): raise ValueError("Section name must be given when loading " "parameters of general object") s = section try: attr_names = parser.options(s) for a in attr_names: set_param(parser, s, a, obj, a) except Exception as e: logger.warn("Unable to load {} parameters:({}) {}".format( s, type(e).__name__, e)) def set_param(parser, section, option, obj, attrib_name): """ Set the object attribute value based on the option value from the config file. 
If the attribute exists already, then its datatype is used to call the appropriate parser.get method Otherwise the parameter is assumed to be a string """ val = parser.get(section, attrib_name) dtype = None if hasattr(obj, attrib_name): a = getattr(obj, attrib_name) dtype = type(a) else: logger.warn("Unable to load parameter {}.{}\n" "Attribute does not exist".format(section, attrib_name)) return if isinstance(a, Qobj): try: q = Qobj(eval(val)) except: raise ValueError("Value '{}' cannot be used to generate a Qobj" " in parameter file [{}].{}".format( val, section, option)) setattr(obj, attrib_name, q) elif isinstance(a, np.ndarray): try: arr = np.array(eval(val), dtype=a.dtype) except: raise ValueError("Value '{}' cannot be used to generate an ndarray" " in parameter file [{}].{}".format( val, section, option)) setattr(obj, attrib_name, arr) elif isinstance(a, list): try: l = list(eval(val)) except: raise ValueError("Value '{}' cannot be used to generate a list" " in parameter file [{}].{}".format( val, section, option)) setattr(obj, attrib_name, l) elif dtype == float: try: f = parser.getfloat(section, attrib_name) except: try: f = eval(val) except: raise ValueError( "Value '{}' cannot be cast or evaluated as a " "float in parameter file [{}].{}".format( val, section, option)) setattr(obj, attrib_name, f) elif dtype == complex: try: c = complex(val) except: raise ValueError("Value '{}' cannot be cast as complex" " in parameter file [{}].{}".format( val, section, option)) setattr(obj, attrib_name, c) elif dtype == int: try: i = parser.getint(section, attrib_name) except: raise ValueError("Value '{}' cannot be cast as an int" " in parameter file [{}].{}".format( val, section, option)) setattr(obj, attrib_name, i) elif dtype == bool: try: b = parser.getboolean(section, attrib_name) except: raise ValueError("Value '{}' cannot be cast as a bool" " in parameter file [{}].{}".format( val, section, option)) setattr(obj, attrib_name, b) else: try: val = 
parser.getfloat(section, attrib_name) except: try: val = parser.getboolean(section, attrib_name) except: pass setattr(obj, attrib_name, val) qutip-4.4.1/qutip/control/optimconfig.py000066400000000000000000000130601352460343600204020ustar00rootroot00000000000000# -*- coding: utf-8 -*- # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2014 and later, Alexander J G Pitchford # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### # @author: Alexander Pitchford # @email1: agp1@aber.ac.uk # @email2: alex.pitchford@gmail.com # @organization: Aberystwyth University # @supervisor: Daniel Burgarth """ Configuration parameters for control pulse optimisation """ import numpy as np # QuTiP logging import qutip.logging_utils logger = qutip.logging_utils.get_logger() import qutip.control.io as qtrlio class OptimConfig(object): """ Configuration parameters for control pulse optimisation Attributes ---------- log_level : integer level of messaging output from the logger. Options are attributes of qutip.logging_utils, in decreasing levels of messaging, are: DEBUG_INTENSE, DEBUG_VERBOSE, DEBUG, INFO, WARN, ERROR, CRITICAL Anything WARN or above is effectively 'quiet' execution, assuming everything runs as expected. The default NOTSET implies that the level will be taken from the QuTiP settings file, which by default is WARN dyn_type : string Dynamics type, i.e. the type of matrix used to describe the dynamics. Options are UNIT, GEN_MAT, SYMPL (see Dynamics classes for details) prop_type : string Propagator type i.e. 
the method used to calculate the propagtors and propagtor gradient for each timeslot options are DEF, APPROX, DIAG, FRECHET, AUG_MAT DEF will use the default for the specific dyn_type (see PropagatorComputer classes for details) fid_type : string Fidelity error (and fidelity error gradient) computation method Options are DEF, UNIT, TRACEDIFF, TD_APPROX DEF will use the default for the specific dyn_type (See FidelityComputer classes for details) """ def __init__(self): self.reset() def reset(self): self.log_level = logger.getEffectiveLevel() self.alg = 'GRAPE' # Alts: 'CRAB' # *** AJGP 2015-04-21: This has been replaced optim_method #self.optim_alg = 'LBFGSB' self.optim_method = 'DEF' self.dyn_type = 'DEF' self.fid_type = 'DEF' # *** AJGP 2015-04-21: phase_option has been moved to the FidComputer #self.phase_option = 'PSU' # *** AJGP 2015-04-21: amp_update_mode has been replaced by tslot_type #self.amp_update_mode = 'ALL' # Alts: 'DYNAMIC' self.fid_type = 'DEF' self.tslot_type = 'DEF' self.init_pulse_type = 'DEF' ###################### # Note the following parameteres are for constrained optimisation # methods e.g. 
L-BFGS-B # *** AJGP 2015-04-21: # These have been moved to the OptimizerLBFGSB class # self.amp_lbound = -np.Inf # self.amp_ubound = np.Inf # self.max_metric_corr = 10 # These moved to termination conditions # self.accuracy_factor = 1e7 # *** # #################### @property def log_level(self): return logger.level @log_level.setter def log_level(self, lvl): """ Set the log_level attribute and set the level of the logger that is call logger.setLevel(lvl) """ logger.setLevel(lvl) def check_create_output_dir(self, output_dir, desc='output'): """ Checks if the given directory exists, if not it is created Returns ------- dir_ok : boolean True if directory exists (previously or created) False if failed to create the directory output_dir : string Path to the directory, which may be been made absolute msg : string Error msg if directory creation failed """ return qtrlio.create_dir(output_dir, desc=desc) # create global instance optimconfig = OptimConfig() qutip-4.4.1/qutip/control/optimizer.py000066400000000000000000001415421352460343600201150ustar00rootroot00000000000000# -*- coding: utf-8 -*- # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2014 and later, Alexander J G Pitchford # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. 
# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### # @author: Alexander Pitchford # @email1: agp1@aber.ac.uk # @email2: alex.pitchford@gmail.com # @organization: Aberystwyth University # @supervisor: Daniel Burgarth """ Classes here are expected to implement a run_optimization function that will use some method for optimising the control pulse, as defined by the control amplitudes. The system that the pulse acts upon are defined by the Dynamics object that must be passed in the instantiation. The methods are typically N dimensional function optimisers that find the minima of a fidelity error function. Note the number of variables for the fidelity function is the number of control timeslots, i.e. n_ctrls x Ntimeslots The methods will call functions on the Dynamics.fid_computer object, one or many times per interation, to get the fidelity error and gradient wrt to the amplitudes. 
The optimisation will stop when one of the termination conditions are met, for example: the fidelity aim has be reached, a local minima has been found, the maximum time allowed has been exceeded These function optimisation methods are so far from SciPy.optimize The two methods implemented are: BFGS - Broyden–Fletcher–Goldfarb–Shanno algorithm This a quasi second order Newton method. It uses successive calls to the gradient function to make an estimation of the curvature (Hessian) and hence direct its search for the function minima The SciPy implementation is pure Python and hance is execution speed is not high use subclass: OptimizerBFGS L-BFGS-B - Bounded, limited memory BFGS This a version of the BFGS method where the Hessian approximation is only based on a set of the most recent gradient calls. It generally performs better where the are a large number of variables The SciPy implementation of L-BFGS-B is wrapper around a well established and actively maintained implementation in Fortran Its is therefore very fast. # See SciPy documentation for credit and details on the # scipy.optimize.fmin_l_bfgs_b function use subclass: OptimizerLBFGSB The baseclass Optimizer implements the function wrappers to the fidelity error, gradient, and iteration callback functions. These are called from the within the SciPy optimisation functions. The subclasses implement the algorithm specific pulse optimisation function. 
""" import os import numpy as np import timeit import scipy.optimize as spopt import copy import collections # QuTiP from qutip import Qobj import qutip.logging_utils as logging logger = logging.get_logger() # QuTiP control modules import qutip.control.optimresult as optimresult import qutip.control.termcond as termcond import qutip.control.errors as errors import qutip.control.dynamics as dynamics import qutip.control.pulsegen as pulsegen import qutip.control.dump as qtrldump def _is_string(var): try: if isinstance(var, basestring): return True except NameError: try: if isinstance(var, str): return True except: return False except: return False return False class Optimizer(object): """ Base class for all control pulse optimisers. This class should not be instantiated, use its subclasses This class implements the fidelity, gradient and interation callback functions. All subclass objects must be initialised with a OptimConfig instance - various configuration options Dynamics instance - describes the dynamics of the (quantum) system to be control optimised Attributes ---------- log_level : integer level of messaging output from the logger. Options are attributes of qutip.logging_utils, in decreasing levels of messaging, are: DEBUG_INTENSE, DEBUG_VERBOSE, DEBUG, INFO, WARN, ERROR, CRITICAL Anything WARN or above is effectively 'quiet' execution, assuming everything runs as expected. The default NOTSET implies that the level will be taken from the QuTiP settings file, which by default is WARN params: Dictionary The key value pairs are the attribute name and value Note: attributes are created if they do not exist already, and are overwritten if they do. alg : string Algorithm to use in pulse optimisation. 
Options are: 'GRAPE' (default) - GRadient Ascent Pulse Engineering 'CRAB' - Chopped RAndom Basis alg_params : Dictionary options that are specific to the pulse optim algorithm that is GRAPE or CRAB disp_conv_msg : bool Set true to display a convergence message (for scipy.optimize.minimize methods anyway) optim_method : string a scipy.optimize.minimize method that will be used to optimise the pulse for minimum fidelity error method_params : Dictionary Options for the optim_method. Note that where there is an equivalent attribute of this instance or the termination_conditions (for example maxiter) it will override an value in these options approx_grad : bool If set True then the method will approximate the gradient itself (if it has requirement and facility for this) This will mean that the fid_err_grad_wrapper will not get called Note it should be left False when using the Dynamics to calculate approximate gradients Note it is set True automatically when the alg is CRAB amp_lbound : float or list of floats lower boundaries for the control amplitudes Can be a scalar value applied to all controls or a list of bounds for each control amp_ubound : float or list of floats upper boundaries for the control amplitudes Can be a scalar value applied to all controls or a list of bounds for each control bounds : List of floats Bounds for the parameters. If not set before the run_optimization call then the list is built automatically based on the amp_lbound and amp_ubound attributes. Setting this attribute directly allows specific bounds to be set for individual parameters. 
Note: Only some methods use bounds dynamics : Dynamics (subclass instance) describes the dynamics of the (quantum) system to be control optimised (see Dynamics classes for details) config : OptimConfig instance various configuration options (see OptimConfig for details) termination_conditions : TerminationCondition instance attributes determine when the optimisation will end pulse_generator : PulseGen (subclass instance) (can be) used to create initial pulses not used by the class, but set by pulseoptim.create_pulse_optimizer stats : Stats attributes of which give performance stats for the optimisation set to None to reduce overhead of calculating stats. Note it is (usually) shared with the Dynamics instance dump : :class:`dump.OptimDump` Container for data dumped during the optimisation. Can be set by specifying the dumping level or set directly. Note this is mainly intended for user and a development debugging but could be used for status information during a long optimisation. dumping : string level of data dumping: NONE, SUMMARY, FULL or CUSTOM See property docstring for details dump_to_file : bool If set True then data will be dumped to file during the optimisation dumping will be set to SUMMARY during init_optim if dump_to_file is True and dumping not set. Default is False dump_dir : string Basically a link to dump.dump_dir. Exists so that it can be set through optim_params. If dump is None then will return None or will set dumping to SUMMARY when setting a path iter_summary : :class:`OptimIterSummary` Summary of the most recent iteration. 
Note this is only set if dummping is on """ def __init__(self, config, dyn, params=None): self.dynamics = dyn self.config = config self.params = params self.reset() dyn.parent = self def reset(self): self.log_level = self.config.log_level self.id_text = 'OPTIM' self.termination_conditions = None self.pulse_generator = None self.disp_conv_msg = False self.iteration_steps = None self.record_iteration_steps=False self.alg = 'GRAPE' self.alg_params = None self.method = 'l_bfgs_b' self.method_params = None self.method_options = None self.approx_grad = False self.amp_lbound = None self.amp_ubound = None self.bounds = None self.num_iter = 0 self.num_fid_func_calls = 0 self.num_grad_func_calls = 0 self.stats = None self.wall_time_optim_start = 0.0 self.dump_to_file = False self.dump = None self.iter_summary = None # AJGP 2015-04-21: # These (copying from config) are here for backward compatibility if hasattr(self.config, 'amp_lbound'): if self.config.amp_lbound: self.amp_lbound = self.config.amp_lbound if hasattr(self.config, 'amp_ubound'): if self.config.amp_ubound: self.amp_ubound = self.config.amp_ubound self.apply_params() @property def log_level(self): return logger.level @log_level.setter def log_level(self, lvl): """ Set the log_level attribute and set the level of the logger that is call logger.setLevel(lvl) """ logger.setLevel(lvl) def apply_params(self, params=None): """ Set object attributes based on the dictionary (if any) passed in the instantiation, or passed as a parameter This is called during the instantiation automatically. The key value pairs are the attribute name and value Note: attributes are created if they do not exist already, and are overwritten if they do. 
""" if not params: params = self.params if isinstance(params, dict): self.params = params for key in params: setattr(self, key, params[key]) @property def dumping(self): """ The level of data dumping that will occur during the optimisation - NONE : No processing data dumped (Default) - SUMMARY : A summary at each iteration will be recorded - FULL : All logs will be generated and dumped - CUSTOM : Some customised level of dumping When first set to CUSTOM this is equivalent to SUMMARY. It is then up to the user to specify which logs are dumped """ if self.dump is None: lvl = 'NONE' else: lvl = self.dump.level return lvl @dumping.setter def dumping(self, value): if value is None: self.dump = None else: if not _is_string(value): raise TypeError("Value must be string value") lvl = value.upper() if lvl == 'NONE': self.dump = None else: if not isinstance(self.dump, qtrldump.OptimDump): self.dump = qtrldump.OptimDump(self, level=lvl) else: self.dump.level = lvl @property def dump_dir(self): if self.dump: return self.dump.dump_dir else: return None @dump_dir.setter def dump_dir(self, value): if not self.dump: self.dumping = 'SUMMARY' self.dump.dump_dir = value def _create_result(self): """ create the result object and set the initial_amps attribute as the current amplitudes """ result = optimresult.OptimResult() result.initial_fid_err = self.dynamics.fid_computer.get_fid_err() result.initial_amps = self.dynamics.ctrl_amps.copy() result.evo_full_initial = self.dynamics.full_evo.copy() result.time = self.dynamics.time.copy() result.optimizer = self return result def init_optim(self, term_conds): """ Check optimiser attribute status and passed parameters before running the optimisation. This is called by run_optimization, but could called independently to check the configuration. 
""" if term_conds is not None: self.termination_conditions = term_conds term_conds = self.termination_conditions if not isinstance(term_conds, termcond.TerminationConditions): raise errors.UsageError("No termination conditions for the " "optimisation function") if not isinstance(self.dynamics, dynamics.Dynamics): raise errors.UsageError("No dynamics object attribute set") self.dynamics.check_ctrls_initialized() self.apply_method_params() if term_conds.fid_err_targ is None and term_conds.fid_goal is None: raise errors.UsageError("Either the goal or the fidelity " "error tolerance must be set") if term_conds.fid_err_targ is None: term_conds.fid_err_targ = np.abs(1 - term_conds.fid_goal) if term_conds.fid_goal is None: term_conds.fid_goal = 1 - term_conds.fid_err_targ if self.alg == 'CRAB': self.approx_grad = True if self.stats is not None: self.stats.clear() if self.dump_to_file: if self.dump is None: self.dumping = 'SUMMARY' self.dump.write_to_file = True self.dump.create_dump_dir() logger.info("Optimiser dump will be written to:\n{}".format( self.dump.dump_dir)) if self.dump: self.iter_summary = OptimIterSummary() else: self.iter_summary = None self.num_iter = 0 self.num_fid_func_calls = 0 self.num_grad_func_calls = 0 self.iteration_steps = None def _build_method_options(self): """ Creates the method_options dictionary for the scipy.optimize.minimize function based on the attributes of this object and the termination_conditions It assumes that apply_method_params has already been run and hence the method_options attribute may already contain items. 
These values will NOT be overridden """ tc = self.termination_conditions if self.method_options is None: self.method_options = {} mo = self.method_options if 'max_metric_corr' in mo and not 'maxcor' in mo: mo['maxcor'] = mo['max_metric_corr'] elif hasattr(self, 'max_metric_corr') and not 'maxcor' in mo: mo['maxcor'] = self.max_metric_corr if 'accuracy_factor' in mo and not 'ftol' in mo: mo['ftol'] = mo['accuracy_factor'] elif hasattr(tc, 'accuracy_factor') and not 'ftol' in mo: mo['ftol'] = tc.accuracy_factor if tc.max_iterations > 0 and not 'maxiter' in mo: mo['maxiter'] = tc.max_iterations if tc.max_fid_func_calls > 0 and not 'maxfev' in mo: mo['maxfev'] = tc.max_fid_func_calls if tc.min_gradient_norm > 0 and not 'gtol' in mo: mo['gtol'] = tc.min_gradient_norm if not 'disp' in mo: mo['disp'] = self.disp_conv_msg return mo def apply_method_params(self, params=None): """ Loops through all the method_params (either passed here or the method_params attribute) If the name matches an attribute of this object or the termination conditions object, then the value of this attribute is set. 
Otherwise it is assumed to a method_option for the scipy.optimize.minimize function """ if not params: params = self.method_params if isinstance(params, dict): self.method_params = params unused_params = {} for key in params: val = params[key] if hasattr(self, key): setattr(self, key, val) if hasattr(self.termination_conditions, key): setattr(self.termination_conditions, key, val) else: unused_params[key] = val if len(unused_params) > 0: if not isinstance(self.method_options, dict): self.method_options = unused_params else: self.method_options.update(unused_params) def _build_bounds_list(self): cfg = self.config dyn = self.dynamics n_ctrls = dyn.num_ctrls self.bounds = [] for t in range(dyn.num_tslots): for c in range(n_ctrls): if isinstance(self.amp_lbound, list): lb = self.amp_lbound[c] else: lb = self.amp_lbound if isinstance(self.amp_ubound, list): ub = self.amp_ubound[c] else: ub = self.amp_ubound if not lb is None and np.isinf(lb): lb = None if not ub is None and np.isinf(ub): ub = None self.bounds.append((lb, ub)) def run_optimization(self, term_conds=None): """ This default function optimisation method is a wrapper to the scipy.optimize.minimize function. It will attempt to minimise the fidelity error with respect to some parameters, which are determined by _get_optim_var_vals (see below) The optimisation end when one of the passed termination conditions has been met, e.g. target achieved, wall time, or function call or iteration count exceeded. Note these conditions include gradient minimum met (local minima) for methods that use a gradient. The function minimisation method is taken from the optim_method attribute. Note that not all of these methods have been tested. Note that some of these use a gradient and some do not. See the scipy documentation for details. Options specific to the method can be passed setting the method_params attribute. If the parameter term_conds=None, then the termination_conditions attribute must already be set. 
It will be overwritten if the parameter is not None The result is returned in an OptimResult object, which includes the final fidelity, time evolution, reason for termination etc """ self.init_optim(term_conds) term_conds = self.termination_conditions dyn = self.dynamics cfg = self.config self.optim_var_vals = self._get_optim_var_vals() st_time = timeit.default_timer() self.wall_time_optimize_start = st_time if self.stats is not None: self.stats.wall_time_optim_start = st_time self.stats.wall_time_optim_end = 0.0 self.stats.num_iter = 0 if self.bounds is None: self._build_bounds_list() self._build_method_options() result = self._create_result() if self.approx_grad: jac=None else: jac=self.fid_err_grad_wrapper if self.log_level <= logging.INFO: msg = ("Optimising pulse(s) using {} with " "minimise '{}' method").format(self.alg, self.method) if self.approx_grad: msg += " (approx grad)" logger.info(msg) try: opt_res = spopt.minimize( self.fid_err_func_wrapper, self.optim_var_vals, method=self.method, jac=jac, bounds=self.bounds, options=self.method_options, callback=self.iter_step_callback_func) amps = self._get_ctrl_amps(opt_res.x) dyn.update_ctrl_amps(amps) result.termination_reason = opt_res.message # Note the iterations are counted in this object as well # so there are compared here for interest sake only if self.num_iter != opt_res.nit: logger.info("The number of iterations counted {} " " does not match the number reported {} " "by {}".format(self.num_iter, opt_res.nit, self.method)) result.num_iter = opt_res.nit except errors.OptimizationTerminate as except_term: self._interpret_term_exception(except_term, result) end_time = timeit.default_timer() self._add_common_result_attribs(result, st_time, end_time) return result def _get_optim_var_vals(self): """ Generate the 1d array that holds the current variable values of the function to be optimised By default (as used in GRAPE) these are the control amplitudes in each timeslot """ return 
self.dynamics.ctrl_amps.reshape([-1]) def _get_ctrl_amps(self, optim_var_vals): """ Get the control amplitudes from the current variable values of the function to be optimised. that is the 1d array that is passed from the optimisation method Note for GRAPE these are the function optimiser parameters (and this is the default) Returns ------- float array[dynamics.num_tslots, dynamics.num_ctrls] """ amps = optim_var_vals.reshape(self.dynamics.ctrl_amps.shape) return amps def fid_err_func_wrapper(self, *args): """ Get the fidelity error achieved using the ctrl amplitudes passed in as the first argument. This is called by generic optimisation algorithm as the func to the minimised. The argument is the current variable values, i.e. control amplitudes, passed as a flat array. Hence these are reshaped as [nTimeslots, n_ctrls] and then used to update the stored ctrl values (if they have changed) The error is checked against the target, and the optimisation is terminated if the target has been achieved. """ self.num_fid_func_calls += 1 # *** update stats *** if self.stats is not None: self.stats.num_fidelity_func_calls = self.num_fid_func_calls if self.log_level <= logging.DEBUG: logger.debug("fidelity error call {}".format( self.stats.num_fidelity_func_calls)) amps = self._get_ctrl_amps(args[0].copy()) self.dynamics.update_ctrl_amps(amps) tc = self.termination_conditions err = self.dynamics.fid_computer.get_fid_err() if self.iter_summary: self.iter_summary.fid_func_call_num = self.num_fid_func_calls self.iter_summary.fid_err = err if self.dump and self.dump.dump_fid_err: self.dump.update_fid_err_log(err) if err <= tc.fid_err_targ: raise errors.GoalAchievedTerminate(err) if self.num_fid_func_calls > tc.max_fid_func_calls: raise errors.MaxFidFuncCallTerminate() return err def fid_err_grad_wrapper(self, *args): """ Get the gradient of the fidelity error with respect to all of the variables, i.e. 
the ctrl amplidutes in each timeslot This is called by generic optimisation algorithm as the gradients of func to the minimised wrt the variables. The argument is the current variable values, i.e. control amplitudes, passed as a flat array. Hence these are reshaped as [nTimeslots, n_ctrls] and then used to update the stored ctrl values (if they have changed) Although the optimisation algorithms have a check within them for function convergence, i.e. local minima, the sum of the squares of the normalised gradient is checked explicitly, and the optimisation is terminated if this is below the min_gradient_norm condition """ # *** update stats *** self.num_grad_func_calls += 1 if self.stats is not None: self.stats.num_grad_func_calls = self.num_grad_func_calls if self.log_level <= logging.DEBUG: logger.debug("gradient call {}".format( self.stats.num_grad_func_calls)) amps = self._get_ctrl_amps(args[0].copy()) self.dynamics.update_ctrl_amps(amps) fid_comp = self.dynamics.fid_computer # gradient_norm_func is a pointer to the function set in the config # that returns the normalised gradients grad = fid_comp.get_fid_err_gradient() if self.iter_summary: self.iter_summary.grad_func_call_num = self.num_grad_func_calls self.iter_summary.grad_norm = fid_comp.grad_norm if self.dump: if self.dump.dump_grad_norm: self.dump.update_grad_norm_log(fid_comp.grad_norm) if self.dump.dump_grad: self.dump.update_grad_log(grad) tc = self.termination_conditions if fid_comp.grad_norm < tc.min_gradient_norm: raise errors.GradMinReachedTerminate(fid_comp.grad_norm) return grad.flatten() def iter_step_callback_func(self, *args): """ Check the elapsed wall time for the optimisation run so far. 
Terminate if this has exceeded the maximum allowed time """ self.num_iter += 1 if self.log_level <= logging.DEBUG: logger.debug("Iteration callback {}".format(self.num_iter)) wall_time = timeit.default_timer() - self.wall_time_optimize_start if self.iter_summary: self.iter_summary.iter_num = self.num_iter self.iter_summary.wall_time = wall_time if self.dump and self.dump.dump_summary: self.dump.add_iter_summary() tc = self.termination_conditions if wall_time > tc.max_wall_time: raise errors.MaxWallTimeTerminate() # *** update stats *** if self.stats is not None: self.stats.num_iter = self.num_iter def _interpret_term_exception(self, except_term, result): """ Update the result object based on the exception that occurred during the optimisation """ result.termination_reason = except_term.reason if isinstance(except_term, errors.GoalAchievedTerminate): result.goal_achieved = True elif isinstance(except_term, errors.MaxWallTimeTerminate): result.wall_time_limit_exceeded = True elif isinstance(except_term, errors.GradMinReachedTerminate): result.grad_norm_min_reached = True elif isinstance(except_term, errors.MaxFidFuncCallTerminate): result.max_fid_func_exceeded = True def _add_common_result_attribs(self, result, st_time, end_time): """ Update the result object attributes which are common to all optimisers and outcomes """ dyn = self.dynamics result.num_iter = self.num_iter result.num_fid_func_calls = self.num_fid_func_calls result.wall_time = end_time - st_time result.fid_err = dyn.fid_computer.get_fid_err() result.grad_norm_final = dyn.fid_computer.grad_norm result.final_amps = dyn.ctrl_amps final_evo = dyn.full_evo if isinstance(final_evo, Qobj): result.evo_full_final = final_evo else: result.evo_full_final = Qobj(final_evo, dims=dyn.sys_dims) # *** update stats *** if self.stats is not None: self.stats.wall_time_optim_end = end_time self.stats.calculate() result.stats = copy.copy(self.stats) class OptimizerBFGS(Optimizer): """ Implements the run_optimization method 
using the BFGS algorithm """ def reset(self): Optimizer.reset(self) self.id_text = 'BFGS' def run_optimization(self, term_conds=None): """ Optimise the control pulse amplitudes to minimise the fidelity error using the BFGS (Broyden–Fletcher–Goldfarb–Shanno) algorithm The optimisation end when one of the passed termination conditions has been met, e.g. target achieved, gradient minimum met (local minima), wall time / iteration count exceeded. Essentially this is wrapper to the: scipy.optimize.fmin_bfgs function If the parameter term_conds=None, then the termination_conditions attribute must already be set. It will be overwritten if the parameter is not None The result is returned in an OptimResult object, which includes the final fidelity, time evolution, reason for termination etc """ self.init_optim(term_conds) term_conds = self.termination_conditions dyn = self.dynamics self.optim_var_vals = self._get_optim_var_vals() self._build_method_options() st_time = timeit.default_timer() self.wall_time_optimize_start = st_time if self.stats is not None: self.stats.wall_time_optim_start = st_time self.stats.wall_time_optim_end = 0.0 self.stats.num_iter = 1 if self.approx_grad: fprime = None else: fprime = self.fid_err_grad_wrapper if self.log_level <= logging.INFO: msg = ("Optimising pulse(s) using {} with " "'fmin_bfgs' method").format(self.alg) if self.approx_grad: msg += " (approx grad)" logger.info(msg) result = self._create_result() try: optim_var_vals, cost, grad, invHess, nFCalls, nGCalls, warn = \ spopt.fmin_bfgs(self.fid_err_func_wrapper, self.optim_var_vals, fprime=fprime, # approx_grad=self.approx_grad, callback=self.iter_step_callback_func, gtol=term_conds.min_gradient_norm, maxiter=term_conds.max_iterations, full_output=True, disp=True) amps = self._get_ctrl_amps(optim_var_vals) dyn.update_ctrl_amps(amps) if warn == 1: result.max_iter_exceeded = True result.termination_reason = "Iteration count limit reached" elif warn == 2: result.grad_norm_min_reached = True 
result.termination_reason = "Gradient normal minimum reached" except errors.OptimizationTerminate as except_term: self._interpret_term_exception(except_term, result) end_time = timeit.default_timer() self._add_common_result_attribs(result, st_time, end_time) return result class OptimizerLBFGSB(Optimizer): """ Implements the run_optimization method using the L-BFGS-B algorithm Attributes ---------- max_metric_corr : integer The maximum number of variable metric corrections used to define the limited memory matrix. That is the number of previous gradient values that are used to approximate the Hessian see the scipy.optimize.fmin_l_bfgs_b documentation for description of m argument """ def reset(self): Optimizer.reset(self) self.id_text = 'LBFGSB' self.max_metric_corr = 10 self.msg_level = None def init_optim(self, term_conds): """ Check optimiser attribute status and passed parameters before running the optimisation. This is called by run_optimization, but could called independently to check the configuration. """ if term_conds is None: term_conds = self.termination_conditions # AJGP 2015-04-21: # These (copying from config) are here for backward compatibility if hasattr(self.config, 'max_metric_corr'): if self.config.max_metric_corr: self.max_metric_corr = self.config.max_metric_corr if hasattr(self.config, 'accuracy_factor'): if self.config.accuracy_factor: term_conds.accuracy_factor = \ self.config.accuracy_factor Optimizer.init_optim(self, term_conds) if not isinstance(self.msg_level, int): if self.log_level < logging.DEBUG: self.msg_level = 2 elif self.log_level <= logging.DEBUG: self.msg_level = 1 else: self.msg_level = 0 def run_optimization(self, term_conds=None): """ Optimise the control pulse amplitudes to minimise the fidelity error using the L-BFGS-B algorithm, which is the constrained (bounded amplitude values), limited memory, version of the Broyden–Fletcher–Goldfarb–Shanno algorithm. 
The optimisation end when one of the passed termination conditions has been met, e.g. target achieved, gradient minimum met (local minima), wall time / iteration count exceeded. Essentially this is wrapper to the: scipy.optimize.fmin_l_bfgs_b function This in turn is a warpper for well established implementation of the L-BFGS-B algorithm written in Fortran, which is therefore very fast. See SciPy documentation for credit and details on this function. If the parameter term_conds=None, then the termination_conditions attribute must already be set. It will be overwritten if the parameter is not None The result is returned in an OptimResult object, which includes the final fidelity, time evolution, reason for termination etc """ self.init_optim(term_conds) term_conds = self.termination_conditions dyn = self.dynamics cfg = self.config self.optim_var_vals = self._get_optim_var_vals() self._build_method_options() st_time = timeit.default_timer() self.wall_time_optimize_start = st_time if self.stats is not None: self.stats.wall_time_optim_start = st_time self.stats.wall_time_optim_end = 0.0 self.stats.num_iter = 1 bounds = self._build_bounds_list() result = self._create_result() if self.approx_grad: fprime = None else: fprime = self.fid_err_grad_wrapper if 'accuracy_factor' in self.method_options: factr = self.method_options['accuracy_factor'] elif 'ftol' in self.method_options: factr = self.method_options['ftol'] elif hasattr(term_conds, 'accuracy_factor'): factr = term_conds.accuracy_factor else: factr = 1e7 if 'max_metric_corr' in self.method_options: m = self.method_options['max_metric_corr'] elif 'maxcor' in self.method_options: m = self.method_options['maxcor'] elif hasattr(self, 'max_metric_corr'): m = self.max_metric_corr else: m = 10 if self.log_level <= logging.INFO: msg = ("Optimising pulse(s) using {} with " "'fmin_l_bfgs_b' method").format(self.alg) if self.approx_grad: msg += " (approx grad)" logger.info(msg) try: optim_var_vals, fid, res_dict = 
spopt.fmin_l_bfgs_b( self.fid_err_func_wrapper, self.optim_var_vals, fprime=fprime, approx_grad=self.approx_grad, callback=self.iter_step_callback_func, bounds=self.bounds, m=m, factr=factr, pgtol=term_conds.min_gradient_norm, disp=self.msg_level, maxfun=term_conds.max_fid_func_calls, maxiter=term_conds.max_iterations) amps = self._get_ctrl_amps(optim_var_vals) dyn.update_ctrl_amps(amps) warn = res_dict['warnflag'] if warn == 0: result.grad_norm_min_reached = True result.termination_reason = "function converged" elif warn == 1: result.max_iter_exceeded = True result.termination_reason = ("Iteration or fidelity " "function call limit reached") elif warn == 2: result.termination_reason = res_dict['task'] result.num_iter = res_dict['nit'] except errors.OptimizationTerminate as except_term: self._interpret_term_exception(except_term, result) end_time = timeit.default_timer() self._add_common_result_attribs(result, st_time, end_time) return result class OptimizerCrab(Optimizer): """ Optimises the pulse using the CRAB algorithm [1]. It uses the scipy.optimize.minimize function with the method specified by the optim_method attribute. See Optimizer.run_optimization for details It minimises the fidelity error function with respect to the CRAB basis function coefficients. AJGP ToDo: Add citation here """ def reset(self): Optimizer.reset(self) self.id_text = 'CRAB' self.num_optim_vars = 0 def init_optim(self, term_conds): """ Check optimiser attribute status and passed parameters before running the optimisation. This is called by run_optimization, but could called independently to check the configuration. 
""" Optimizer.init_optim(self, term_conds) dyn = self.dynamics self.num_optim_vars = 0 pulse_gen_valid = True # check the pulse generators match the ctrls # (in terms of number) # and count the number of parameters if self.pulse_generator is None: pulse_gen_valid = False err_msg = "pulse_generator attribute is None" elif not isinstance(self.pulse_generator, collections.abc.Iterable): pulse_gen_valid = False err_msg = "pulse_generator is not iterable" elif len(self.pulse_generator) != dyn.num_ctrls: pulse_gen_valid = False err_msg = ("the number of pulse generators {} does not equal " "the number of controls {}".format( len(self.pulse_generator), dyn.num_ctrls)) if pulse_gen_valid: for p_gen in self.pulse_generator: if not isinstance(p_gen, pulsegen.PulseGenCrab): pulse_gen_valid = False err_msg = ( "pulse_generator contained object of type '{}'".format( p_gen.__class__.__name__)) break self.num_optim_vars += p_gen.num_optim_vars if not pulse_gen_valid: raise errors.UsageError( "The pulse_generator attribute must be set to a list of " "PulseGenCrab - one for each control. Here " + err_msg) def _build_bounds_list(self): """ No bounds necessary here, as the bounds for the CRAB parameters do not have much physical meaning. This needs to override the default method, otherwise the shape will be wrong """ return None def _get_optim_var_vals(self): """ Generate the 1d array that holds the current variable values of the function to be optimised For CRAB these are the basis coefficients Returns ------- ndarray (1d) of float """ pvals = [] for pgen in self.pulse_generator: pvals.extend(pgen.get_optim_var_vals()) return np.array(pvals) def _get_ctrl_amps(self, optim_var_vals): """ Get the control amplitudes from the current variable values of the function to be optimised. 
that is the 1d array that is passed from the optimisation method For CRAB the amplitudes will need to calculated by expanding the series Returns ------- float array[dynamics.num_tslots, dynamics.num_ctrls] """ dyn = self.dynamics if self.log_level <= logging.DEBUG: changed_params = self.optim_var_vals != optim_var_vals logger.debug( "{} out of {} optimisation parameters changed".format( changed_params.sum(), len(optim_var_vals))) amps = np.empty([dyn.num_tslots, dyn.num_ctrls]) j = 0 param_idx_st = 0 for p_gen in self.pulse_generator: param_idx_end = param_idx_st + p_gen.num_optim_vars pg_pvals = optim_var_vals[param_idx_st:param_idx_end] p_gen.set_optim_var_vals(pg_pvals) amps[:, j] = p_gen.gen_pulse() param_idx_st = param_idx_end j += 1 #print("param_idx_end={}".format(param_idx_end)) self.optim_var_vals = optim_var_vals return amps class OptimizerCrabFmin(OptimizerCrab): """ Optimises the pulse using the CRAB algorithm [1, 2]. It uses the scipy.optimize.fmin function which is effectively a wrapper for the Nelder-mead method. It minimises the fidelity error function with respect to the CRAB basis function coefficients. This is the default Optimizer for CRAB. Notes ----- [1] P. Doria, T. Calarco & S. Montangero. Phys. Rev. Lett. 106, 190501 (2011). [2] T. Caneva, T. Calarco, & S. Montangero. Phys. Rev. A 84, 022326 (2011). """ def reset(self): OptimizerCrab.reset(self) self.id_text = 'CRAB_FMIN' self.xtol = 1e-4 self.ftol = 1e-4 def run_optimization(self, term_conds=None): """ This function optimisation method is a wrapper to the scipy.optimize.fmin function. It will attempt to minimise the fidelity error with respect to some parameters, which are determined by _get_optim_var_vals which in the case of CRAB are the basis function coefficients The optimisation end when one of the passed termination conditions has been met, e.g. target achieved, wall time, or function call or iteration count exceeded. 
Specifically to the fmin method, the optimisation will stop when change parameter values is less than xtol or the change in function value is below ftol. If the parameter term_conds=None, then the termination_conditions attribute must already be set. It will be overwritten if the parameter is not None The result is returned in an OptimResult object, which includes the final fidelity, time evolution, reason for termination etc """ self.init_optim(term_conds) term_conds = self.termination_conditions dyn = self.dynamics cfg = self.config self.optim_var_vals = self._get_optim_var_vals() self._build_method_options() #print("Initial values:\n{}".format(self.optim_var_vals)) st_time = timeit.default_timer() self.wall_time_optimize_start = st_time if self.stats is not None: self.stats.wall_time_optim_start = st_time self.stats.wall_time_optim_end = 0.0 self.stats.num_iter = 1 result = self._create_result() if self.log_level <= logging.INFO: logger.info("Optimising pulse(s) using {} with " "'fmin' (Nelder-Mead) method".format(self.alg)) try: ret = spopt.fmin( self.fid_err_func_wrapper, self.optim_var_vals, xtol=self.xtol, ftol=self.ftol, maxiter=term_conds.max_iterations, maxfun=term_conds.max_fid_func_calls, full_output=True, disp=self.disp_conv_msg, retall=self.record_iteration_steps, callback=self.iter_step_callback_func) final_param_vals = ret[0] num_iter = ret[2] warn_flag = ret[4] if self.record_iteration_steps: self.iteration_steps = ret[5] amps = self._get_ctrl_amps(final_param_vals) dyn.update_ctrl_amps(amps) # Note the iterations are counted in this object as well # so there are compared here for interest sake only if self.num_iter != num_iter: logger.info("The number of iterations counted {} " " does not match the number reported {} " "by {}".format(self.num_iter, num_iter, self.method)) result.num_iter = num_iter if warn_flag == 0: result.termination_reason = \ "Function converged (within tolerance)" elif warn_flag == 1: result.termination_reason = \ "Maximum 
number of function evaluations reached" result.max_fid_func_exceeded = True elif warn_flag == 2: result.termination_reason = \ "Maximum number of iterations reached" result.max_iter_exceeded = True else: result.termination_reason = \ "Unknown (warn_flag={})".format(warn_flag) except errors.OptimizationTerminate as except_term: self._interpret_term_exception(except_term, result) end_time = timeit.default_timer() self._add_common_result_attribs(result, st_time, end_time) return result class OptimIterSummary(qtrldump.DumpSummaryItem): """A summary of the most recent iteration of the pulse optimisation Attributes ---------- iter_num : int Iteration number of the pulse optimisation fid_func_call_num : int Fidelity function call number of the pulse optimisation grad_func_call_num : int Gradient function call number of the pulse optimisation fid_err : float Fidelity error grad_norm : float fidelity gradient (wrt the control parameters) vector norm that is the magnitude of the gradient wall_time : float Time spent computing the pulse optimisation so far (in seconds of elapsed time) """ # Note there is some duplication here with Optimizer attributes # this exists solely to be copied into the summary dump min_col_width = 11 summary_property_names = ( "idx", "iter_num", "fid_func_call_num", "grad_func_call_num", "fid_err", "grad_norm", "wall_time" ) summary_property_fmt_type = ( 'd', 'd', 'd', 'd', 'g', 'g', 'g' ) summary_property_fmt_prec = ( 0, 0, 0, 0, 4, 4, 2 ) def __init__(self): self.reset() def reset(self): qtrldump.DumpSummaryItem.reset(self) self.iter_num = None self.fid_func_call_num = None self.grad_func_call_num = None self.fid_err = None self.grad_norm = None self.wall_time = 0.0 qutip-4.4.1/qutip/control/optimresult.py000066400000000000000000000113561352460343600204610ustar00rootroot00000000000000# -*- coding: utf-8 -*- # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2014 and later, Alexander J G Pitchford # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### # @author: Alexander Pitchford # @email1: agp1@aber.ac.uk # @email2: alex.pitchford@gmail.com # @organization: Aberystwyth University # @supervisor: Daniel Burgarth """ Class containing the results of the pulse optimisation """ import numpy as np class OptimResult(object): """ Attributes give the result of the pulse optimisation attempt Attributes ---------- termination_reason : string Description of the reason for terminating the optimisation fidelity : float final (normalised) fidelity that was achieved initial_fid_err : float fidelity error before optimisation starting fid_err : float final fidelity error that was achieved goal_achieved : boolean True is the fidely error achieved was below the target grad_norm_final : float Final value of the sum of the squares of the (normalised) fidelity error gradients grad_norm_min_reached : float True if the optimisation terminated due to the minimum value of the gradient being reached num_iter : integer Number of iterations of the optimisation algorithm completed max_iter_exceeded : boolean True if the iteration limit was reached max_fid_func_exceeded : boolean True if the fidelity function call limit was reached wall_time : float time elapsed during the optimisation wall_time_limit_exceeded : boolean True if the wall time limit was reached time : array[num_tslots+1] of float Time are the start of each timeslot with the final value being the total evolution time initial_amps : array[num_tslots, n_ctrls] The amplitudes at the start of the optimisation final_amps : array[num_tslots, n_ctrls] The amplitudes at the end of the optimisation evo_full_final : Qobj The evolution operator from t=0 to t=T based on the final amps evo_full_initial : Qobj The evolution operator from t=0 to t=T based on the initial amps stats : Stats Object contaning the stats for the run (if any collected) optimizer : Optimizer Instance of the Optimizer used to generate the 
result """ def __init__(self): self.reset() def reset(self): self.fidelity = 0.0 self.initial_fid_err = np.Inf self.fid_err = np.Inf self.goal_achieved = False self.grad_norm_final = 0.0 self.grad_norm_min_reached = False self.num_iter = 0 self.max_iter_exceeded = False self.num_fid_func_calls = 0 self.max_fid_func_exceeded = False self.wall_time = 0.0 self.wall_time_limit_exceeded = False self.termination_reason = "not started yet" self.time = None self.initial_amps = None self.final_amps = None self.evo_full_final = None self.evo_full_initial = None self.stats = None self.optimizer = None qutip-4.4.1/qutip/control/propcomp.py000066400000000000000000000403751352460343600177340ustar00rootroot00000000000000# -*- coding: utf-8 -*- # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2014 and later, Alexander J G Pitchford # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### # @author: Alexander Pitchford # @email1: agp1@aber.ac.uk # @email2: alex.pitchford@gmail.com # @organization: Aberystwyth University # @supervisor: Daniel Burgarth """ Propagator Computer Classes used to calculate the propagators, and also the propagator gradient when exact gradient methods are used Note the methods in the _Diag class was inspired by: DYNAMO - Dynamic Framework for Quantum Optimal Control See Machnes et.al., arXiv.1011.4874 """ # import os import warnings import numpy as np import scipy.linalg as la import scipy.sparse as sp # QuTiP from qutip import Qobj # QuTiP logging import qutip.logging_utils as logging logger = logging.get_logger() # QuTiP control modules from qutip.control import errors def _func_deprecation(message, stacklevel=3): """ Issue deprecation warning Using stacklevel=3 will ensure message refers the function calling with the deprecated parameter, """ warnings.warn(message, DeprecationWarning, stacklevel=stacklevel) class PropagatorComputer(object): """ Base for all Propagator Computer classes that are used to calculate the propagators, and also the propagator gradient when exact gradient methods are used Note: they must be instantiated with a Dynamics object, that is the container for the data that the functions operate on This base class cannot be used directly. 
See subclass descriptions and choose the appropriate one for the application Attributes ---------- log_level : integer level of messaging output from the logger. Options are attributes of qutip_utils.logging, in decreasing levels of messaging, are: DEBUG_INTENSE, DEBUG_VERBOSE, DEBUG, INFO, WARN, ERROR, CRITICAL Anything WARN or above is effectively 'quiet' execution, assuming everything runs as expected. The default NOTSET implies that the level will be taken from the QuTiP settings file, which by default is WARN grad_exact : boolean indicates whether the computer class instance is capable of computing propagator gradients. It is used to determine whether to create the Dynamics prop_grad array """ def __init__(self, dynamics, params=None): self.parent = dynamics self.params = params self.reset() def reset(self): """ reset any configuration data """ self.id_text = 'PROP_COMP_BASE' self.log_level = self.parent.log_level self._grad_exact = False def apply_params(self, params=None): """ Set object attributes based on the dictionary (if any) passed in the instantiation, or passed as a parameter This is called during the instantiation automatically. The key value pairs are the attribute name and value Note: attributes are created if they do not exist already, and are overwritten if they do. 
""" if not params: params = self.params if isinstance(params, dict): self.params = params for key in params: setattr(self, key, params[key]) @property def log_level(self): return logger.level @log_level.setter def log_level(self, lvl): """ Set the log_level attribute and set the level of the logger that is call logger.setLevel(lvl) """ logger.setLevel(lvl) def grad_exact(self): return self._grad_exact def compute_propagator(self, k): _func_deprecation("'compute_propagator' has been replaced " "by '_compute_propagator'") return self._compute_propagator(k) def _compute_propagator(self, k): """ calculate the progator between X(k) and X(k+1) Uses matrix expm of the dyn_gen at that point (in time) Assumes that the dyn_gen have been been calculated, i.e. drift and ctrls combined Return the propagator """ dyn = self.parent dgt = dyn._get_phased_dyn_gen(k)*dyn.tau[k] if dyn.oper_dtype == Qobj: prop = dgt.expm() else: prop = la.expm(dgt) return prop def compute_diff_prop(self, k, j, epsilon): _func_deprecation("'compute_diff_prop' has been replaced " "by '_compute_diff_prop'") return self._compute_diff_prop( k, j, epsilon) def _compute_diff_prop(self, k, j, epsilon): """ Calculate the propagator from the current point to a trial point a distance 'epsilon' (change in amplitude) in the direction the given control j in timeslot k Returns the propagator """ raise errors.UsageError("Not implemented in the baseclass." " Choose a subclass") def compute_prop_grad(self, k, j, compute_prop=True): _func_deprecation("'compute_prop_grad' has been replaced " "by '_compute_prop_grad'") return self._compute_prop_grad(self, k, j, compute_prop=compute_prop) def _compute_prop_grad(self, k, j, compute_prop=True): """ Calculate the gradient of propagator wrt the control amplitude in the timeslot. """ raise errors.UsageError("Not implemented in the baseclass." 
" Choose a subclass") class PropCompApproxGrad(PropagatorComputer): """ This subclass can be used when the propagator is calculated simply by expm of the dynamics generator, i.e. when gradients will be calculated using approximate methods. """ def reset(self): """ reset any configuration data """ PropagatorComputer.reset(self) self.id_text = 'APPROX' self.grad_exact = False self.apply_params() def _compute_diff_prop(self, k, j, epsilon): """ Calculate the propagator from the current point to a trial point a distance 'epsilon' (change in amplitude) in the direction the given control j in timeslot k Returns the propagator """ dyn = self.parent dgt_eps = (dyn._get_phased_dyn_gen(k) + epsilon*dyn._get_phased_ctrl_dyn_gen(k, j))*dyn.tau[k] if dyn.oper_dtype == Qobj: prop_eps = dgt_eps.expm() else: prop_eps = la.expm(dgt_eps) return prop_eps class PropCompDiag(PropagatorComputer): """ Coumputes the propagator exponentiation using diagonalisation of of the dynamics generator """ def reset(self): """ reset any configuration data """ PropagatorComputer.reset(self) self.id_text = 'DIAG' self.grad_exact = True self.apply_params() def _compute_propagator(self, k): """ Calculates the exponentiation of the dynamics generator (H) As part of the calc the the eigen decomposition is required, which is reused in the propagator gradient calculation """ dyn = self.parent dyn._ensure_decomp_curr(k) if dyn.oper_dtype == Qobj: prop = (dyn._dyn_gen_eigenvectors[k]*dyn._prop_eigen[k]* dyn._get_dyn_gen_eigenvectors_adj(k)) else: prop = dyn._dyn_gen_eigenvectors[k].dot( dyn._prop_eigen[k]).dot( dyn._get_dyn_gen_eigenvectors_adj(k)) return prop def _compute_prop_grad(self, k, j, compute_prop=True): """ Calculate the gradient of propagator wrt the control amplitude in the timeslot. 
Returns: [prop], prop_grad """ dyn = self.parent dyn._ensure_decomp_curr(k) if compute_prop: prop = self._compute_propagator(k) if dyn.oper_dtype == Qobj: # put control dyn_gen in combined dg diagonal basis cdg = (dyn._get_dyn_gen_eigenvectors_adj(k)* dyn._get_phased_ctrl_dyn_gen(k, j)* dyn._dyn_gen_eigenvectors[k]) # multiply (elementwise) by timeslice and factor matrix cdg = Qobj(np.multiply(cdg.full()*dyn.tau[k], dyn._dyn_gen_factormatrix[k]), dims=dyn.dyn_dims) # Return to canonical basis prop_grad = (dyn._dyn_gen_eigenvectors[k]*cdg* dyn._get_dyn_gen_eigenvectors_adj(k)) else: # put control dyn_gen in combined dg diagonal basis cdg = dyn._get_dyn_gen_eigenvectors_adj(k).dot( dyn._get_phased_ctrl_dyn_gen(k, j)).dot( dyn._dyn_gen_eigenvectors[k]) # multiply (elementwise) by timeslice and factor matrix cdg = np.multiply(cdg*dyn.tau[k], dyn._dyn_gen_factormatrix[k]) # Return to canonical basis prop_grad = dyn._dyn_gen_eigenvectors[k].dot(cdg).dot( dyn._get_dyn_gen_eigenvectors_adj(k)) if compute_prop: return prop, prop_grad else: return prop_grad class PropCompAugMat(PropagatorComputer): """ Augmented Matrix (deprecated - see _Frechet) It should work for all systems, e.g. open, symplectic There will be other PropagatorComputer subclasses that are more efficient The _Frechet class should provide exactly the same functionality more efficiently. 
Note the propagator gradient calculation using the augmented matrix is taken from: 'Robust quantum gates for open systems via optimal control: Markovian versus non-Markovian dynamics' Frederik F Floether, Pierre de Fouquieres, and Sophie G Schirmer """ def reset(self): PropagatorComputer.reset(self) self.id_text = 'AUG_MAT' self.grad_exact = True self.apply_params() def _get_aug_mat(self, k, j): """ Generate the matrix [[A, E], [0, A]] where A is the overall dynamics generator E is the control dynamics generator for a given timeslot and control returns this augmented matrix """ dyn = self.parent dg = dyn._get_phased_dyn_gen(k) if dyn.oper_dtype == Qobj: A = dg.data*dyn.tau[k] E = dyn._get_phased_ctrl_dyn_gen(k, j).data*dyn.tau[k] Z = sp.csr_matrix(dg.data.shape) aug = Qobj(sp.vstack([sp.hstack([A, E]), sp.hstack([Z, A])])) elif dyn.oper_dtype == np.ndarray: A = dg*dyn.tau[k] E = dyn._get_phased_ctrl_dyn_gen(k, j)*dyn.tau[k] Z = np.zeros(dg.shape) aug = np.vstack([np.hstack([A, E]), np.hstack([Z, A])]) else: A = dg*dyn.tau[k] E = dyn._get_phased_ctrl_dyn_gen(k, j)*dyn.tau[k] Z = dg*0.0 aug = sp.vstack([sp.hstack([A, E]), sp.hstack([Z, A])]) return aug def _compute_prop_grad(self, k, j, compute_prop=True): """ Calculate the gradient of propagator wrt the control amplitude in the timeslot using the exponentiation of the the augmented matrix. 
The propagtor is calculated for 'free' in this method and hence it is returned if compute_prop==True Returns: [prop], prop_grad """ dyn = self.parent dg = dyn._get_phased_dyn_gen(k) aug = self._get_aug_mat(k, j) if dyn.oper_dtype == Qobj: aug_exp = aug.expm() prop_grad = Qobj(aug_exp.data[:dg.shape[0], dg.shape[1]:], dims=dyn.dyn_dims) if compute_prop: prop = Qobj(aug_exp.data[:dg.shape[0], :dg.shape[1]], dims=dyn.dyn_dims) else: aug_exp = la.expm(aug) prop_grad = aug_exp[:dg.shape[0], dg.shape[1]:] if compute_prop: prop = aug_exp[:dg.shape[0], :dg.shape[1]] if compute_prop: return prop, prop_grad else: return prop_grad class PropCompFrechet(PropagatorComputer): """ Frechet method for calculating the propagator: exponentiating the combined dynamics generator and the propagator gradient It should work for all systems, e.g. unitary, open, symplectic There are other PropagatorComputer subclasses that may be more efficient """ def reset(self): PropagatorComputer.reset(self) self.id_text = 'FRECHET' self.grad_exact = True self.apply_params() def _compute_prop_grad(self, k, j, compute_prop=True): """ Calculate the gradient of propagator wrt the control amplitude in the timeslot using the expm_frechet method The propagtor is calculated (almost) for 'free' in this method and hence it is returned if compute_prop==True Returns: [prop], prop_grad """ dyn = self.parent if dyn.oper_dtype == Qobj: A = dyn._get_phased_dyn_gen(k).full()*dyn.tau[k] E = dyn._get_phased_ctrl_dyn_gen(k, j).full()*dyn.tau[k] if compute_prop: prop_dense, prop_grad_dense = la.expm_frechet(A, E) prop = Qobj(prop_dense, dims=dyn.dyn_dims) prop_grad = Qobj(prop_grad_dense, dims=dyn.dyn_dims) else: prop_grad_dense = la.expm_frechet(A, E, compute_expm=False) prop_grad = Qobj(prop_grad_dense, dims=dyn.dyn_dims) elif dyn.oper_dtype == np.ndarray: A = dyn._get_phased_dyn_gen(k)*dyn.tau[k] E = dyn._get_phased_ctrl_dyn_gen(k, j)*dyn.tau[k] if compute_prop: prop, prop_grad = la.expm_frechet(A, E) else: prop_grad = 
la.expm_frechet(A, E, compute_expm=False) else: # Assuming some sparse matrix spcls = dyn._dyn_gen[k].__class__ A = (dyn._get_phased_dyn_gen(k)*dyn.tau[k]).toarray() E = (dyn._get_phased_ctrl_dyn_gen(k, j)*dyn.tau[k]).toarray() if compute_prop: prop_dense, prop_grad_dense = la.expm_frechet(A, E) prop = spcls(prop_dense) prop_grad = spcls(prop_grad_dense) else: prop_grad_dense = la.expm_frechet(A, E, compute_expm=False) prop_grad = spcls(prop_grad_dense) if compute_prop: return prop, prop_grad else: return prop_grad qutip-4.4.1/qutip/control/pulsegen.py000066400000000000000000001262271352460343600177200ustar00rootroot00000000000000# -*- coding: utf-8 -*- # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2014 and later, Alexander J G Pitchford # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### # @author: Alexander Pitchford # @email1: agp1@aber.ac.uk # @email2: alex.pitchford@gmail.com # @organization: Aberystwyth University # @supervisor: Daniel Burgarth """ Pulse generator - Generate pulses for the timeslots Each class defines a gen_pulse function that produces a float array of size num_tslots. Each class produces a differ type of pulse. See the class and gen_pulse function descriptions for details """ import numpy as np import qutip.logging_utils as logging logger = logging.get_logger() import qutip.control.dynamics as dynamics import qutip.control.errors as errors def create_pulse_gen(pulse_type='RND', dyn=None, pulse_params=None): """ Create and return a pulse generator object matching the given type. The pulse generators each produce a different type of pulse, see the gen_pulse function description for details. These are the random pulse options: RND - Independent random value in each timeslot RNDFOURIER - Fourier series with random coefficients RNDWAVES - Summation of random waves RNDWALK1 - Random change in amplitude each timeslot RNDWALK2 - Random change in amp gradient each timeslot These are the other non-periodic options: LIN - Linear, i.e. 
contant gradient over the time ZERO - special case of the LIN pulse, where the gradient is 0 These are the periodic options SINE - Sine wave SQUARE - Square wave SAW - Saw tooth wave TRIANGLE - Triangular wave If a Dynamics object is passed in then this is used in instantiate the PulseGen, meaning that some timeslot and amplitude properties are copied over. """ if pulse_type == 'RND': return PulseGenRandom(dyn, params=pulse_params) if pulse_type == 'RNDFOURIER': return PulseGenRndFourier(dyn, params=pulse_params) if pulse_type == 'RNDWAVES': return PulseGenRndWaves(dyn, params=pulse_params) if pulse_type == 'RNDWALK1': return PulseGenRndWalk1(dyn, params=pulse_params) if pulse_type == 'RNDWALK2': return PulseGenRndWalk2(dyn, params=pulse_params) elif pulse_type == 'LIN': return PulseGenLinear(dyn, params=pulse_params) elif pulse_type == 'ZERO': return PulseGenZero(dyn, params=pulse_params) elif pulse_type == 'SINE': return PulseGenSine(dyn, params=pulse_params) elif pulse_type == 'SQUARE': return PulseGenSquare(dyn, params=pulse_params) elif pulse_type == 'SAW': return PulseGenSaw(dyn, params=pulse_params) elif pulse_type == 'TRIANGLE': return PulseGenTriangle(dyn, params=pulse_params) elif pulse_type == 'GAUSSIAN': return PulseGenGaussian(dyn, params=pulse_params) elif pulse_type == 'CRAB_FOURIER': return PulseGenCrabFourier(dyn, params=pulse_params) elif pulse_type == 'GAUSSIAN_EDGE': return PulseGenGaussianEdge(dyn, params=pulse_params) else: raise ValueError("No option for pulse_type '{}'".format(pulse_type)) class PulseGen(object): """ Pulse generator Base class for all Pulse generators The object can optionally be instantiated with a Dynamics object, in which case the timeslots and amplitude scaling and offset are copied from that. 
Otherwise the class can be used independently by setting: tau (array of timeslot durations) or num_tslots and pulse_time for equally spaced timeslots Attributes ---------- num_tslots : integer Number of timeslots, aka timeslices (copied from Dynamics if given) pulse_time : float total duration of the pulse (copied from Dynamics.evo_time if given) scaling : float linear scaling applied to the pulse (copied from Dynamics.initial_ctrl_scaling if given) offset : float linear offset applied to the pulse (copied from Dynamics.initial_ctrl_offset if given) tau : array[num_tslots] of float Duration of each timeslot (copied from Dynamics if given) lbound : float Lower boundary for the pulse amplitudes Note that the scaling and offset attributes can be used to fully bound the pulse for all generators except some of the random ones This bound (if set) may result in additional shifting / scaling Default is -Inf ubound : float Upper boundary for the pulse amplitudes Note that the scaling and offset attributes can be used to fully bound the pulse for all generators except some of the random ones This bound (if set) may result in additional shifting / scaling Default is Inf periodic : boolean True if the pulse generator produces periodic pulses random : boolean True if the pulse generator produces random pulses log_level : integer level of messaging output from the logger. Options are attributes of qutip.logging_utils, in decreasing levels of messaging, are: DEBUG_INTENSE, DEBUG_VERBOSE, DEBUG, INFO, WARN, ERROR, CRITICAL Anything WARN or above is effectively 'quiet' execution, assuming everything runs as expected. 
The default NOTSET implies that the level will be taken from the QuTiP settings file, which by default is WARN """ def __init__(self, dyn=None, params=None): self.parent = dyn self.params = params self.reset() def reset(self): """ reset attributes to default values """ if isinstance(self.parent, dynamics.Dynamics): dyn = self.parent self.num_tslots = dyn.num_tslots self.pulse_time = dyn.evo_time self.scaling = dyn.initial_ctrl_scaling self.offset = dyn.initial_ctrl_offset self.tau = dyn.tau self.log_level = dyn.log_level else: self.num_tslots = 100 self.pulse_time = 1.0 self.scaling = 1.0 self.tau = None self.offset = 0.0 self._uses_time = False self.time = None self._pulse_initialised = False self.periodic = False self.random = False self.lbound = None self.ubound = None self.ramping_pulse = None self.apply_params() def apply_params(self, params=None): """ Set object attributes based on the dictionary (if any) passed in the instantiation, or passed as a parameter This is called during the instantiation automatically. The key value pairs are the attribute name and value """ if not params: params = self.params if isinstance(params, dict): self.params = params for key in params: setattr(self, key, params[key]) @property def log_level(self): return logger.level @log_level.setter def log_level(self, lvl): """ Set the log_level attribute and set the level of the logger that is call logger.setLevel(lvl) """ logger.setLevel(lvl) def gen_pulse(self): """ returns the pulse as an array of vales for each timeslot Must be implemented by subclass """ # must be implemented by subclass raise errors.UsageError( "No method defined for generating a pulse. 
" " Suspect base class was used where sub class should have been") def init_pulse(self): """ Initialise the pulse parameters """ if self.tau is None: self.tau = np.ones(self.num_tslots, dtype='f') * \ self.pulse_time/self.num_tslots if self._uses_time: self.time = np.zeros(self.num_tslots, dtype=float) for k in range(self.num_tslots-1): self.time[k+1] = self.time[k] + self.tau[k] self._pulse_initialised = True if not self.lbound is None: if np.isinf(self.lbound): self.lbound = None if not self.ubound is None: if np.isinf(self.ubound): self.ubound = None if not self.ubound is None and not self.lbound is None: if self.ubound < self.lbound: raise ValueError("ubound cannot be less the lbound") def _apply_bounds_and_offset(self, pulse): """ Ensure that the randomly generated pulse fits within the bounds (after applying the offset) Assumes that pulses passed are centered around zero (on average) """ if self.lbound is None and self.ubound is None: return pulse + self.offset max_amp = max(pulse) min_amp = min(pulse) if ((self.ubound is None or max_amp + self.offset <= self.ubound) and (self.lbound is None or min_amp + self.offset >= self.lbound)): return pulse + self.offset # Some shifting / scaling is required. 
if self.ubound is None or self.lbound is None: # One of the bounds is inf, so just shift the pulse if self.lbound is None: # max_amp + offset must exceed the ubound return pulse + self.ubound - max_amp else: # min_amp + offset must exceed the lbound return pulse + self.lbound - min_amp else: bound_range = self.ubound - self.lbound amp_range = max_amp - min_amp if max_amp - min_amp > bound_range: # pulse range is too high, it must be scaled pulse = pulse * bound_range / amp_range # otherwise the pulse should fit anyway return pulse + self.lbound - min(pulse) def _apply_ramping_pulse(self, pulse, ramping_pulse=None): if ramping_pulse is None: ramping_pulse = self.ramping_pulse if ramping_pulse is not None: pulse = pulse*ramping_pulse return pulse class PulseGenZero(PulseGen): """ Generates a flat pulse """ def gen_pulse(self): """ Generate a pulse with the same value in every timeslot. The value will be zero, unless the offset is not zero, in which case it will be the offset """ pulse = np.zeros(self.num_tslots) return self._apply_bounds_and_offset(pulse) class PulseGenRandom(PulseGen): """ Generates random pulses as simply random values for each timeslot """ def reset(self): PulseGen.reset(self) self.random = True self.apply_params() def gen_pulse(self): """ Generate a pulse of random values between 1 and -1 Values are scaled using the scaling property and shifted using the offset property Returns the pulse as an array of vales for each timeslot """ pulse = (2*np.random.random(self.num_tslots) - 1) * self.scaling return self._apply_bounds_and_offset(pulse) class PulseGenRndFourier(PulseGen): """ Generates pulses by summing sine waves as a Fourier series with random coefficients Attributes ---------- scaling : float The pulses should fit approximately within -/+scaling (before the offset is applied) as it is used to set a maximum for each component wave Use bounds to be sure (copied from Dynamics.initial_ctrl_scaling if given) min_wavelen : float Minimum wavelength 
of any component wave Set by default to 1/10th of the pulse time """ def reset(self): """ reset attributes to default values """ PulseGen.reset(self) self.random = True self._uses_time = True try: self.min_wavelen = self.pulse_time / 10.0 except: self.min_wavelen = 0.1 self.apply_params() def gen_pulse(self, min_wavelen=None): """ Generate a random pulse based on a Fourier series with a minimum wavelength """ if min_wavelen is not None: self.min_wavelen = min_wavelen min_wavelen = self.min_wavelen if min_wavelen > self.pulse_time: raise ValueError("Minimum wavelength cannot be greater than " "the pulse time") if not self._pulse_initialised: self.init_pulse() # use some phase to avoid the first pulse being always 0 sum_wave = np.zeros(self.tau.shape) wavelen = 2.0*self.pulse_time t = self.time wl = [] while wavelen > min_wavelen: wl.append(wavelen) wavelen = wavelen/2.0 num_comp_waves = len(wl) amp_scale = np.sqrt(8)*self.scaling / float(num_comp_waves) for wavelen in wl: amp = amp_scale*(np.random.rand()*2 - 1) phase_off = np.random.rand()*np.pi/2.0 curr_wave = amp*np.sin(2*np.pi*t/wavelen + phase_off) sum_wave += curr_wave return self._apply_bounds_and_offset(sum_wave) class PulseGenRndWaves(PulseGen): """ Generates pulses by summing sine waves with random frequencies amplitudes and phase offset Attributes ---------- scaling : float The pulses should fit approximately within -/+scaling (before the offset is applied) as it is used to set a maximum for each component wave Use bounds to be sure (copied from Dynamics.initial_ctrl_scaling if given) num_comp_waves : integer Number of component waves. That is the number of waves that are summed to make the pulse signal Set to 20 by default. 
min_wavelen : float Minimum wavelength of any component wave Set by default to 1/10th of the pulse time max_wavelen : float Maximum wavelength of any component wave Set by default to twice the pulse time """ def reset(self): """ reset attributes to default values """ PulseGen.reset(self) self.random = True self._uses_time = True self.num_comp_waves = 20 try: self.min_wavelen = self.pulse_time / 10.0 except: self.min_wavelen = 0.1 try: self.max_wavelen = 2*self.pulse_time except: self.max_wavelen = 10.0 self.apply_params() def gen_pulse(self, num_comp_waves=None, min_wavelen=None, max_wavelen=None): """ Generate a random pulse by summing sine waves with random freq, amplitude and phase offset """ if num_comp_waves is not None: self.num_comp_waves = num_comp_waves if min_wavelen is not None: self.min_wavelen = min_wavelen if max_wavelen is not None: self.max_wavelen = max_wavelen num_comp_waves = self.num_comp_waves min_wavelen = self.min_wavelen max_wavelen = self.max_wavelen if min_wavelen > self.pulse_time: raise ValueError("Minimum wavelength cannot be greater than " "the pulse time") if max_wavelen <= min_wavelen: raise ValueError("Maximum wavelength must be greater than " "the minimum wavelength") if not self._pulse_initialised: self.init_pulse() # use some phase to avoid the first pulse being always 0 sum_wave = np.zeros(self.tau.shape) t = self.time wl_range = max_wavelen - min_wavelen amp_scale = np.sqrt(8)*self.scaling / float(num_comp_waves) for n in range(num_comp_waves): amp = amp_scale*(np.random.rand()*2 - 1) phase_off = np.random.rand()*np.pi/2.0 wavelen = min_wavelen + np.random.rand()*wl_range curr_wave = amp*np.sin(2*np.pi*t/wavelen + phase_off) sum_wave += curr_wave return self._apply_bounds_and_offset(sum_wave) class PulseGenRndWalk1(PulseGen): """ Generates pulses by using a random walk algorithm Attributes ---------- scaling : float Used as the range for the starting amplitude Note must used bounds if values must be restricted. 
Also scales the max_d_amp value (copied from Dynamics.initial_ctrl_scaling if given) max_d_amp : float Maximum amount amplitude will change between timeslots Note this is also factored by the scaling attribute """ def reset(self): """ reset attributes to default values """ PulseGen.reset(self) self.random = True self.max_d_amp = 0.1 self.apply_params() def gen_pulse(self, max_d_amp=None): """ Generate a pulse by changing the amplitude a random amount between -max_d_amp and +max_d_amp at each timeslot. The walk will start at a random amplitude between -/+scaling. """ if max_d_amp is not None: self.max_d_amp = max_d_amp max_d_amp = self.max_d_amp*self.scaling if not self._pulse_initialised: self.init_pulse() walk = np.zeros(self.tau.shape) amp = self.scaling*(np.random.rand()*2 - 1) for k in range(len(walk)): walk[k] = amp amp += (np.random.rand()*2 - 1)*max_d_amp return self._apply_bounds_and_offset(walk) class PulseGenRndWalk2(PulseGen): """ Generates pulses by using a random walk algorithm Note this is best used with bounds as the walks tend to wander far Attributes ---------- scaling : float Used as the range for the starting amplitude Note must used bounds if values must be restricted. Also scales the max_d2_amp value (copied from Dynamics.initial_ctrl_scaling if given) max_d2_amp : float Maximum amount amplitude gradient will change between timeslots Note this is also factored by the scaling attribute """ def reset(self): """ reset attributes to default values """ PulseGen.reset(self) self.random = True self.max_d2_amp = 0.01 self.apply_params() def gen_pulse(self, init_grad_range=None, max_d2_amp=None): """ Generate a pulse by changing the amplitude gradient a random amount between -max_d2_amp and +max_d2_amp at each timeslot. The walk will start at a random amplitude between -/+scaling. 
The gradient will start at 0 """ if max_d2_amp is not None: self.max_d2_amp = max_d2_amp max_d2_amp = self.max_d2_amp if not self._pulse_initialised: self.init_pulse() walk = np.zeros(self.tau.shape) amp = self.scaling*(np.random.rand()*2 - 1) print("Start amp {}".format(amp)) grad = 0.0 print("Start grad {}".format(grad)) for k in range(len(walk)): walk[k] = amp grad += (np.random.rand()*2 - 1)*max_d2_amp amp += grad # print("grad {}".format(grad)) return self._apply_bounds_and_offset(walk) class PulseGenLinear(PulseGen): """ Generates linear pulses Attributes ---------- gradient : float Gradient of the line. Note this is calculated from the start_val and end_val if these are given start_val : float Start point of the line. That is the starting amplitude end_val : float End point of the line. That is the amplitude at the start of the last timeslot """ def reset(self): """ reset attributes to default values """ PulseGen.reset(self) self.gradient = None self.start_val = -1.0 self.end_val = 1.0 self.apply_params() def init_pulse(self, gradient=None, start_val=None, end_val=None): """ Calculate the gradient if pulse is defined by start and end point values """ PulseGen.init_pulse(self) if start_val is not None and end_val is not None: self.start_val = start_val self.end_val = end_val if self.start_val is not None and self.end_val is not None: self.gradient = float(self.end_val - self.start_val) / \ (self.pulse_time - self.tau[-1]) def gen_pulse(self, gradient=None, start_val=None, end_val=None): """ Generate a linear pulse using either the gradient and start value or using the end point to calulate the gradient Note that the scaling and offset parameters are still applied, so unless these values are the default 1.0 and 0.0, then the actual gradient etc will be different Returns the pulse as an array of vales for each timeslot """ if (gradient is not None or start_val is not None or end_val is not None): self.init_pulse(gradient, start_val, end_val) if not 
self._pulse_initialised: self.init_pulse() pulse = np.empty(self.num_tslots) t = 0.0 for k in range(self.num_tslots): y = self.gradient*t + self.start_val pulse[k] = self.scaling*y t = t + self.tau[k] return self._apply_bounds_and_offset(pulse) class PulseGenPeriodic(PulseGen): """ Intermediate class for all periodic pulse generators All of the periodic pulses range from -1 to 1 All have a start phase that can be set between 0 and 2pi Attributes ---------- num_waves : float Number of complete waves (cycles) that occur in the pulse. wavelen and freq calculated from this if it is given wavelen : float Wavelength of the pulse (assuming the speed is 1) freq is calculated from this if it is given freq : float Frequency of the pulse start_phase : float Phase of the pulse signal when t=0 """ def reset(self): """ reset attributes to default values """ PulseGen.reset(self) self.periodic = True self.num_waves = None self.freq = 1.0 self.wavelen = None self.start_phase = 0.0 self.apply_params() def init_pulse(self, num_waves=None, wavelen=None, freq=None, start_phase=None): """ Calculate the wavelength, frequency, number of waves etc from the each other and the other parameters If num_waves is given then the other parameters are worked from this Otherwise if the wavelength is given then it is the driver Otherwise the frequency is used to calculate wavelength and num_waves """ PulseGen.init_pulse(self) if start_phase is not None: self.start_phase = start_phase if num_waves is not None or wavelen is not None or freq is not None: self.num_waves = num_waves self.wavelen = wavelen self.freq = freq if self.num_waves is not None: self.freq = float(self.num_waves) / self.pulse_time self.wavelen = 1.0/self.freq elif self.wavelen is not None: self.freq = 1.0/self.wavelen self.num_waves = self.wavelen*self.pulse_time else: self.wavelen = 1.0/self.freq self.num_waves = self.wavelen*self.pulse_time class PulseGenSine(PulseGenPeriodic): """ Generates sine wave pulses """ def 
gen_pulse(self, num_waves=None, wavelen=None, freq=None, start_phase=None): """ Generate a sine wave pulse If no params are provided then the class object attributes are used. If they are provided, then these will reinitialise the object attribs. returns the pulse as an array of vales for each timeslot """ if start_phase is not None: self.start_phase = start_phase if num_waves is not None or wavelen is not None or freq is not None: self.init_pulse(num_waves, wavelen, freq, start_phase) if not self._pulse_initialised: self.init_pulse() pulse = np.empty(self.num_tslots) t = 0.0 for k in range(self.num_tslots): phase = 2*np.pi*self.freq*t + self.start_phase pulse[k] = self.scaling*np.sin(phase) t = t + self.tau[k] return self._apply_bounds_and_offset(pulse) class PulseGenSquare(PulseGenPeriodic): """ Generates square wave pulses """ def gen_pulse(self, num_waves=None, wavelen=None, freq=None, start_phase=None): """ Generate a square wave pulse If no parameters are pavided then the class object attributes are used. If they are provided, then these will reinitialise the object attribs """ if start_phase is not None: self.start_phase = start_phase if num_waves is not None or wavelen is not None or freq is not None: self.init_pulse(num_waves, wavelen, freq, start_phase) if not self._pulse_initialised: self.init_pulse() pulse = np.empty(self.num_tslots) t = 0.0 for k in range(self.num_tslots): phase = 2*np.pi*self.freq*t + self.start_phase x = phase/(2*np.pi) y = 4*np.floor(x) - 2*np.floor(2*x) + 1 pulse[k] = self.scaling*y t = t + self.tau[k] return self._apply_bounds_and_offset(pulse) class PulseGenSaw(PulseGenPeriodic): """ Generates saw tooth wave pulses """ def gen_pulse(self, num_waves=None, wavelen=None, freq=None, start_phase=None): """ Generate a saw tooth wave pulse If no parameters are pavided then the class object attributes are used. 
If they are provided, then these will reinitialise the object attribs """ if start_phase is not None: self.start_phase = start_phase if num_waves is not None or wavelen is not None or freq is not None: self.init_pulse(num_waves, wavelen, freq, start_phase) if not self._pulse_initialised: self.init_pulse() pulse = np.empty(self.num_tslots) t = 0.0 for k in range(self.num_tslots): phase = 2*np.pi*self.freq*t + self.start_phase x = phase/(2*np.pi) y = 2*(x - np.floor(0.5 + x)) pulse[k] = self.scaling*y t = t + self.tau[k] return self._apply_bounds_and_offset(pulse) class PulseGenTriangle(PulseGenPeriodic): """ Generates triangular wave pulses """ def gen_pulse(self, num_waves=None, wavelen=None, freq=None, start_phase=None): """ Generate a sine wave pulse If no parameters are pavided then the class object attributes are used. If they are provided, then these will reinitialise the object attribs """ if start_phase is not None: self.start_phase = start_phase if num_waves is not None or wavelen is not None or freq is not None: self.init_pulse(num_waves, wavelen, freq, start_phase) if not self._pulse_initialised: self.init_pulse() pulse = np.empty(self.num_tslots) t = 0.0 for k in range(self.num_tslots): phase = 2*np.pi*self.freq*t + self.start_phase + np.pi/2.0 x = phase/(2*np.pi) y = 2*np.abs(2*(x - np.floor(0.5 + x))) - 1 pulse[k] = self.scaling*y t = t + self.tau[k] return self._apply_bounds_and_offset(pulse) class PulseGenGaussian(PulseGen): """ Generates pulses with a Gaussian profile """ def reset(self): """ reset attributes to default values """ PulseGen.reset(self) self._uses_time = True self.mean = 0.5*self.pulse_time self.variance = 0.5*self.pulse_time self.apply_params() def gen_pulse(self, mean=None, variance=None): """ Generate a pulse with Gaussian shape. The peak is centre around the mean and the variance determines the breadth The scaling and offset attributes are applied as an amplitude and fixed linear offset. 
Note that the maximum amplitude will be scaling + offset. """ if not self._pulse_initialised: self.init_pulse() if mean: Tm = mean else: Tm = self.mean if variance: Tv = variance else: Tv = self.variance t = self.time T = self.pulse_time pulse = self.scaling*np.exp(-(t-Tm)**2/(2*Tv)) return self._apply_bounds_and_offset(pulse) class PulseGenGaussianEdge(PulseGen): """ Generate pulses with inverted Gaussian ramping in and out It's intended use for a ramping modulation, which is often required in experimental setups. Attributes ---------- decay_time : float Determines the ramping rate. It is approximately the time required to bring the pulse to full amplitude It is set to 1/10 of the pulse time by default """ def reset(self): """ reset attributes to default values """ PulseGen.reset(self) self._uses_time = True self.decay_time = self.pulse_time / 10.0 self.apply_params() def gen_pulse(self, decay_time=None): """ Generate a pulse that starts and ends at zero and 1.0 in between then apply scaling and offset The tailing in and out is an inverted Gaussian shape """ if not self._pulse_initialised: self.init_pulse() t = self.time if decay_time: Td = decay_time else: Td = self.decay_time T = self.pulse_time pulse = 1.0 - np.exp(-t**2/Td) - np.exp(-(t-T)**2/Td) pulse = pulse*self.scaling return self._apply_bounds_and_offset(pulse) ### The following are pulse generators for the CRAB algorithm ### # AJGP 2015-05-14: # The intention is to have a more general base class that allows # setting of general basis functions class PulseGenCrab(PulseGen): """ Base class for all CRAB pulse generators Note these are more involved in the optimisation process as they are used to produce piecewise control amplitudes each time new optimisation parameters are tried Attributes ---------- num_coeffs : integer Number of coefficients used for each basis function num_basis_funcs : integer Number of basis functions In this case set at 2 and should not be changed coeffs : float array[num_coeffs, 
num_basis_funcs] The basis coefficient values randomize_coeffs : bool If True (default) then the coefficients are set to some random values when initialised, otherwise they will all be equal to self.scaling """ def __init__(self, dyn=None, num_coeffs=None, params=None): self.parent = dyn self.num_coeffs = num_coeffs self.params = params self.reset() def reset(self): """ reset attributes to default values """ PulseGen.reset(self) self.NUM_COEFFS_WARN_LVL = 20 self.DEF_NUM_COEFFS = 4 self._BSC_ALL = 1 self._BSC_GT_MEAN = 2 self._BSC_LT_MEAN = 3 self._uses_time = True self.time = None self.num_basis_funcs = 2 self.num_optim_vars = 0 self.coeffs = None self.randomize_coeffs = True self._num_coeffs_estimated = False self.guess_pulse_action = 'MODULATE' self.guess_pulse = None self.guess_pulse_func = None self.apply_params() def init_pulse(self, num_coeffs=None): """ Set the initial freq and coefficient values """ PulseGen.init_pulse(self) self.init_coeffs(num_coeffs=num_coeffs) if self.guess_pulse is not None: self.init_guess_pulse() self._init_bounds() if self.log_level <= logging.DEBUG and not self._num_coeffs_estimated: logger.debug( "CRAB pulse initialised with {} coefficients per basis " "function, which means a total of {} " "optimisation variables for this pulse".format( self.num_coeffs, self.num_optim_vars)) # def generate_guess_pulse(self) # if isinstance(self.guess_pulsegen, PulseGen): # self.guess_pulse = self.guess_pulsegen.gen_pulse() # return self.guess_pulse def init_coeffs(self, num_coeffs=None): """ Generate the initial ceofficent values. Parameters ---------- num_coeffs : integer Number of coefficients used for each basis function If given this overides the default and sets the attribute of the same name. 
""" if num_coeffs: self.num_coeffs = num_coeffs self._num_coeffs_estimated = False if not self.num_coeffs: if isinstance(self.parent, dynamics.Dynamics): dim = self.parent.get_drift_dim() self.num_coeffs = self.estimate_num_coeffs(dim) self._num_coeffs_estimated = True else: self.num_coeffs = self.DEF_NUM_COEFFS self.num_optim_vars = self.num_coeffs*self.num_basis_funcs if self._num_coeffs_estimated: if self.log_level <= logging.INFO: logger.info( "The number of CRAB coefficients per basis function " "has been estimated as {}, which means a total of {} " "optimisation variables for this pulse. Based on the " "dimension ({}) of the system".format( self.num_coeffs, self.num_optim_vars, dim)) # Issue warning if beyond the recommended level if self.log_level <= logging.WARN: if self.num_coeffs > self.NUM_COEFFS_WARN_LVL: logger.warn( "The estimated number of coefficients {} exceeds " "the amount ({}) recommended for efficient " "optimisation. You can set this level explicitly " "to suppress this message.".format( self.num_coeffs, self.NUM_COEFFS_WARN_LVL)) if self.randomize_coeffs: r = np.random.random([self.num_coeffs, self.num_basis_funcs]) self.coeffs = (2*r - 1.0) * self.scaling else: self.coeffs = np.ones([self.num_coeffs, self.num_basis_funcs])*self.scaling def estimate_num_coeffs(self, dim): """ Estimate the number coefficients based on the dimensionality of the system. 
Returns ------- num_coeffs : int estimated number of coefficients """ num_coeffs = max(2, dim - 1) return num_coeffs def get_optim_var_vals(self): """ Get the parameter values to be optimised Returns ------- list (or 1d array) of floats """ return self.coeffs.ravel().tolist() def set_optim_var_vals(self, param_vals): """ Set the values of the any of the pulse generation parameters based on new values from the optimisation method Typically this will be the basis coefficients """ # Type and size checking avoided here as this is in the # main optmisation call sequence self.set_coeffs(param_vals) def set_coeffs(self, param_vals): self.coeffs = param_vals.reshape( [self.num_coeffs, self.num_basis_funcs]) def init_guess_pulse(self): self.guess_pulse_func = None if not self.guess_pulse_action: logger.WARN("No guess pulse action given, hence ignored.") elif self.guess_pulse_action.upper() == 'MODULATE': self.guess_pulse_func = self.guess_pulse_modulate elif self.guess_pulse_action.upper() == 'ADD': self.guess_pulse_func = self.guess_pulse_add else: logger.WARN("No option for guess pulse action '{}' " ", hence ignored.".format(self.guess_pulse_action)) def guess_pulse_add(self, pulse): pulse = pulse + self.guess_pulse return pulse def guess_pulse_modulate(self, pulse): pulse = (1.0 + pulse)*self.guess_pulse return pulse def _init_bounds(self): add_guess_pulse_scale = False if self.lbound is None and self.ubound is None: # no bounds to apply self._bound_scale_cond = None elif self.lbound is None: # only upper bound if self.ubound > 0: self._bound_mean = 0.0 self._bound_scale = self.ubound else: add_guess_pulse_scale = True self._bound_scale = self.scaling*self.num_coeffs + \ self.get_guess_pulse_scale() self._bound_mean = -abs(self._bound_scale) + self.ubound self._bound_scale_cond = self._BSC_GT_MEAN elif self.ubound is None: # only lower bound if self.lbound < 0: self._bound_mean = 0.0 self._bound_scale = abs(self.lbound) else: self._bound_scale = 
self.scaling*self.num_coeffs + \ self.get_guess_pulse_scale() self._bound_mean = abs(self._bound_scale) + self.lbound self._bound_scale_cond = self._BSC_LT_MEAN else: # lower and upper bounds self._bound_mean = 0.5*(self.ubound + self.lbound) self._bound_scale = 0.5*(self.ubound - self.lbound) self._bound_scale_cond = self._BSC_ALL def get_guess_pulse_scale(self): scale = 0.0 if self.guess_pulse is not None: scale = max(np.amax(self.guess_pulse) - np.amin(self.guess_pulse), np.amax(self.guess_pulse)) return scale def _apply_bounds(self, pulse): """ Scaling the amplitudes using the tanh function if there are bounds """ if self._bound_scale_cond == self._BSC_ALL: pulse = np.tanh(pulse)*self._bound_scale + self._bound_mean return pulse elif self._bound_scale_cond == self._BSC_GT_MEAN: scale_where = pulse > self._bound_mean pulse[scale_where] = (np.tanh(pulse[scale_where])*self._bound_scale + self._bound_mean) return pulse elif self._bound_scale_cond == self._BSC_LT_MEAN: scale_where = pulse < self._bound_mean pulse[scale_where] = (np.tanh(pulse[scale_where])*self._bound_scale + self._bound_mean) return pulse else: return pulse class PulseGenCrabFourier(PulseGenCrab): """ Generates a pulse using the Fourier basis functions, i.e. 
sin and cos Attributes ---------- freqs : float array[num_coeffs] Frequencies for the basis functions randomize_freqs : bool If True (default) the some random offset is applied to the frequencies """ def reset(self): """ reset attributes to default values """ PulseGenCrab.reset(self) self.freqs = None self.randomize_freqs = True def init_pulse(self, num_coeffs=None): """ Set the initial freq and coefficient values """ PulseGenCrab.init_pulse(self) self.init_freqs() def init_freqs(self): """ Generate the frequencies These are the Fourier harmonics with a uniformly distributed random offset """ self.freqs = np.empty(self.num_coeffs) ff = 2*np.pi / self.pulse_time for i in range(self.num_coeffs): self.freqs[i] = ff*(i + 1) if self.randomize_freqs: self.freqs += np.random.random(self.num_coeffs) - 0.5 def gen_pulse(self, coeffs=None): """ Generate a pulse using the Fourier basis with the freqs and coeffs attributes. Parameters ---------- coeffs : float array[num_coeffs, num_basis_funcs] The basis coefficient values If given this overides the default and sets the attribute of the same name. """ if coeffs: self.coeffs = coeffs if not self._pulse_initialised: self.init_pulse() pulse = np.zeros(self.num_tslots) for i in range(self.num_coeffs): phase = self.freqs[i]*self.time # basis1comp = self.coeffs[i, 0]*np.sin(phase) # basis2comp = self.coeffs[i, 1]*np.cos(phase) # pulse += basis1comp + basis2comp pulse += self.coeffs[i, 0]*np.sin(phase) + \ self.coeffs[i, 1]*np.cos(phase) if self.guess_pulse_func: pulse = self.guess_pulse_func(pulse) if self.ramping_pulse is not None: pulse = self._apply_ramping_pulse(pulse) return self._apply_bounds(pulse) qutip-4.4.1/qutip/control/pulseoptim.py000066400000000000000000002355211352460343600202750ustar00rootroot00000000000000# -*- coding: utf-8 -*- # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2014 and later, Alexander J G Pitchford # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### # @author: Alexander Pitchford # @email1: agp1@aber.ac.uk # @email2: alex.pitchford@gmail.com # @organization: Aberystwyth University # @supervisor: Daniel Burgarth """ Wrapper functions that will manage the creation of the objects, build the configuration, and execute the algorithm required to optimise a set of ctrl pulses for a given (quantum) system. 
The fidelity error is some measure of distance of the system evolution
from the given target evolution in the time allowed for the evolution.
The functions minimise this fidelity error wrt the piecewise control
amplitudes in the timeslots

There are currently two quantum control pulse optimisation algorithms
implemented in this library. They are accessible through the methods
in this module. Both the algorithms use the scipy.optimize methods
to minimise the fidelity error with respect to the variables that define
the pulse.

GRAPE
-----
The default algorithm (as it was implemented here first) is GRAPE
GRadient Ascent Pulse Engineering [1][2]. It uses a gradient based method
such as BFGS to minimise the fidelity error. This makes convergence very
quick when an exact gradient can be calculated, but this limits the factors
that can be taken into account in the fidelity.

CRAB
----
The CRAB [3][4] algorithm was developed at the University of Ulm.
In full it is the Chopped RAndom Basis algorithm. The main difference
is that it reduces the number of optimisation variables by defining
the control pulses by expansions of basis functions, where the variables
are the coefficients. Typically a Fourier series is chosen, i.e. the
variables are the Fourier coefficients. Therefore it does not need to
compute an explicit gradient. By default it uses the Nelder-Mead method
for fidelity error minimisation.

References
----------
1. N Khaneja et. al.
   Optimal control of coupled spin dynamics: Design of NMR pulse sequences
   by gradient ascent algorithms. J. Magn. Reson. 172, 296–305 (2005).
2. Shai Machnes et.al
   DYNAMO - Dynamic Framework for Quantum Optimal Control
   arXiv.1011.4874
3. Doria, P., Calarco, T. & Montangero, S.
   Optimal Control Technique for Many-Body Quantum Dynamics.
   Phys. Rev. Lett. 106, 1–4 (2011).
4. Caneva, T., Calarco, T. & Montangero, S.
   Chopped random-basis quantum optimization.
   Phys. Rev. A - At. Mol. Opt. Phys. 84, (2011).
""" import numpy as np import warnings # QuTiP from qutip import Qobj import qutip.logging_utils as logging logger = logging.get_logger() # QuTiP control modules import qutip.control.optimconfig as optimconfig import qutip.control.dynamics as dynamics import qutip.control.termcond as termcond import qutip.control.optimizer as optimizer import qutip.control.stats as stats import qutip.control.errors as errors import qutip.control.fidcomp as fidcomp import qutip.control.propcomp as propcomp import qutip.control.pulsegen as pulsegen #import qutip.control.pulsegencrab as pulsegencrab warnings.simplefilter('always', DeprecationWarning) #turn off filter def _param_deprecation(message, stacklevel=3): """ Issue deprecation warning Using stacklevel=3 will ensure message refers the function calling with the deprecated parameter, """ warnings.warn(message, DeprecationWarning, stacklevel=stacklevel) def _upper_safe(s): try: s = s.upper() except: pass return s def optimize_pulse( drift, ctrls, initial, target, num_tslots=None, evo_time=None, tau=None, amp_lbound=None, amp_ubound=None, fid_err_targ=1e-10, min_grad=1e-10, max_iter=500, max_wall_time=180, alg='GRAPE', alg_params=None, optim_params=None, optim_method='DEF', method_params=None, optim_alg=None, max_metric_corr=None, accuracy_factor=None, dyn_type='GEN_MAT', dyn_params=None, prop_type='DEF', prop_params=None, fid_type='DEF', fid_params=None, phase_option=None, fid_err_scale_factor=None, tslot_type='DEF', tslot_params=None, amp_update_mode=None, init_pulse_type='DEF', init_pulse_params=None, pulse_scaling=1.0, pulse_offset=0.0, ramping_pulse_type=None, ramping_pulse_params=None, log_level=logging.NOTSET, out_file_ext=None, gen_stats=False): """ Optimise a control pulse to minimise the fidelity error. The dynamics of the system in any given timeslot are governed by the combined dynamics generator, i.e. 
the sum of the drift+ctrl_amp[j]*ctrls[j] The control pulse is an [n_ts, n_ctrls)] array of piecewise amplitudes Starting from an intital (typically random) pulse, a multivariable optimisation algorithm attempts to determines the optimal values for the control pulse to minimise the fidelity error The fidelity error is some measure of distance of the system evolution from the given target evolution in the time allowed for the evolution. Parameters ---------- drift : Qobj or list of Qobj the underlying dynamics generator of the system can provide list (of length num_tslots) for time dependent drift ctrls : List of Qobj or array like [num_tslots, evo_time] a list of control dynamics generators. These are scaled by the amplitudes to alter the overall dynamics Array like imput can be provided for time dependent control generators initial : Qobj starting point for the evolution. Typically the identity matrix target : Qobj target transformation, e.g. gate or state, for the time evolution num_tslots : integer or None number of timeslots. None implies that timeslots will be given in the tau array evo_time : float or None total time for the evolution None implies that timeslots will be given in the tau array tau : array[num_tslots] of floats or None durations for the timeslots. if this is given then num_tslots and evo_time are dervived from it None implies that timeslot durations will be equal and calculated as evo_time/num_tslots amp_lbound : float or list of floats lower boundaries for the control amplitudes Can be a scalar value applied to all controls or a list of bounds for each control amp_ubound : float or list of floats upper boundaries for the control amplitudes Can be a scalar value applied to all controls or a list of bounds for each control fid_err_targ : float Fidelity error target. Pulse optimisation will terminate when the fidelity error falls below this value mim_grad : float Minimum gradient. 
When the sum of the squares of the gradients wrt to the control amplitudes falls below this value, the optimisation terminates, assuming local minima max_iter : integer Maximum number of iterations of the optimisation algorithm max_wall_time : float Maximum allowed elapsed time for the optimisation algorithm alg : string Algorithm to use in pulse optimisation. Options are: 'GRAPE' (default) - GRadient Ascent Pulse Engineering 'CRAB' - Chopped RAndom Basis alg_params : Dictionary options that are specific to the algorithm see above optim_params : Dictionary The key value pairs are the attribute name and value used to set attribute values Note: attributes are created if they do not exist already, and are overwritten if they do. Note: method_params are applied afterwards and so may override these optim_method : string a scipy.optimize.minimize method that will be used to optimise the pulse for minimum fidelity error Note that FMIN, FMIN_BFGS & FMIN_L_BFGS_B will all result in calling these specific scipy.optimize methods Note the LBFGSB is equivalent to FMIN_L_BFGS_B for backwards capatibility reasons. Supplying DEF will given alg dependent result: GRAPE - Default optim_method is FMIN_L_BFGS_B CRAB - Default optim_method is FMIN method_params : dict Parameters for the optim_method. Note that where there is an attribute of the Optimizer object or the termination_conditions matching the key that attribute. Otherwise, and in some case also, they are assumed to be method_options for the scipy.optimize.minimize method. optim_alg : string Deprecated. Use optim_method. max_metric_corr : integer Deprecated. Use method_params instead accuracy_factor : float Deprecated. Use method_params instead dyn_type : string Dynamics type, i.e. the type of matrix used to describe the dynamics. 
Options are UNIT, GEN_MAT, SYMPL (see Dynamics classes for details) dyn_params : dict Parameters for the Dynamics object The key value pairs are assumed to be attribute name value pairs They applied after the object is created prop_type : string Propagator type i.e. the method used to calculate the propagtors and propagtor gradient for each timeslot options are DEF, APPROX, DIAG, FRECHET, AUG_MAT DEF will use the default for the specific dyn_type (see PropagatorComputer classes for details) prop_params : dict Parameters for the PropagatorComputer object The key value pairs are assumed to be attribute name value pairs They applied after the object is created fid_type : string Fidelity error (and fidelity error gradient) computation method Options are DEF, UNIT, TRACEDIFF, TD_APPROX DEF will use the default for the specific dyn_type (See FidelityComputer classes for details) fid_params : dict Parameters for the FidelityComputer object The key value pairs are assumed to be attribute name value pairs They applied after the object is created phase_option : string Deprecated. Pass in fid_params instead. fid_err_scale_factor : float Deprecated. Use scale_factor key in fid_params instead. tslot_type : string Method for computing the dynamics generators, propagators and evolution in the timeslots. Options: DEF, UPDATE_ALL, DYNAMIC UPDATE_ALL is the only one that currently works (See TimeslotComputer classes for details) tslot_params : dict Parameters for the TimeslotComputer object The key value pairs are assumed to be attribute name value pairs They applied after the object is created amp_update_mode : string Deprecated. Use tslot_type instead. init_pulse_type : string type / shape of pulse(s) used to initialise the the control amplitudes. Options (GRAPE) include: RND, LIN, ZERO, SINE, SQUARE, TRIANGLE, SAW DEF is RND (see PulseGen classes for details) For the CRAB the this the guess_pulse_type. 
init_pulse_params : dict Parameters for the initial / guess pulse generator object The key value pairs are assumed to be attribute name value pairs They applied after the object is created pulse_scaling : float Linear scale factor for generated initial / guess pulses By default initial pulses are generated with amplitudes in the range (-1.0, 1.0). These will be scaled by this parameter pulse_offset : float Linear offset for the pulse. That is this value will be added to any initial / guess pulses generated. ramping_pulse_type : string Type of pulse used to modulate the control pulse. It's intended use for a ramping modulation, which is often required in experimental setups. This is only currently implemented in CRAB. GAUSSIAN_EDGE was added for this purpose. ramping_pulse_params : dict Parameters for the ramping pulse generator object The key value pairs are assumed to be attribute name value pairs They applied after the object is created log_level : integer level of messaging output from the logger. Options are attributes of qutip.logging_utils, in decreasing levels of messaging, are: DEBUG_INTENSE, DEBUG_VERBOSE, DEBUG, INFO, WARN, ERROR, CRITICAL Anything WARN or above is effectively 'quiet' execution, assuming everything runs as expected. The default NOTSET implies that the level will be taken from the QuTiP settings file, which by default is WARN out_file_ext : string or None files containing the initial and final control pulse amplitudes are saved to the current directory. 
The default name will be postfixed with this extension Setting this to None will suppress the output of files gen_stats : boolean if set to True then statistics for the optimisation run will be generated - accessible through attributes of the stats object Returns ------- opt : OptimResult Returns instance of OptimResult, which has attributes giving the reason for termination, final fidelity error, final evolution final amplitudes, statistics etc """ if log_level == logging.NOTSET: log_level = logger.getEffectiveLevel() else: logger.setLevel(log_level) # The parameters types are checked in create_pulse_optimizer # so no need to do so here # However, the deprecation management is repeated here # so that the stack level is correct if not optim_alg is None: optim_method = optim_alg _param_deprecation( "The 'optim_alg' parameter is deprecated. " "Use 'optim_method' instead") if not max_metric_corr is None: if isinstance(method_params, dict): if not 'max_metric_corr' in method_params: method_params['max_metric_corr'] = max_metric_corr else: method_params = {'max_metric_corr':max_metric_corr} _param_deprecation( "The 'max_metric_corr' parameter is deprecated. " "Use 'max_metric_corr' in method_params instead") if not accuracy_factor is None: if isinstance(method_params, dict): if not 'accuracy_factor' in method_params: method_params['accuracy_factor'] = accuracy_factor else: method_params = {'accuracy_factor':accuracy_factor} _param_deprecation( "The 'accuracy_factor' parameter is deprecated. " "Use 'accuracy_factor' in method_params instead") # phase_option if not phase_option is None: if isinstance(fid_params, dict): if not 'phase_option' in fid_params: fid_params['phase_option'] = phase_option else: fid_params = {'phase_option':phase_option} _param_deprecation( "The 'phase_option' parameter is deprecated. 
" "Use 'phase_option' in fid_params instead") # fid_err_scale_factor if not fid_err_scale_factor is None: if isinstance(fid_params, dict): if not 'fid_err_scale_factor' in fid_params: fid_params['scale_factor'] = fid_err_scale_factor else: fid_params = {'scale_factor':fid_err_scale_factor} _param_deprecation( "The 'fid_err_scale_factor' parameter is deprecated. " "Use 'scale_factor' in fid_params instead") # amp_update_mode if not amp_update_mode is None: amp_update_mode_up = _upper_safe(amp_update_mode) if amp_update_mode_up == 'ALL': tslot_type = 'UPDATE_ALL' else: tslot_type = amp_update_mode _param_deprecation( "The 'amp_update_mode' parameter is deprecated. " "Use 'tslot_type' instead") optim = create_pulse_optimizer( drift, ctrls, initial, target, num_tslots=num_tslots, evo_time=evo_time, tau=tau, amp_lbound=amp_lbound, amp_ubound=amp_ubound, fid_err_targ=fid_err_targ, min_grad=min_grad, max_iter=max_iter, max_wall_time=max_wall_time, alg=alg, alg_params=alg_params, optim_params=optim_params, optim_method=optim_method, method_params=method_params, dyn_type=dyn_type, dyn_params=dyn_params, prop_type=prop_type, prop_params=prop_params, fid_type=fid_type, fid_params=fid_params, init_pulse_type=init_pulse_type, init_pulse_params=init_pulse_params, pulse_scaling=pulse_scaling, pulse_offset=pulse_offset, ramping_pulse_type=ramping_pulse_type, ramping_pulse_params=ramping_pulse_params, log_level=log_level, gen_stats=gen_stats) dyn = optim.dynamics dyn.init_timeslots() # Generate initial pulses for each control init_amps = np.zeros([dyn.num_tslots, dyn.num_ctrls]) if alg == 'CRAB': for j in range(dyn.num_ctrls): pgen = optim.pulse_generator[j] pgen.init_pulse() init_amps[:, j] = pgen.gen_pulse() else: pgen = optim.pulse_generator for j in range(dyn.num_ctrls): init_amps[:, j] = pgen.gen_pulse() # Initialise the starting amplitudes dyn.initialize_controls(init_amps) if log_level <= logging.INFO: msg = "System configuration:\n" dg_name = "dynamics generator" if 
dyn_type == 'UNIT': dg_name = "Hamiltonian" if dyn.time_depend_drift: msg += "Initial drift {}:\n".format(dg_name) msg += str(dyn.drift_dyn_gen[0]) else: msg += "Drift {}:\n".format(dg_name) msg += str(dyn.drift_dyn_gen) for j in range(dyn.num_ctrls): msg += "\nControl {} {}:\n".format(j+1, dg_name) msg += str(dyn.ctrl_dyn_gen[j]) msg += "\nInitial state / operator:\n" msg += str(dyn.initial) msg += "\nTarget state / operator:\n" msg += str(dyn.target) logger.info(msg) if out_file_ext is not None: # Save initial amplitudes to a text file pulsefile = "ctrl_amps_initial_" + out_file_ext dyn.save_amps(pulsefile) if log_level <= logging.INFO: logger.info("Initial amplitudes output to file: " + pulsefile) # Start the optimisation result = optim.run_optimization() if out_file_ext is not None: # Save final amplitudes to a text file pulsefile = "ctrl_amps_final_" + out_file_ext dyn.save_amps(pulsefile) if log_level <= logging.INFO: logger.info("Final amplitudes output to file: " + pulsefile) return result def optimize_pulse_unitary( H_d, H_c, U_0, U_targ, num_tslots=None, evo_time=None, tau=None, amp_lbound=None, amp_ubound=None, fid_err_targ=1e-10, min_grad=1e-10, max_iter=500, max_wall_time=180, alg='GRAPE', alg_params=None, optim_params=None, optim_method='DEF', method_params=None, optim_alg=None, max_metric_corr=None, accuracy_factor=None, phase_option='PSU', dyn_params=None, prop_params=None, fid_params=None, tslot_type='DEF', tslot_params=None, amp_update_mode=None, init_pulse_type='DEF', init_pulse_params=None, pulse_scaling=1.0, pulse_offset=0.0, ramping_pulse_type=None, ramping_pulse_params=None, log_level=logging.NOTSET, out_file_ext=None, gen_stats=False): """ Optimise a control pulse to minimise the fidelity error, assuming that the dynamics of the system are generated by unitary operators. 
This function is simply a wrapper for optimize_pulse, where the appropriate options for unitary dynamics are chosen and the parameter names are in the format familiar to unitary dynamics The dynamics of the system in any given timeslot are governed by the combined Hamiltonian, i.e. the sum of the H_d + ctrl_amp[j]*H_c[j] The control pulse is an [n_ts, n_ctrls] array of piecewise amplitudes Starting from an intital (typically random) pulse, a multivariable optimisation algorithm attempts to determines the optimal values for the control pulse to minimise the fidelity error The maximum fidelity for a unitary system is 1, i.e. when the time evolution resulting from the pulse is equivalent to the target. And therefore the fidelity error is 1 - fidelity Parameters ---------- H_d : Qobj or list of Qobj Drift (aka system) the underlying Hamiltonian of the system can provide list (of length num_tslots) for time dependent drift H_c : List of Qobj or array like [num_tslots, evo_time] a list of control Hamiltonians. These are scaled by the amplitudes to alter the overall dynamics Array like imput can be provided for time dependent control generators U_0 : Qobj starting point for the evolution. Typically the identity matrix U_targ : Qobj target transformation, e.g. gate or state, for the time evolution num_tslots : integer or None number of timeslots. None implies that timeslots will be given in the tau array evo_time : float or None total time for the evolution None implies that timeslots will be given in the tau array tau : array[num_tslots] of floats or None durations for the timeslots. 
        if this is given then num_tslots and evo_time are derived
        from it
        None implies that timeslot durations will be equal and
        calculated as evo_time/num_tslots

    amp_lbound : float or list of floats
        lower boundaries for the control amplitudes
        Can be a scalar value applied to all controls
        or a list of bounds for each control

    amp_ubound : float or list of floats
        upper boundaries for the control amplitudes
        Can be a scalar value applied to all controls
        or a list of bounds for each control

    fid_err_targ : float
        Fidelity error target. Pulse optimisation will
        terminate when the fidelity error falls below this value

    min_grad : float
        Minimum gradient. When the sum of the squares of the
        gradients wrt the control amplitudes falls below this
        value, the optimisation terminates, assuming local minima

    max_iter : integer
        Maximum number of iterations of the optimisation algorithm

    max_wall_time : float
        Maximum allowed elapsed time for the optimisation algorithm

    alg : string
        Algorithm to use in pulse optimisation.
        Options are:
            'GRAPE' (default) - GRadient Ascent Pulse Engineering
            'CRAB' - Chopped RAndom Basis

    alg_params : Dictionary
        options that are specific to the algorithm see above

    optim_params : Dictionary
        The key value pairs are the attribute name and value
        used to set attribute values
        Note: attributes are created if they do not exist already,
        and are overwritten if they do.
        Note: method_params are applied afterwards and so may override these

    optim_method : string
        a scipy.optimize.minimize method that will be used to optimise
        the pulse for minimum fidelity error
        Note that FMIN, FMIN_BFGS & FMIN_L_BFGS_B will all result
        in calling these specific scipy.optimize methods
        Note the LBFGSB is equivalent to FMIN_L_BFGS_B for backwards
        compatibility reasons.
        Supplying DEF will give an alg dependent result:
            GRAPE - Default optim_method is FMIN_L_BFGS_B
            CRAB - Default optim_method is FMIN

    method_params : dict
        Parameters for the optim_method.
Note that where there is an attribute of the Optimizer object or the termination_conditions matching the key that attribute. Otherwise, and in some case also, they are assumed to be method_options for the scipy.optimize.minimize method. optim_alg : string Deprecated. Use optim_method. max_metric_corr : integer Deprecated. Use method_params instead accuracy_factor : float Deprecated. Use method_params instead phase_option : string determines how global phase is treated in fidelity calculations (fid_type='UNIT' only). Options: PSU - global phase ignored SU - global phase included dyn_params : dict Parameters for the Dynamics object The key value pairs are assumed to be attribute name value pairs They applied after the object is created prop_params : dict Parameters for the PropagatorComputer object The key value pairs are assumed to be attribute name value pairs They applied after the object is created fid_params : dict Parameters for the FidelityComputer object The key value pairs are assumed to be attribute name value pairs They applied after the object is created tslot_type : string Method for computing the dynamics generators, propagators and evolution in the timeslots. Options: DEF, UPDATE_ALL, DYNAMIC UPDATE_ALL is the only one that currently works (See TimeslotComputer classes for details) tslot_params : dict Parameters for the TimeslotComputer object The key value pairs are assumed to be attribute name value pairs They applied after the object is created amp_update_mode : string Deprecated. Use tslot_type instead. init_pulse_type : string type / shape of pulse(s) used to initialise the the control amplitudes. Options (GRAPE) include: RND, LIN, ZERO, SINE, SQUARE, TRIANGLE, SAW DEF is RND (see PulseGen classes for details) For the CRAB the this the guess_pulse_type. 
init_pulse_params : dict Parameters for the initial / guess pulse generator object The key value pairs are assumed to be attribute name value pairs They applied after the object is created pulse_scaling : float Linear scale factor for generated initial / guess pulses By default initial pulses are generated with amplitudes in the range (-1.0, 1.0). These will be scaled by this parameter pulse_offset : float Linear offset for the pulse. That is this value will be added to any initial / guess pulses generated. ramping_pulse_type : string Type of pulse used to modulate the control pulse. It's intended use for a ramping modulation, which is often required in experimental setups. This is only currently implemented in CRAB. GAUSSIAN_EDGE was added for this purpose. ramping_pulse_params : dict Parameters for the ramping pulse generator object The key value pairs are assumed to be attribute name value pairs They applied after the object is created log_level : integer level of messaging output from the logger. Options are attributes of qutip.logging_utils, in decreasing levels of messaging, are: DEBUG_INTENSE, DEBUG_VERBOSE, DEBUG, INFO, WARN, ERROR, CRITICAL Anything WARN or above is effectively 'quiet' execution, assuming everything runs as expected. The default NOTSET implies that the level will be taken from the QuTiP settings file, which by default is WARN out_file_ext : string or None files containing the initial and final control pulse amplitudes are saved to the current directory. 
        The default name will be postfixed with this extension
        Setting this to None will suppress the output of files

    gen_stats : boolean
        if set to True then statistics for the optimisation
        run will be generated - accessible through attributes
        of the stats object

    Returns
    -------
    opt : OptimResult
        Returns instance of OptimResult, which has attributes giving the
        reason for termination, final fidelity error, final evolution
        final amplitudes, statistics etc
    """
    # parameters are checked in create pulse optimiser

    # The deprecation management is repeated here
    # so that the stack level is correct
    # Each shim below maps a deprecated keyword onto its modern
    # replacement before delegating to optimize_pulse.
    if not optim_alg is None:
        optim_method = optim_alg
        _param_deprecation(
            "The 'optim_alg' parameter is deprecated. "
            "Use 'optim_method' instead")

    if not max_metric_corr is None:
        # Fold the legacy scalar into method_params, but never override a
        # value the caller already supplied there.
        if isinstance(method_params, dict):
            if not 'max_metric_corr' in method_params:
                method_params['max_metric_corr'] = max_metric_corr
        else:
            method_params = {'max_metric_corr':max_metric_corr}
        _param_deprecation(
            "The 'max_metric_corr' parameter is deprecated. "
            "Use 'max_metric_corr' in method_params instead")

    if not accuracy_factor is None:
        # Same folding rule as max_metric_corr above.
        if isinstance(method_params, dict):
            if not 'accuracy_factor' in method_params:
                method_params['accuracy_factor'] = accuracy_factor
        else:
            method_params = {'accuracy_factor':accuracy_factor}
        _param_deprecation(
            "The 'accuracy_factor' parameter is deprecated. "
            "Use 'accuracy_factor' in method_params instead")

    # amp_update_mode
    if not amp_update_mode is None:
        # The legacy 'ALL' mode maps to the new 'UPDATE_ALL' tslot_type;
        # any other value is passed through unchanged.
        amp_update_mode_up = _upper_safe(amp_update_mode)
        if amp_update_mode_up == 'ALL':
            tslot_type = 'UPDATE_ALL'
        else:
            tslot_type = amp_update_mode
        _param_deprecation(
            "The 'amp_update_mode' parameter is deprecated. "
            "Use 'tslot_type' instead")

    # phase_option is still valid for this method
    # pass it via the fid_params
    if not phase_option is None:
        if fid_params is None:
            fid_params = {'phase_option':phase_option}
        else:
            if not 'phase_option' in fid_params:
                fid_params['phase_option'] = phase_option

    # Delegate to the generic optimizer with unitary dynamics selected.
    # NOTE(review): tslot_type / tslot_params are accepted by this function
    # (and tslot_type may be set by the amp_update_mode shim above) but are
    # not forwarded in this call - TODO confirm whether that is intended.
    return optimize_pulse(
        drift=H_d, ctrls=H_c, initial=U_0, target=U_targ,
        num_tslots=num_tslots, evo_time=evo_time, tau=tau,
        amp_lbound=amp_lbound, amp_ubound=amp_ubound,
        fid_err_targ=fid_err_targ, min_grad=min_grad,
        max_iter=max_iter, max_wall_time=max_wall_time,
        alg=alg, alg_params=alg_params, optim_params=optim_params,
        optim_method=optim_method, method_params=method_params,
        dyn_type='UNIT', dyn_params=dyn_params,
        prop_params=prop_params, fid_params=fid_params,
        init_pulse_type=init_pulse_type,
        init_pulse_params=init_pulse_params,
        pulse_scaling=pulse_scaling, pulse_offset=pulse_offset,
        ramping_pulse_type=ramping_pulse_type,
        ramping_pulse_params=ramping_pulse_params,
        log_level=log_level, out_file_ext=out_file_ext,
        gen_stats=gen_stats)


def opt_pulse_crab(
        drift, ctrls, initial, target,
        num_tslots=None, evo_time=None, tau=None,
        amp_lbound=None, amp_ubound=None,
        fid_err_targ=1e-5,
        max_iter=500, max_wall_time=180,
        alg_params=None,
        num_coeffs=None, init_coeff_scaling=1.0,
        optim_params=None, optim_method='fmin', method_params=None,
        dyn_type='GEN_MAT', dyn_params=None,
        prop_type='DEF', prop_params=None,
        fid_type='DEF', fid_params=None,
        tslot_type='DEF', tslot_params=None,
        guess_pulse_type=None, guess_pulse_params=None,
        guess_pulse_scaling=1.0, guess_pulse_offset=0.0,
        guess_pulse_action='MODULATE',
        ramping_pulse_type=None, ramping_pulse_params=None,
        log_level=logging.NOTSET, out_file_ext=None, gen_stats=False):
    """
    Optimise a control pulse to minimise the fidelity error.
    The dynamics of the system in any given timeslot are governed
    by the combined dynamics generator,
    i.e. the sum of the drift+ctrl_amp[j]*ctrls[j]
    The control pulse is an [n_ts, n_ctrls] array of piecewise amplitudes.
The CRAB algorithm uses basis function coefficents as the variables to optimise. It does NOT use any gradient function. A multivariable optimisation algorithm attempts to determines the optimal values for the control pulse to minimise the fidelity error The fidelity error is some measure of distance of the system evolution from the given target evolution in the time allowed for the evolution. Parameters ---------- drift : Qobj or list of Qobj the underlying dynamics generator of the system can provide list (of length num_tslots) for time dependent drift ctrls : List of Qobj or array like [num_tslots, evo_time] a list of control dynamics generators. These are scaled by the amplitudes to alter the overall dynamics Array like imput can be provided for time dependent control generators initial : Qobj starting point for the evolution. Typically the identity matrix target : Qobj target transformation, e.g. gate or state, for the time evolution num_tslots : integer or None number of timeslots. None implies that timeslots will be given in the tau array evo_time : float or None total time for the evolution None implies that timeslots will be given in the tau array tau : array[num_tslots] of floats or None durations for the timeslots. if this is given then num_tslots and evo_time are dervived from it None implies that timeslot durations will be equal and calculated as evo_time/num_tslots amp_lbound : float or list of floats lower boundaries for the control amplitudes Can be a scalar value applied to all controls or a list of bounds for each control amp_ubound : float or list of floats upper boundaries for the control amplitudes Can be a scalar value applied to all controls or a list of bounds for each control fid_err_targ : float Fidelity error target. 
Pulse optimisation will terminate when the fidelity error falls below this value max_iter : integer Maximum number of iterations of the optimisation algorithm max_wall_time : float Maximum allowed elapsed time for the optimisation algorithm alg_params : Dictionary options that are specific to the algorithm see above optim_params : Dictionary The key value pairs are the attribute name and value used to set attribute values Note: attributes are created if they do not exist already, and are overwritten if they do. Note: method_params are applied afterwards and so may override these coeff_scaling : float Linear scale factor for the random basis coefficients By default these range from -1.0 to 1.0 Note this is overridden by alg_params (if given there) num_coeffs : integer Number of coefficients used for each basis function Note this is calculated automatically based on the dimension of the dynamics if not given. It is crucial to the performane of the algorithm that it is set as low as possible, while still giving high enough frequencies. Note this is overridden by alg_params (if given there) optim_method : string Multi-variable optimisation method The only tested options are 'fmin' and 'Nelder-mead' In theory any non-gradient method implemented in scipy.optimize.mininize could be used. method_params : dict Parameters for the optim_method. Note that where there is an attribute of the Optimizer object or the termination_conditions matching the key that attribute. Otherwise, and in some case also, they are assumed to be method_options for the scipy.optimize.minimize method. The commonly used parameter are: xtol - limit on variable change for convergence ftol - limit on fidelity error change for convergence dyn_type : string Dynamics type, i.e. the type of matrix used to describe the dynamics. 
Options are UNIT, GEN_MAT, SYMPL (see Dynamics classes for details) dyn_params : dict Parameters for the Dynamics object The key value pairs are assumed to be attribute name value pairs They applied after the object is created prop_type : string Propagator type i.e. the method used to calculate the propagtors and propagtor gradient for each timeslot options are DEF, APPROX, DIAG, FRECHET, AUG_MAT DEF will use the default for the specific dyn_type (see PropagatorComputer classes for details) prop_params : dict Parameters for the PropagatorComputer object The key value pairs are assumed to be attribute name value pairs They applied after the object is created fid_type : string Fidelity error (and fidelity error gradient) computation method Options are DEF, UNIT, TRACEDIFF, TD_APPROX DEF will use the default for the specific dyn_type (See FidelityComputer classes for details) fid_params : dict Parameters for the FidelityComputer object The key value pairs are assumed to be attribute name value pairs They applied after the object is created tslot_type : string Method for computing the dynamics generators, propagators and evolution in the timeslots. Options: DEF, UPDATE_ALL, DYNAMIC UPDATE_ALL is the only one that currently works (See TimeslotComputer classes for details) tslot_params : dict Parameters for the TimeslotComputer object The key value pairs are assumed to be attribute name value pairs They applied after the object is created guess_pulse_type : string type / shape of pulse(s) used modulate the control amplitudes. Options include: RND, LIN, ZERO, SINE, SQUARE, TRIANGLE, SAW, GAUSSIAN Default is None guess_pulse_params : dict Parameters for the guess pulse generator object The key value pairs are assumed to be attribute name value pairs They applied after the object is created guess_pulse_action : string Determines how the guess pulse is applied to the pulse generated by the basis expansion. 
Options are: MODULATE, ADD Default is MODULATE pulse_scaling : float Linear scale factor for generated guess pulses By default initial pulses are generated with amplitudes in the range (-1.0, 1.0). These will be scaled by this parameter pulse_offset : float Linear offset for the pulse. That is this value will be added to any guess pulses generated. ramping_pulse_type : string Type of pulse used to modulate the control pulse. It's intended use for a ramping modulation, which is often required in experimental setups. This is only currently implemented in CRAB. GAUSSIAN_EDGE was added for this purpose. ramping_pulse_params : dict Parameters for the ramping pulse generator object The key value pairs are assumed to be attribute name value pairs They applied after the object is created log_level : integer level of messaging output from the logger. Options are attributes of qutip.logging_utils, in decreasing levels of messaging, are: DEBUG_INTENSE, DEBUG_VERBOSE, DEBUG, INFO, WARN, ERROR, CRITICAL Anything WARN or above is effectively 'quiet' execution, assuming everything runs as expected. The default NOTSET implies that the level will be taken from the QuTiP settings file, which by default is WARN out_file_ext : string or None files containing the initial and final control pulse amplitudes are saved to the current directory. 
        The default name will be postfixed with this extension
        Setting this to None will suppress the output of files

    gen_stats : boolean
        if set to True then statistics for the optimisation
        run will be generated - accessible through attributes
        of the stats object

    Returns
    -------
    opt : OptimResult
        Returns instance of OptimResult, which has attributes giving the
        reason for termination, final fidelity error, final evolution
        final amplitudes, statistics etc
    """
    # The parameters are checked in create_pulse_optimizer
    # so no need to do so here
    if log_level == logging.NOTSET:
        log_level = logger.getEffectiveLevel()
    else:
        logger.setLevel(log_level)

    # build the algorithm options
    # Explicit num_coeffs / init_coeff_scaling arguments are folded into
    # alg_params, but never override values already present there.
    if not isinstance(alg_params, dict):
        alg_params = {'num_coeffs':num_coeffs,
                      'init_coeff_scaling':init_coeff_scaling}
    else:
        if (num_coeffs is not None and
                not 'num_coeffs' in alg_params):
            alg_params['num_coeffs'] = num_coeffs
        if (init_coeff_scaling is not None and
                not 'init_coeff_scaling' in alg_params):
            alg_params['init_coeff_scaling'] = init_coeff_scaling

    # Build the guess pulse options
    # Any options passed in the guess_pulse_params take precedence
    # over the parameter values.
    if guess_pulse_type:
        if not isinstance(guess_pulse_params, dict):
            guess_pulse_params = {}
        if (guess_pulse_scaling is not None and
                not 'scaling' in guess_pulse_params):
            guess_pulse_params['scaling'] = guess_pulse_scaling
        if (guess_pulse_offset is not None and
                not 'offset' in guess_pulse_params):
            guess_pulse_params['offset'] = guess_pulse_offset
        if (guess_pulse_action is not None and
                not 'pulse_action' in guess_pulse_params):
            guess_pulse_params['pulse_action'] = guess_pulse_action

    # Delegate to the generic optimizer. CRAB is gradient-free, so the
    # gradient termination test is disabled by passing min_grad=0.0, and
    # the guess pulse is supplied as the 'initial' pulse generator.
    return optimize_pulse(
        drift, ctrls, initial, target,
        num_tslots=num_tslots, evo_time=evo_time, tau=tau,
        amp_lbound=amp_lbound, amp_ubound=amp_ubound,
        fid_err_targ=fid_err_targ, min_grad=0.0,
        max_iter=max_iter, max_wall_time=max_wall_time,
        alg='CRAB', alg_params=alg_params, optim_params=optim_params,
        optim_method=optim_method, method_params=method_params,
        dyn_type=dyn_type, dyn_params=dyn_params,
        prop_type=prop_type, prop_params=prop_params,
        fid_type=fid_type, fid_params=fid_params,
        tslot_type=tslot_type, tslot_params=tslot_params,
        init_pulse_type=guess_pulse_type,
        init_pulse_params=guess_pulse_params,
        ramping_pulse_type=ramping_pulse_type,
        ramping_pulse_params=ramping_pulse_params,
        log_level=log_level, out_file_ext=out_file_ext,
        gen_stats=gen_stats)


def opt_pulse_crab_unitary(
        H_d, H_c, U_0, U_targ,
        num_tslots=None, evo_time=None, tau=None,
        amp_lbound=None, amp_ubound=None,
        fid_err_targ=1e-5,
        max_iter=500, max_wall_time=180,
        alg_params=None,
        num_coeffs=None, init_coeff_scaling=1.0,
        optim_params=None, optim_method='fmin', method_params=None,
        phase_option='PSU',
        dyn_params=None, prop_params=None, fid_params=None,
        tslot_type='DEF', tslot_params=None,
        guess_pulse_type=None, guess_pulse_params=None,
        guess_pulse_scaling=1.0, guess_pulse_offset=0.0,
        guess_pulse_action='MODULATE',
        ramping_pulse_type=None, ramping_pulse_params=None,
        log_level=logging.NOTSET, out_file_ext=None, gen_stats=False):
    """
    Optimise a control pulse to minimise the fidelity error, assuming that
    the dynamics of the system are
generated by unitary operators. This function is simply a wrapper for optimize_pulse, where the appropriate options for unitary dynamics are chosen and the parameter names are in the format familiar to unitary dynamics The dynamics of the system in any given timeslot are governed by the combined Hamiltonian, i.e. the sum of the H_d + ctrl_amp[j]*H_c[j] The control pulse is an [n_ts, n_ctrls] array of piecewise amplitudes The CRAB algorithm uses basis function coefficents as the variables to optimise. It does NOT use any gradient function. A multivariable optimisation algorithm attempts to determines the optimal values for the control pulse to minimise the fidelity error The fidelity error is some measure of distance of the system evolution from the given target evolution in the time allowed for the evolution. Parameters ---------- H_d : Qobj or list of Qobj Drift (aka system) the underlying Hamiltonian of the system can provide list (of length num_tslots) for time dependent drift H_c : List of Qobj or array like [num_tslots, evo_time] a list of control Hamiltonians. These are scaled by the amplitudes to alter the overall dynamics Array like imput can be provided for time dependent control generators U_0 : Qobj starting point for the evolution. Typically the identity matrix U_targ : Qobj target transformation, e.g. gate or state, for the time evolution num_tslots : integer or None number of timeslots. None implies that timeslots will be given in the tau array evo_time : float or None total time for the evolution None implies that timeslots will be given in the tau array tau : array[num_tslots] of floats or None durations for the timeslots. 
if this is given then num_tslots and evo_time are dervived from it None implies that timeslot durations will be equal and calculated as evo_time/num_tslots amp_lbound : float or list of floats lower boundaries for the control amplitudes Can be a scalar value applied to all controls or a list of bounds for each control amp_ubound : float or list of floats upper boundaries for the control amplitudes Can be a scalar value applied to all controls or a list of bounds for each control fid_err_targ : float Fidelity error target. Pulse optimisation will terminate when the fidelity error falls below this value max_iter : integer Maximum number of iterations of the optimisation algorithm max_wall_time : float Maximum allowed elapsed time for the optimisation algorithm alg_params : Dictionary options that are specific to the algorithm see above optim_params : Dictionary The key value pairs are the attribute name and value used to set attribute values Note: attributes are created if they do not exist already, and are overwritten if they do. Note: method_params are applied afterwards and so may override these coeff_scaling : float Linear scale factor for the random basis coefficients By default these range from -1.0 to 1.0 Note this is overridden by alg_params (if given there) num_coeffs : integer Number of coefficients used for each basis function Note this is calculated automatically based on the dimension of the dynamics if not given. It is crucial to the performane of the algorithm that it is set as low as possible, while still giving high enough frequencies. Note this is overridden by alg_params (if given there) optim_method : string Multi-variable optimisation method The only tested options are 'fmin' and 'Nelder-mead' In theory any non-gradient method implemented in scipy.optimize.mininize could be used. method_params : dict Parameters for the optim_method. 
Note that where there is an attribute of the Optimizer object or the termination_conditions matching the key that attribute. Otherwise, and in some case also, they are assumed to be method_options for the scipy.optimize.minimize method. The commonly used parameter are: xtol - limit on variable change for convergence ftol - limit on fidelity error change for convergence phase_option : string determines how global phase is treated in fidelity calculations (fid_type='UNIT' only). Options: PSU - global phase ignored SU - global phase included dyn_params : dict Parameters for the Dynamics object The key value pairs are assumed to be attribute name value pairs They applied after the object is created prop_params : dict Parameters for the PropagatorComputer object The key value pairs are assumed to be attribute name value pairs They applied after the object is created fid_params : dict Parameters for the FidelityComputer object The key value pairs are assumed to be attribute name value pairs They applied after the object is created tslot_type : string Method for computing the dynamics generators, propagators and evolution in the timeslots. Options: DEF, UPDATE_ALL, DYNAMIC UPDATE_ALL is the only one that currently works (See TimeslotComputer classes for details) tslot_params : dict Parameters for the TimeslotComputer object The key value pairs are assumed to be attribute name value pairs They applied after the object is created guess_pulse_type : string type / shape of pulse(s) used modulate the control amplitudes. Options include: RND, LIN, ZERO, SINE, SQUARE, TRIANGLE, SAW, GAUSSIAN Default is None guess_pulse_params : dict Parameters for the guess pulse generator object The key value pairs are assumed to be attribute name value pairs They applied after the object is created guess_pulse_action : string Determines how the guess pulse is applied to the pulse generated by the basis expansion. 
Options are: MODULATE, ADD Default is MODULATE pulse_scaling : float Linear scale factor for generated guess pulses By default initial pulses are generated with amplitudes in the range (-1.0, 1.0). These will be scaled by this parameter pulse_offset : float Linear offset for the pulse. That is this value will be added to any guess pulses generated. ramping_pulse_type : string Type of pulse used to modulate the control pulse. It's intended use for a ramping modulation, which is often required in experimental setups. This is only currently implemented in CRAB. GAUSSIAN_EDGE was added for this purpose. ramping_pulse_params : dict Parameters for the ramping pulse generator object The key value pairs are assumed to be attribute name value pairs They applied after the object is created log_level : integer level of messaging output from the logger. Options are attributes of qutip.logging_utils, in decreasing levels of messaging, are: DEBUG_INTENSE, DEBUG_VERBOSE, DEBUG, INFO, WARN, ERROR, CRITICAL Anything WARN or above is effectively 'quiet' execution, assuming everything runs as expected. The default NOTSET implies that the level will be taken from the QuTiP settings file, which by default is WARN out_file_ext : string or None files containing the initial and final control pulse amplitudes are saved to the current directory. 
The default name will be postfixed with this extension Setting this to None will suppress the output of files gen_stats : boolean if set to True then statistics for the optimisation run will be generated - accessible through attributes of the stats object Returns ------- opt : OptimResult Returns instance of OptimResult, which has attributes giving the reason for termination, final fidelity error, final evolution final amplitudes, statistics etc """ # The parameters are checked in create_pulse_optimizer # so no need to do so here if log_level == logging.NOTSET: log_level = logger.getEffectiveLevel() else: logger.setLevel(log_level) # build the algorithm options if not isinstance(alg_params, dict): alg_params = {'num_coeffs':num_coeffs, 'init_coeff_scaling':init_coeff_scaling} else: if (num_coeffs is not None and not 'num_coeffs' in alg_params): alg_params['num_coeffs'] = num_coeffs if (init_coeff_scaling is not None and not 'init_coeff_scaling' in alg_params): alg_params['init_coeff_scaling'] = init_coeff_scaling # Build the guess pulse options # Any options passed in the guess_pulse_params take precedence # over the parameter values. 
if guess_pulse_type: if not isinstance(guess_pulse_params, dict): guess_pulse_params = {} if (guess_pulse_scaling is not None and not 'scaling' in guess_pulse_params): guess_pulse_params['scaling'] = guess_pulse_scaling if (guess_pulse_offset is not None and not 'offset' in guess_pulse_params): guess_pulse_params['offset'] = guess_pulse_offset if (guess_pulse_action is not None and not 'pulse_action' in guess_pulse_params): guess_pulse_params['pulse_action'] = guess_pulse_action return optimize_pulse_unitary( H_d, H_c, U_0, U_targ, num_tslots=num_tslots, evo_time=evo_time, tau=tau, amp_lbound=amp_lbound, amp_ubound=amp_ubound, fid_err_targ=fid_err_targ, min_grad=0.0, max_iter=max_iter, max_wall_time=max_wall_time, alg='CRAB', alg_params=alg_params, optim_params=optim_params, optim_method=optim_method, method_params=method_params, phase_option=phase_option, dyn_params=dyn_params, prop_params=prop_params, fid_params=fid_params, tslot_type=tslot_type, tslot_params=tslot_params, init_pulse_type=guess_pulse_type, init_pulse_params=guess_pulse_params, ramping_pulse_type=ramping_pulse_type, ramping_pulse_params=ramping_pulse_params, log_level=log_level, out_file_ext=out_file_ext, gen_stats=gen_stats) def create_pulse_optimizer( drift, ctrls, initial, target, num_tslots=None, evo_time=None, tau=None, amp_lbound=None, amp_ubound=None, fid_err_targ=1e-10, min_grad=1e-10, max_iter=500, max_wall_time=180, alg='GRAPE', alg_params=None, optim_params=None, optim_method='DEF', method_params=None, optim_alg=None, max_metric_corr=None, accuracy_factor=None, dyn_type='GEN_MAT', dyn_params=None, prop_type='DEF', prop_params=None, fid_type='DEF', fid_params=None, phase_option=None, fid_err_scale_factor=None, tslot_type='DEF', tslot_params=None, amp_update_mode=None, init_pulse_type='DEF', init_pulse_params=None, pulse_scaling=1.0, pulse_offset=0.0, ramping_pulse_type=None, ramping_pulse_params=None, log_level=logging.NOTSET, gen_stats=False): """ Generate the objects of the appropriate 
subclasses required for the pulse optmisation based on the parameters given Note this method may be preferable to calling optimize_pulse if more detailed configuration is required before running the optmisation algorthim, or the algorithm will be run many times, for instances when trying to finding global the optimum or minimum time optimisation Parameters ---------- drift : Qobj or list of Qobj the underlying dynamics generator of the system can provide list (of length num_tslots) for time dependent drift ctrls : List of Qobj or array like [num_tslots, evo_time] a list of control dynamics generators. These are scaled by the amplitudes to alter the overall dynamics Array like imput can be provided for time dependent control generators initial : Qobj starting point for the evolution. Typically the identity matrix target : Qobj target transformation, e.g. gate or state, for the time evolution num_tslots : integer or None number of timeslots. None implies that timeslots will be given in the tau array evo_time : float or None total time for the evolution None implies that timeslots will be given in the tau array tau : array[num_tslots] of floats or None durations for the timeslots. if this is given then num_tslots and evo_time are dervived from it None implies that timeslot durations will be equal and calculated as evo_time/num_tslots amp_lbound : float or list of floats lower boundaries for the control amplitudes Can be a scalar value applied to all controls or a list of bounds for each control amp_ubound : float or list of floats upper boundaries for the control amplitudes Can be a scalar value applied to all controls or a list of bounds for each control fid_err_targ : float Fidelity error target. Pulse optimisation will terminate when the fidelity error falls below this value mim_grad : float Minimum gradient. 
When the sum of the squares of the gradients wrt to the control amplitudes falls below this value, the optimisation terminates, assuming local minima max_iter : integer Maximum number of iterations of the optimisation algorithm max_wall_time : float Maximum allowed elapsed time for the optimisation algorithm alg : string Algorithm to use in pulse optimisation. Options are: 'GRAPE' (default) - GRadient Ascent Pulse Engineering 'CRAB' - Chopped RAndom Basis alg_params : Dictionary options that are specific to the algorithm see above optim_params : Dictionary The key value pairs are the attribute name and value used to set attribute values Note: attributes are created if they do not exist already, and are overwritten if they do. Note: method_params are applied afterwards and so may override these optim_method : string a scipy.optimize.minimize method that will be used to optimise the pulse for minimum fidelity error Note that FMIN, FMIN_BFGS & FMIN_L_BFGS_B will all result in calling these specific scipy.optimize methods Note the LBFGSB is equivalent to FMIN_L_BFGS_B for backwards capatibility reasons. Supplying DEF will given alg dependent result: - GRAPE - Default optim_method is FMIN_L_BFGS_B - CRAB - Default optim_method is Nelder-Mead method_params : dict Parameters for the optim_method. Note that where there is an attribute of the Optimizer object or the termination_conditions matching the key that attribute. Otherwise, and in some case also, they are assumed to be method_options for the scipy.optimize.minimize method. optim_alg : string Deprecated. Use optim_method. max_metric_corr : integer Deprecated. Use method_params instead accuracy_factor : float Deprecated. Use method_params instead dyn_type : string Dynamics type, i.e. the type of matrix used to describe the dynamics. 
Options are UNIT, GEN_MAT, SYMPL (see Dynamics classes for details) dyn_params : dict Parameters for the Dynamics object The key value pairs are assumed to be attribute name value pairs They applied after the object is created prop_type : string Propagator type i.e. the method used to calculate the propagtors and propagtor gradient for each timeslot options are DEF, APPROX, DIAG, FRECHET, AUG_MAT DEF will use the default for the specific dyn_type (see PropagatorComputer classes for details) prop_params : dict Parameters for the PropagatorComputer object The key value pairs are assumed to be attribute name value pairs They applied after the object is created fid_type : string Fidelity error (and fidelity error gradient) computation method Options are DEF, UNIT, TRACEDIFF, TD_APPROX DEF will use the default for the specific dyn_type (See FidelityComputer classes for details) fid_params : dict Parameters for the FidelityComputer object The key value pairs are assumed to be attribute name value pairs They applied after the object is created phase_option : string Deprecated. Pass in fid_params instead. fid_err_scale_factor : float Deprecated. Use scale_factor key in fid_params instead. tslot_type : string Method for computing the dynamics generators, propagators and evolution in the timeslots. Options: DEF, UPDATE_ALL, DYNAMIC UPDATE_ALL is the only one that currently works (See TimeslotComputer classes for details) tslot_params : dict Parameters for the TimeslotComputer object The key value pairs are assumed to be attribute name value pairs They applied after the object is created amp_update_mode : string Deprecated. Use tslot_type instead. init_pulse_type : string type / shape of pulse(s) used to initialise the the control amplitudes. Options (GRAPE) include: RND, LIN, ZERO, SINE, SQUARE, TRIANGLE, SAW DEF is RND (see PulseGen classes for details) For the CRAB the this the guess_pulse_type. 
init_pulse_params : dict Parameters for the initial / guess pulse generator object The key value pairs are assumed to be attribute name value pairs They applied after the object is created pulse_scaling : float Linear scale factor for generated initial / guess pulses By default initial pulses are generated with amplitudes in the range (-1.0, 1.0). These will be scaled by this parameter pulse_offset : float Linear offset for the pulse. That is this value will be added to any initial / guess pulses generated. ramping_pulse_type : string Type of pulse used to modulate the control pulse. It's intended use for a ramping modulation, which is often required in experimental setups. This is only currently implemented in CRAB. GAUSSIAN_EDGE was added for this purpose. ramping_pulse_params : dict Parameters for the ramping pulse generator object The key value pairs are assumed to be attribute name value pairs They applied after the object is created log_level : integer level of messaging output from the logger. Options are attributes of qutip.logging_utils, in decreasing levels of messaging, are: DEBUG_INTENSE, DEBUG_VERBOSE, DEBUG, INFO, WARN, ERROR, CRITICAL Anything WARN or above is effectively 'quiet' execution, assuming everything runs as expected. The default NOTSET implies that the level will be taken from the QuTiP settings file, which by default is WARN gen_stats : boolean if set to True then statistics for the optimisation run will be generated - accessible through attributes of the stats object Returns ------- opt : Optimizer Instance of an Optimizer, through which the Config, Dynamics, PulseGen, and TerminationConditions objects can be accessed as attributes. The PropagatorComputer, FidelityComputer and TimeslotComputer objects can be accessed as attributes of the Dynamics object, e.g. 
optimizer.dynamics.fid_computer The optimisation can be run through the optimizer.run_optimization """ # check parameters ctrls = dynamics._check_ctrls_container(ctrls) dynamics._check_drift_dyn_gen(drift) if not isinstance(initial, Qobj): raise TypeError("initial must be a Qobj") if not isinstance(target, Qobj): raise TypeError("target must be a Qobj") # Deprecated parameter management if not optim_alg is None: optim_method = optim_alg _param_deprecation( "The 'optim_alg' parameter is deprecated. " "Use 'optim_method' instead") if not max_metric_corr is None: if isinstance(method_params, dict): if not 'max_metric_corr' in method_params: method_params['max_metric_corr'] = max_metric_corr else: method_params = {'max_metric_corr':max_metric_corr} _param_deprecation( "The 'max_metric_corr' parameter is deprecated. " "Use 'max_metric_corr' in method_params instead") if not accuracy_factor is None: if isinstance(method_params, dict): if not 'accuracy_factor' in method_params: method_params['accuracy_factor'] = accuracy_factor else: method_params = {'accuracy_factor':accuracy_factor} _param_deprecation( "The 'accuracy_factor' parameter is deprecated. " "Use 'accuracy_factor' in method_params instead") # phase_option if not phase_option is None: if isinstance(fid_params, dict): if not 'phase_option' in fid_params: fid_params['phase_option'] = phase_option else: fid_params = {'phase_option':phase_option} _param_deprecation( "The 'phase_option' parameter is deprecated. " "Use 'phase_option' in fid_params instead") # fid_err_scale_factor if not fid_err_scale_factor is None: if isinstance(fid_params, dict): if not 'fid_err_scale_factor' in fid_params: fid_params['scale_factor'] = fid_err_scale_factor else: fid_params = {'scale_factor':fid_err_scale_factor} _param_deprecation( "The 'fid_err_scale_factor' parameter is deprecated. 
" "Use 'scale_factor' in fid_params instead") # amp_update_mode if not amp_update_mode is None: amp_update_mode_up = _upper_safe(amp_update_mode) if amp_update_mode_up == 'ALL': tslot_type = 'UPDATE_ALL' else: tslot_type = amp_update_mode _param_deprecation( "The 'amp_update_mode' parameter is deprecated. " "Use 'tslot_type' instead") # set algorithm defaults alg_up = _upper_safe(alg) if alg is None: raise errors.UsageError( "Optimisation algorithm must be specified through 'alg' parameter") elif alg_up == 'GRAPE': if optim_method is None or optim_method.upper() == 'DEF': optim_method = 'FMIN_L_BFGS_B' if init_pulse_type is None or init_pulse_type.upper() == 'DEF': init_pulse_type = 'RND' elif alg_up == 'CRAB': if optim_method is None or optim_method.upper() == 'DEF': optim_method = 'FMIN' if prop_type is None or prop_type.upper() == 'DEF': prop_type = 'APPROX' if init_pulse_type is None or init_pulse_type.upper() == 'DEF': init_pulse_type = None else: raise errors.UsageError( "No option for pulse optimisation algorithm alg={}".format(alg)) cfg = optimconfig.OptimConfig() cfg.optim_method = optim_method cfg.dyn_type = dyn_type cfg.prop_type = prop_type cfg.fid_type = fid_type cfg.init_pulse_type = init_pulse_type if log_level == logging.NOTSET: log_level = logger.getEffectiveLevel() else: logger.setLevel(log_level) cfg.log_level = log_level # Create the Dynamics instance if dyn_type == 'GEN_MAT' or dyn_type is None or dyn_type == '': dyn = dynamics.DynamicsGenMat(cfg) elif dyn_type == 'UNIT': dyn = dynamics.DynamicsUnitary(cfg) elif dyn_type == 'SYMPL': dyn = dynamics.DynamicsSymplectic(cfg) else: raise errors.UsageError("No option for dyn_type: " + dyn_type) dyn.apply_params(dyn_params) dyn._drift_dyn_gen_checked = True dyn._ctrl_dyn_gen_checked = True # Create the PropagatorComputer instance # The default will be typically be the best option if prop_type == 'DEF' or prop_type is None or prop_type == '': # Do nothing use the default for the Dynamics pass elif 
prop_type == 'APPROX': if not isinstance(dyn.prop_computer, propcomp.PropCompApproxGrad): dyn.prop_computer = propcomp.PropCompApproxGrad(dyn) elif prop_type == 'DIAG': if not isinstance(dyn.prop_computer, propcomp.PropCompDiag): dyn.prop_computer = propcomp.PropCompDiag(dyn) elif prop_type == 'AUG_MAT': if not isinstance(dyn.prop_computer, propcomp.PropCompAugMat): dyn.prop_computer = propcomp.PropCompAugMat(dyn) elif prop_type == 'FRECHET': if not isinstance(dyn.prop_computer, propcomp.PropCompFrechet): dyn.prop_computer = propcomp.PropCompFrechet(dyn) else: raise errors.UsageError("No option for prop_type: " + prop_type) dyn.prop_computer.apply_params(prop_params) # Create the FidelityComputer instance # The default will be typically be the best option # Note: the FidCompTraceDiffApprox is a subclass of FidCompTraceDiff # so need to check this type first fid_type_up = _upper_safe(fid_type) if fid_type_up == 'DEF' or fid_type_up is None or fid_type_up == '': # None given, use the default for the Dynamics pass elif fid_type_up == 'TDAPPROX': if not isinstance(dyn.fid_computer, fidcomp.FidCompTraceDiffApprox): dyn.fid_computer = fidcomp.FidCompTraceDiffApprox(dyn) elif fid_type_up == 'TRACEDIFF': if not isinstance(dyn.fid_computer, fidcomp.FidCompTraceDiff): dyn.fid_computer = fidcomp.FidCompTraceDiff(dyn) elif fid_type_up == 'UNIT': if not isinstance(dyn.fid_computer, fidcomp.FidCompUnitary): dyn.fid_computer = fidcomp.FidCompUnitary(dyn) else: raise errors.UsageError("No option for fid_type: " + fid_type) dyn.fid_computer.apply_params(fid_params) # Currently the only working option for tslot computer is # TSlotCompUpdateAll. 
# so just apply the parameters dyn.tslot_computer.apply_params(tslot_params) # Create the Optimiser instance optim_method_up = _upper_safe(optim_method) if optim_method is None or optim_method_up == '': raise errors.UsageError("Optimisation method must be specified " "via 'optim_method' parameter") elif optim_method_up == 'FMIN_BFGS': optim = optimizer.OptimizerBFGS(cfg, dyn) elif optim_method_up == 'LBFGSB' or optim_method_up == 'FMIN_L_BFGS_B': optim = optimizer.OptimizerLBFGSB(cfg, dyn) elif optim_method_up == 'FMIN': if alg_up == 'CRAB': optim = optimizer.OptimizerCrabFmin(cfg, dyn) else: raise errors.UsageError( "Invalid optim_method '{}' for '{}' algorthim".format( optim_method, alg)) else: # Assume that the optim_method is a valid #scipy.optimize.minimize method # Choose an optimiser based on the algorithm if alg_up == 'CRAB': optim = optimizer.OptimizerCrab(cfg, dyn) else: optim = optimizer.Optimizer(cfg, dyn) optim.alg = alg optim.method = optim_method optim.amp_lbound = amp_lbound optim.amp_ubound = amp_ubound optim.apply_params(optim_params) # Create the TerminationConditions instance tc = termcond.TerminationConditions() tc.fid_err_targ = fid_err_targ tc.min_gradient_norm = min_grad tc.max_iterations = max_iter tc.max_wall_time = max_wall_time optim.termination_conditions = tc optim.apply_method_params(method_params) if gen_stats: # Create a stats object # Note that stats object is optional # if the Dynamics and Optimizer stats attribute is not set # then no stats will be collected, which could improve performance if amp_update_mode == 'DYNAMIC': sts = stats.StatsDynTsUpdate() else: sts = stats.Stats() dyn.stats = sts optim.stats = sts # Configure the dynamics dyn.drift_dyn_gen = drift dyn.ctrl_dyn_gen = ctrls dyn.initial = initial dyn.target = target if tau is None: # Check that parameters have been supplied to generate the # timeslot durations try: evo_time / num_tslots except: raise errors.UsageError( "Either the timeslot durations should be supplied 
as an " "array 'tau' or the number of timeslots 'num_tslots' " "and the evolution time 'evo_time' must be given.") dyn.num_tslots = num_tslots dyn.evo_time = evo_time else: dyn.tau = tau # this function is called, so that the num_ctrls attribute will be set n_ctrls = dyn.num_ctrls ramping_pgen = None if ramping_pulse_type: ramping_pgen = pulsegen.create_pulse_gen( pulse_type=ramping_pulse_type, dyn=dyn, pulse_params=ramping_pulse_params) if alg_up == 'CRAB': # Create a pulse generator for each ctrl crab_pulse_params = None num_coeffs = None init_coeff_scaling = None if isinstance(alg_params, dict): num_coeffs = alg_params.get('num_coeffs') init_coeff_scaling = alg_params.get('init_coeff_scaling') if 'crab_pulse_params' in alg_params: crab_pulse_params = alg_params.get('crab_pulse_params') guess_pulse_type = init_pulse_type if guess_pulse_type: guess_pulse_action = None guess_pgen = pulsegen.create_pulse_gen( pulse_type=guess_pulse_type, dyn=dyn) guess_pgen.scaling = pulse_scaling guess_pgen.offset = pulse_offset if init_pulse_params is not None: guess_pgen.apply_params(init_pulse_params) guess_pulse_action = init_pulse_params.get('pulse_action') optim.pulse_generator = [] for j in range(n_ctrls): crab_pgen = pulsegen.PulseGenCrabFourier( dyn=dyn, num_coeffs=num_coeffs) if init_coeff_scaling is not None: crab_pgen.scaling = init_coeff_scaling if isinstance(crab_pulse_params, dict): crab_pgen.apply_params(crab_pulse_params) lb = None if amp_lbound: if isinstance(amp_lbound, list): try: lb = amp_lbound[j] except: lb = amp_lbound[-1] else: lb = amp_lbound ub = None if amp_ubound: if isinstance(amp_ubound, list): try: ub = amp_ubound[j] except: ub = amp_ubound[-1] else: ub = amp_ubound crab_pgen.lbound = lb crab_pgen.ubound = ub if guess_pulse_type: guess_pgen.lbound = lb guess_pgen.ubound = ub crab_pgen.guess_pulse = guess_pgen.gen_pulse() if guess_pulse_action: crab_pgen.guess_pulse_action = guess_pulse_action if ramping_pgen: crab_pgen.ramping_pulse = 
ramping_pgen.gen_pulse() optim.pulse_generator.append(crab_pgen) #This is just for the debug message now pgen = optim.pulse_generator[0] else: # Create a pulse generator of the type specified pgen = pulsegen.create_pulse_gen(pulse_type=init_pulse_type, dyn=dyn, pulse_params=init_pulse_params) pgen.scaling = pulse_scaling pgen.offset = pulse_offset pgen.lbound = amp_lbound pgen.ubound = amp_ubound optim.pulse_generator = pgen if log_level <= logging.DEBUG: logger.debug( "Optimisation config summary...\n" " object classes:\n" " optimizer: " + optim.__class__.__name__ + "\n dynamics: " + dyn.__class__.__name__ + "\n tslotcomp: " + dyn.tslot_computer.__class__.__name__ + "\n fidcomp: " + dyn.fid_computer.__class__.__name__ + "\n propcomp: " + dyn.prop_computer.__class__.__name__ + "\n pulsegen: " + pgen.__class__.__name__) return optim qutip-4.4.1/qutip/control/stats.py000066400000000000000000000411071352460343600172250ustar00rootroot00000000000000# -*- coding: utf-8 -*- # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2014 and later, Alexander J G Pitchford # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. 
# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### # @author: Alexander Pitchford # @email1: agp1@aber.ac.uk # @email2: alex.pitchford@gmail.com # @organization: Aberystwyth University # @supervisor: Daniel Burgarth """ Statistics for the optimisation Note that some of the stats here are redundant copies from the optimiser used here for calculations """ import numpy as np import datetime class Stats(object): """ Base class for all optimisation statistics Used for configurations where all timeslots are updated each iteration e.g. exact gradients Note that all times are generated using timeit.default_timer() and are in seconds Attributes ---------- dyn_gen_name : string Text used in some report functions. 
Makes sense to set it to 'Hamiltonian' when using unitary dynamics Default is simply 'dynamics generator' num_iter : integer Number of iterations of the optimisation algorithm wall_time_optim_start : float Start time for the optimisation wall_time_optim_end : float End time for the optimisation wall_time_optim : float Time elasped during the optimisation wall_time_dyn_gen_compute : float Total wall (elasped) time computing combined dynamics generator (for example combining drift and control Hamiltonians) wall_time_prop_compute : float Total wall (elasped) time computing propagators, that is the time evolution from one timeslot to the next Includes calculating the propagator gradient for exact gradients wall_time_fwd_prop_compute : float Total wall (elasped) time computing combined forward propagation, that is the time evolution from the start to a specific timeslot. Excludes calculating the propagators themselves wall_time_onwd_prop_compute : float Total wall (elasped) time computing combined onward propagation, that is the time evolution from a specific timeslot to the end time. Excludes calculating the propagators themselves wall_time_gradient_compute : float Total wall (elasped) time computing the fidelity error gradient. 
Excludes calculating the propagator gradients (in exact gradient methods) num_fidelity_func_calls : integer Number of calls to fidelity function by the optimisation algorithm num_grad_func_calls : integer Number of calls to gradient function by the optimisation algorithm num_tslot_recompute : integer Number of time the timeslot evolution is recomputed (It is only computed if any amplitudes changed since the last call) num_fidelity_computes : integer Number of time the fidelity is computed (It is only computed if any amplitudes changed since the last call) num_grad_computes : integer Number of time the gradient is computed (It is only computed if any amplitudes changed since the last call) num_ctrl_amp_updates : integer Number of times the control amplitudes are updated mean_num_ctrl_amp_updates_per_iter : float Mean number of control amplitude updates per iteration num_timeslot_changes : integer Number of times the amplitudes of a any control in a timeslot changes mean_num_timeslot_changes_per_update : float Mean average number of timeslot amplitudes that are changed per update num_ctrl_amp_changes : integer Number of times individual control amplitudes that are changed mean_num_ctrl_amp_changes_per_update : float Mean average number of control amplitudes that are changed per update """ def __init__(self): self.reset() def reset(self): self.dyn_gen_name = "dynamics generator" self.clear() def clear(self): self.num_iter = 0 # Duration attributes self.wall_time_optim_start = 0.0 self.wall_time_optim_end = 0.0 self.wall_time_optim = 0.0 self.wall_time_dyn_gen_compute = 0.0 self.wall_time_prop_compute = 0.0 self.wall_time_fwd_prop_compute = 0.0 self.wall_time_onwd_prop_compute = 0.0 self.wall_time_gradient_compute = 0.0 # Fidelity and gradient function calls and computes self.num_fidelity_func_calls = 0 self.num_grad_func_calls = 0 self.num_tslot_recompute = 0 self.num_fidelity_computes = 0 self.num_grad_computes = 0 # Control amplitudes self.num_ctrl_amp_updates = 0 
self.mean_num_ctrl_amp_updates_per_iter = 0.0 self.num_timeslot_changes = 0 self.mean_num_timeslot_changes_per_update = 0.0 self.num_ctrl_amp_changes = 0 self.mean_num_ctrl_amp_changes_per_update = 0.0 def calculate(self): """ Perform the calculations (e.g. averages) that are required on the stats Should be called before calling report """ # If the optimation is still running then the optimisation # time is the time so far if self.wall_time_optim_end > 0.0: self.wall_time_optim = \ self.wall_time_optim_end - self.wall_time_optim_start try: self.mean_num_ctrl_amp_updates_per_iter = \ self.num_ctrl_amp_updates / float(self.num_iter) except: self.mean_num_ctrl_amp_updates_per_iter = np.NaN try: self.mean_num_timeslot_changes_per_update = \ self.num_timeslot_changes / float(self.num_ctrl_amp_updates) except: self.mean_num_timeslot_changes_per_update = np.NaN try: self.mean_num_ctrl_amp_changes_per_update = \ self.num_ctrl_amp_changes / float(self.num_ctrl_amp_updates) except: self.mean_num_ctrl_amp_changes_per_update = np.NaN def _format_datetime(self, t, tot=0.0): dtStr = str(datetime.timedelta(seconds=t)) if tot > 0: percent = 100*t/tot dtStr += " ({:03.2f}%)".format(percent) return dtStr def report(self): """ Print a report of the stats to the console """ print("\n------------------------------------" "\n---- Control optimisation stats ----") self.report_timings() self.report_func_calls() self.report_amp_updates() print("------------------------------------") def report_timings(self): print("**** Timings (HH:MM:SS.US) ****") tot = self.wall_time_optim print("Total wall time elapsed during optimisation: " + self._format_datetime(tot)) print("Wall time computing Hamiltonians: " + self._format_datetime(self.wall_time_dyn_gen_compute, tot)) print("Wall time computing propagators: " + self._format_datetime(self.wall_time_prop_compute, tot)) print("Wall time computing forward propagation: " + self._format_datetime(self.wall_time_fwd_prop_compute, tot)) print("Wall time 
class StatsDynTsUpdate(Stats):
    """
    Optimisation stats class for configurations where all timeslots are not
    necessarily updated at each iteration. In this case it may be interesting
    to know how many Hamiltonians etc are computed each ctrl amplitude update

    Attributes
    ----------
    num_dyn_gen_computes : integer
        Total number of dynamics generator (Hamiltonian) computations,
        that is combining drift and control dynamics to calculate the
        combined dynamics generator for the timeslot

    mean_num_dyn_gen_computes_per_update : float
        Mean average number of dynamics generator computations per update

    mean_wall_time_dyn_gen_compute : float
        Mean average time to compute a timeslot dynamics generator

    num_prop_computes : integer
        Total number of propagator (and propagator gradient for exact
        gradient types) computations

    mean_num_prop_computes_per_update : float
        Mean average number of propagator computations per update

    mean_wall_time_prop_compute : float
        Mean average time to compute a propagator (and its gradient)

    num_fwd_prop_step_computes : integer
        Total number of steps (matrix product) computing forward propagation

    mean_num_fwd_prop_step_computes_per_update : float
        Mean average number of steps computing forward propagation

    mean_wall_time_fwd_prop_compute : float
        Mean average time to compute forward propagation

    num_onwd_prop_step_computes : integer
        Total number of steps (matrix product) computing onward propagation

    mean_num_onwd_prop_step_computes_per_update : float
        Mean average number of steps computing onward propagation

    mean_wall_time_onwd_prop_compute : float
        Mean average time to compute onward propagation
    """

    def __init__(self):
        self.reset()

    @staticmethod
    def _safe_mean(total, count):
        """Return ``total / count`` as a float, or 0.0 when ``count`` is 0."""
        return total / float(count) if count else 0.0

    def reset(self):
        """Zero all counters and derived means (including base class)."""
        Stats.reset(self)
        # Dynamics generators (Hamiltonians)
        self.num_dyn_gen_computes = 0
        self.mean_num_dyn_gen_computes_per_update = 0.0
        self.mean_wall_time_dyn_gen_compute = 0.0
        # **** Propagators *****
        self.num_prop_computes = 0
        self.mean_num_prop_computes_per_update = 0.0
        self.mean_wall_time_prop_compute = 0.0
        # **** Forward propagation ****
        self.num_fwd_prop_step_computes = 0
        self.mean_num_fwd_prop_step_computes_per_update = 0.0
        self.mean_wall_time_fwd_prop_compute = 0.0
        # **** onward propagation ****
        self.num_onwd_prop_step_computes = 0
        self.mean_num_onwd_prop_step_computes_per_update = 0.0
        self.mean_wall_time_onwd_prop_compute = 0.0

    def calculate(self):
        """
        Update the derived mean statistics from the raw counters.

        Previously a zero count (e.g. calling ``report`` before any
        optimisation work had been recorded) raised ZeroDivisionError;
        the affected means are now simply left at 0.0.
        """
        Stats.calculate(self)
        n_updates = self.num_ctrl_amp_updates
        self.mean_num_dyn_gen_computes_per_update = self._safe_mean(
            self.num_dyn_gen_computes, n_updates)
        self.mean_wall_time_dyn_gen_compute = self._safe_mean(
            self.wall_time_dyn_gen_compute, self.num_dyn_gen_computes)
        self.mean_num_prop_computes_per_update = self._safe_mean(
            self.num_prop_computes, n_updates)
        self.mean_wall_time_prop_compute = self._safe_mean(
            self.wall_time_prop_compute, self.num_prop_computes)
        self.mean_num_fwd_prop_step_computes_per_update = self._safe_mean(
            self.num_fwd_prop_step_computes, n_updates)
        self.mean_wall_time_fwd_prop_compute = self._safe_mean(
            self.wall_time_fwd_prop_compute, self.num_fwd_prop_step_computes)
        self.mean_num_onwd_prop_step_computes_per_update = self._safe_mean(
            self.num_onwd_prop_step_computes, n_updates)
        self.mean_wall_time_onwd_prop_compute = self._safe_mean(
            self.wall_time_onwd_prop_compute,
            self.num_onwd_prop_step_computes)

    def report(self):
        """ Print a report of the stats to the console """
        print("\n------------------------------------"
              "\n---- Control optimisation stats ----")
        self.report_timings()
        self.report_func_calls()
        self.report_amp_updates()
        self.report_dyn_gen_comps()
        self.report_fwd_prop()
        self.report_onwd_prop()
        print("------------------------------------")

    def report_dyn_gen_comps(self):
        print("**** {} Computations ****".format(self.dyn_gen_name))
        print("Total number of {} computations: "
              "{}".format(self.dyn_gen_name, self.num_dyn_gen_computes))
        print("Mean number of {} computations per update: "
              "{}".format(self.dyn_gen_name,
                          self.mean_num_dyn_gen_computes_per_update))
        print("Mean wall time to compute {}s: "
              "{} s".format(self.dyn_gen_name,
                            self.mean_wall_time_dyn_gen_compute))
        print("**** Propagator Computations ****")
        print("Total number of propagator computations: "
              "{}".format(self.num_prop_computes))
        print("Mean number of propagator computations per update: "
              "{}".format(self.mean_num_prop_computes_per_update))
        print("Mean wall time to compute propagator "
              "{} s".format(self.mean_wall_time_prop_compute))

    def report_fwd_prop(self):
        print("**** Forward Propagation ****")
        print("Total number of forward propagation step computations: "
              "{}".format(self.num_fwd_prop_step_computes))
        print("Mean number of forward propagation step computations"
              " per update: "
              "{}".format(self.mean_num_fwd_prop_step_computes_per_update))
        print("Mean wall time to compute forward propagation "
              "{} s".format(self.mean_wall_time_fwd_prop_compute))

    def report_onwd_prop(self):
        print("**** Onward Propagation ****")
        print("Total number of onward propagation step computations: "
              "{}".format(self.num_onwd_prop_step_computes))
        print("Mean number of onward propagation step computations"
              " per update: "
              "{}".format(self.mean_num_onwd_prop_step_computes_per_update))
        print("Mean wall time to compute onward propagation "
              "{} s".format(self.mean_wall_time_onwd_prop_compute))
def calc_omega(n):
    """
    Calculate the 2n x 2n Omega matrix
    Used as dynamics generator phase to calculate symplectic propagators

    Parameters
    ----------
    n : scalar(int)
        number of modes in oscillator system

    Returns
    -------
    array(float)
        Symplectic phase Omega
    """
    # Omega is block diagonal with n copies of the 2x2 symplectic unit
    # [[0, 1], [-1, 0]] (the original set omg[j, j+1] = 1 for even j and
    # omg[j, j-1] = -1 for odd j). A Kronecker product produces the same
    # float array and replaces the O((2n)^2) Python double loop.
    return np.kron(np.identity(n), np.array([[0.0, 1.0], [-1.0, 0.0]]))
# # Copyright (c) 2014 and later, Alexander J G Pitchford # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### # @author: Alexander Pitchford # @email1: agp1@aber.ac.uk # @email2: alex.pitchford@gmail.com # @organization: Aberystwyth University # @supervisor: Daniel Burgarth """ Classes containing termination conditions for the control pulse optimisation i.e. 
class TerminationConditions(object):
    """
    Base class for all termination conditions.

    Used to decide when the optimisation algorithm should stop.
    Different subclasses should be used to match the type of
    optimisation being performed.

    Attributes
    ----------
    fid_err_targ : float
        Target fidelity error

    fid_goal : float
        goal fidelity, e.g. 1 - self.fid_err_targ
        It its typical to set this for unitary systems

    max_wall_time : float
        # maximum time for optimisation (seconds)

    min_gradient_norm : float
        Minimum normalised gradient after which optimisation will terminate

    max_iterations : integer
        Maximum iterations of the optimisation algorithm

    max_fid_func_calls : integer
        Maximum number of calls to the fidelity function during
        the optimisation algorithm

    accuracy_factor : float
        Determines the accuracy of the result.
        Typical values for accuracy_factor are: 1e12 for low accuracy;
        1e7 for moderate accuracy; 10.0 for extremely high accuracy
        scipy.optimize.fmin_l_bfgs_b factr argument.
        Only set for specific methods (fmin_l_bfgs_b) that uses this
        Otherwise the same thing is passed as method_option ftol
        (although the scale is different)
        Hence it is not defined here, but may be set by the user
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Restore every termination threshold to its default value."""
        defaults = {
            'fid_err_targ': 1e-5,
            'fid_goal': None,
            'max_wall_time': 60*60.0,
            'min_gradient_norm': 1e-5,
            'max_iterations': 1e10,
            'max_fid_func_calls': 1e10,
        }
        for attr_name, default_value in defaults.items():
            setattr(self, attr_name, default_value)
Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### # @author: Alexander Pitchford # @email1: agp1@aber.ac.uk # @email2: alex.pitchford@gmail.com # @organization: Aberystwyth University # @supervisor: Daniel Burgarth """ Timeslot Computer These classes determine which dynamics generators, propagators and evolutions are recalculated when there is a control amplitude update. The timeslot computer processes the lists held by the dynamics object The default (UpdateAll) updates all of these each amp update, on the assumption that all amplitudes are changed each iteration. 
This is typical when using optimisation methods like BFGS in the GRAPE algorithm The alternative (DynUpdate) assumes that only a subset of amplitudes are updated each iteration and attempts to minimise the number of expensive calculations accordingly. This would be the appropriate class for Krotov type methods. Note that the Stats_DynTsUpdate class must be used for stats in conjunction with this class. NOTE: AJGP 2011-10-2014: This _DynUpdate class currently has some bug, no pressing need to fix it presently If all amplitudes change at each update, then the behavior of the classes is equivalent. _UpdateAll is easier to understand and potentially slightly faster in this situation. Note the methods in the _DynUpdate class were inspired by: DYNAMO - Dynamic Framework for Quantum Optimal Control See Machnes et.al., arXiv.1011.4874 """ import os import warnings import numpy as np import timeit # QuTiP from qutip import Qobj # QuTiP control modules import qutip.control.errors as errors import qutip.control.dump as qtrldump # QuTiP logging import qutip.logging_utils as logging logger = logging.get_logger() def _func_deprecation(message, stacklevel=3): """ Issue deprecation warning Using stacklevel=3 will ensure message refers the function calling with the deprecated parameter, """ warnings.warn(message, DeprecationWarning, stacklevel=stacklevel) class TimeslotComputer(object): """ Base class for all Timeslot Computers Note: this must be instantiated with a Dynamics object, that is the container for the data that the methods operate on Attributes ---------- log_level : integer level of messaging output from the logger. Options are attributes of qutip.logging_utils, in decreasing levels of messaging, are: DEBUG_INTENSE, DEBUG_VERBOSE, DEBUG, INFO, WARN, ERROR, CRITICAL Anything WARN or above is effectively 'quiet' execution, assuming everything runs as expected. 
The default NOTSET implies that the level will be taken from the QuTiP settings file, which by default is WARN evo_comp_summary : EvoCompSummary A summary of the most recent evolution computation Used in the stats and dump Will be set to None if neither stats or dump are set """ def __init__(self, dynamics, params=None): from qutip.control.dynamics import Dynamics if not isinstance(dynamics, Dynamics): raise TypeError("Must instantiate with {} type".format( Dynamics)) self.parent = dynamics self.params = params self.reset() def reset(self): self.log_level = self.parent.log_level self.id_text = 'TS_COMP_BASE' self.evo_comp_summary = None def apply_params(self, params=None): """ Set object attributes based on the dictionary (if any) passed in the instantiation, or passed as a parameter This is called during the instantiation automatically. The key value pairs are the attribute name and value Note: attributes are created if they do not exist already, and are overwritten if they do. """ if not params: params = self.params if isinstance(params, dict): self.params = params for key in params: setattr(self, key, params[key]) def flag_all_calc_now(self): pass def init_comp(self): pass @property def log_level(self): return logger.level @log_level.setter def log_level(self, lvl): """ Set the log_level attribute and set the level of the logger that is call logger.setLevel(lvl) """ logger.setLevel(lvl) def dump_current(self): """Store a copy of the current time evolution""" dyn = self.parent dump = dyn.dump if not isinstance(dump, qtrldump.DynamicsDump): raise RuntimeError("Cannot dump current evolution, " "as dynamics.dump is not set") anything_dumped = False item_idx = None if dump.dump_any: dump_item = dump.add_evo_dump() item_idx = dump_item.idx anything_dumped = True if dump.dump_summary: dump.add_evo_comp_summary(dump_item_idx=item_idx) anything_dumped = True if not anything_dumped: logger.warning("Dump set, but nothing dumped, check dump config") class 
class TSlotCompUpdateAll(TimeslotComputer):
    """
    Timeslot Computer - Update All
    Updates all dynamics generators, propagators and evolutions when
    ctrl amplitudes are updated
    """

    def reset(self):
        TimeslotComputer.reset(self)
        self.id_text = 'ALL'
        self.apply_params()

    def compare_amps(self, new_amps):
        """
        Determine if any amplitudes have changed. If so, then mark the
        timeslots as needing recalculation
        Returns: True if amplitudes are the same, False if they have changed
        """
        changed = False
        dyn = self.parent
        if (dyn.stats or dyn.dump):
            if self.evo_comp_summary:
                self.evo_comp_summary.reset()
            else:
                self.evo_comp_summary = EvoCompSummary()
        ecs = self.evo_comp_summary

        if dyn.ctrl_amps is None:
            # Flag fidelity and gradients as needing recalculation
            changed = True
            if ecs:
                ecs.num_amps_changed = len(new_amps.flat)
                ecs.num_timeslots_changed = new_amps.shape[0]
        else:
            # create boolean array with same shape as ctrl_amps
            # True where value in new_amps differs, otherwise false
            changed_amps = dyn.ctrl_amps != new_amps
            if np.any(changed_amps):
                # Flag fidelity and gradients as needing recalculation
                changed = True
                if self.log_level <= logging.DEBUG:
                    logger.debug("{} amplitudes changed".format(
                        changed_amps.sum()))
                if ecs:
                    ecs.num_amps_changed = changed_amps.sum()
                    ecs.num_timeslots_changed = np.any(changed_amps, 1).sum()
            else:
                if self.log_level <= logging.DEBUG:
                    logger.debug("No amplitudes changed")

        # *** update stats ***
        if dyn.stats:
            dyn.stats.num_ctrl_amp_updates += bool(ecs.num_amps_changed)
            dyn.stats.num_ctrl_amp_changes += ecs.num_amps_changed
            dyn.stats.num_timeslot_changes += ecs.num_timeslots_changed

        if changed:
            dyn.ctrl_amps = new_amps
            dyn.flag_system_changed()
            return False
        else:
            return True

    def recompute_evolution(self):
        """
        Recalculates the evolution operators.
        Dynamics generators (e.g. Hamiltonian) and
        prop (propagators) are calculated as necessary
        """
        dyn = self.parent
        prop_comp = dyn.prop_computer
        n_ts = dyn.num_tslots
        n_ctrls = dyn.num_ctrls

        # Clear the public lists
        # These are only set if (external) users access them
        dyn._dyn_gen_qobj = None
        dyn._prop_qobj = None
        dyn._prop_grad_qobj = None
        dyn._fwd_evo_qobj = None
        dyn._onwd_evo_qobj = None
        dyn._onto_evo_qobj = None

        if (dyn.stats or dyn.dump) and not self.evo_comp_summary:
            self.evo_comp_summary = EvoCompSummary()
        ecs = self.evo_comp_summary

        if dyn.stats is not None:
            dyn.stats.num_tslot_recompute += 1
            if self.log_level <= logging.DEBUG:
                logger.log(logging.DEBUG, "recomputing evolution {} "
                           "(UpdateAll)".format(
                               dyn.stats.num_tslot_recompute))

        # calculate the Hamiltonians
        if ecs:
            time_start = timeit.default_timer()
        for k in range(n_ts):
            dyn._combine_dyn_gen(k)
            if dyn._decomp_curr is not None:
                dyn._decomp_curr[k] = False
        if ecs:
            ecs.wall_time_dyn_gen_compute = \
                timeit.default_timer() - time_start

        # calculate the propagators and the propagator gradients
        if ecs:
            time_start = timeit.default_timer()
        for k in range(n_ts):
            if prop_comp.grad_exact and dyn.cache_prop_grad:
                for j in range(n_ctrls):
                    if j == 0:
                        dyn._prop[k], dyn._prop_grad[k, j] = \
                            prop_comp._compute_prop_grad(k, j)
                        if self.log_level <= logging.DEBUG_INTENSE:
                            # BUG FIX: was self._prop[k]; the propagator
                            # list lives on the Dynamics object (dyn), so
                            # intense-debug logging raised AttributeError.
                            logger.log(logging.DEBUG_INTENSE,
                                       "propagator {}:\n{:10.3g}".format(
                                           k, dyn._prop[k]))
                    else:
                        dyn._prop_grad[k, j] = \
                            prop_comp._compute_prop_grad(k, j,
                                                         compute_prop=False)
            else:
                dyn._prop[k] = prop_comp._compute_propagator(k)
        if ecs:
            ecs.wall_time_prop_compute = \
                timeit.default_timer() - time_start

        if ecs:
            time_start = timeit.default_timer()
        # compute the forward propagation
        for k in range(n_ts):
            if dyn.oper_dtype == Qobj:
                dyn._fwd_evo[k+1] = dyn._prop[k]*dyn._fwd_evo[k]
            else:
                dyn._fwd_evo[k+1] = dyn._prop[k].dot(dyn._fwd_evo[k])
        if ecs:
            ecs.wall_time_fwd_prop_compute = \
                timeit.default_timer() - time_start
            time_start = timeit.default_timer()

        # compute the onward propagation
        if dyn.fid_computer.uses_onwd_evo:
            dyn._onwd_evo[n_ts-1] = dyn._prop[n_ts-1]
            for k in range(n_ts-2, -1, -1):
                if dyn.oper_dtype == Qobj:
                    dyn._onwd_evo[k] = dyn._onwd_evo[k+1]*dyn._prop[k]
                else:
                    dyn._onwd_evo[k] = dyn._onwd_evo[k+1].dot(dyn._prop[k])
        if dyn.fid_computer.uses_onto_evo:
            for k in range(n_ts-1, -1, -1):
                if dyn.oper_dtype == Qobj:
                    dyn._onto_evo[k] = dyn._onto_evo[k+1]*dyn._prop[k]
                else:
                    dyn._onto_evo[k] = dyn._onto_evo[k+1].dot(dyn._prop[k])
        if ecs:
            ecs.wall_time_onwd_prop_compute = \
                timeit.default_timer() - time_start

        if dyn.stats:
            dyn.stats.wall_time_dyn_gen_compute += \
                ecs.wall_time_dyn_gen_compute
            dyn.stats.wall_time_prop_compute += \
                ecs.wall_time_prop_compute
            dyn.stats.wall_time_fwd_prop_compute += \
                ecs.wall_time_fwd_prop_compute
            dyn.stats.wall_time_onwd_prop_compute += \
                ecs.wall_time_onwd_prop_compute

        if dyn.unitarity_check_level:
            dyn.check_unitarity()

        if dyn.dump:
            self.dump_current()

    def get_timeslot_for_fidelity_calc(self):
        """
        Returns the timeslot index that will be used calculate current
        fidelity value.
        This (default) method simply returns the last timeslot
        """
        _func_deprecation("'get_timeslot_for_fidelity_calc' is deprecated. "
                          "Use '_get_timeslot_for_fidelity_calc'")
        # BUG FIX: previously returned the bound method object itself
        # (missing call parentheses) instead of the timeslot index.
        return self._get_timeslot_for_fidelity_calc()

    def _get_timeslot_for_fidelity_calc(self):
        """
        Returns the timeslot index that will be used calculate current
        fidelity value.
        This (default) method simply returns the last timeslot
        """
        return self.parent.num_tslots
""" def reset(self): self.dyn_gen_recalc = None self.prop_recalc = None self.evo_init2t_recalc = None self.evo_t2targ_recalc = None self.dyn_gen_calc_now = None self.prop_calc_now = None self.evo_init2t_calc_now = None self.evo_t2targ_calc_now = None TimeslotComputer.reset(self) self.id_text = 'DYNAMIC' self.apply_params() def init_comp(self): """ Initialise the flags """ #### # These maps are used to determine what needs to be updated #### # Note _recalc means the value needs updating at some point # e.g. here no values have been set, except the initial and final # evolution operator vals (which never change) and hence all other # values are set as requiring calculation. n_ts = self.parent.num_tslots self.dyn_gen_recalc = np.ones(n_ts, dtype=bool) # np.ones(n_ts, dtype=bool) self.prop_recalc = np.ones(n_ts, dtype=bool) self.evo_init2t_recalc = np.ones(n_ts + 1, dtype=bool) self.evo_init2t_recalc[0] = False self.evo_t2targ_recalc = np.ones(n_ts + 1, dtype=bool) self.evo_t2targ_recalc[-1] = False # The _calc_now map is used to during the calcs to specify # which values need updating immediately self.dyn_gen_calc_now = np.zeros(n_ts, dtype=bool) self.prop_calc_now = np.zeros(n_ts, dtype=bool) self.evo_init2t_calc_now = np.zeros(n_ts + 1, dtype=bool) self.evo_t2targ_calc_now = np.zeros(n_ts + 1, dtype=bool) def compare_amps(self, new_amps): """ Determine which timeslots will have changed Hamiltonians i.e. 
any where control amplitudes have changed for that slot and mark (using masks) them and corresponding exponentiations and time evo operators for update Returns: True if amplitudes are the same, False if they have changed """ dyn = self.parent n_ts = dyn.num_tslots # create boolean array with same shape as ctrl_amps # True where value in New_amps differs, otherwise false if self.parent.ctrl_amps is None: changed_amps = np.ones(new_amps.shape, dtype=bool) else: changed_amps = self.parent.ctrl_amps != new_amps if self.log_level <= logging.DEBUG_VERBOSE: logger.log(logging.DEBUG_VERBOSE, "changed_amps:\n{}".format( changed_amps)) # create Boolean vector with same length as number of timeslots # True where any of the amplitudes have changed, otherwise false changed_ts_mask = np.any(changed_amps, 1) # if any of the amplidudes have changed then mark for recalc if np.any(changed_ts_mask): self.dyn_gen_recalc[changed_ts_mask] = True self.prop_recalc[changed_ts_mask] = True dyn.ctrl_amps = new_amps if self.log_level <= logging.DEBUG: logger.debug("Control amplitudes updated") # find first and last changed dynamics generators first_changed = None for i in range(n_ts): if changed_ts_mask[i]: last_changed = i if first_changed is None: first_changed = i # set all fwd evo ops after first changed Ham to be recalculated self.evo_init2t_recalc[first_changed + 1:] = True # set all bkwd evo ops up to (incl) last changed Ham to be # recalculated self.evo_t2targ_recalc[:last_changed + 1] = True # Flag fidelity and gradients as needing recalculation dyn.flag_system_changed() # *** update stats *** if dyn.stats is not None: dyn.stats.num_ctrl_amp_updates += 1 dyn.stats.num_ctrl_amp_changes += changed_amps.sum() dyn.stats.num_timeslot_changes += changed_ts_mask.sum() return False else: return True def flag_all_calc_now(self): """ Flags all Hamiltonians, propagators and propagations to be calculated now """ # set flags for calculations self.dyn_gen_calc_now[:] = True self.prop_calc_now[:] = 
True self.evo_init2t_calc_now[:-1] = True self.evo_t2targ_calc_now[1:] = True def recompute_evolution(self): """ Recalculates the evo_init2t (forward) and evo_t2targ (onward) time evolution operators DynGen (Hamiltonians etc) and prop (propagator) are calculated as necessary """ if self.log_level <= logging.DEBUG_VERBOSE: logger.log(logging.DEBUG_VERBOSE, "recomputing evolution " "(DynUpdate)") dyn = self.parent n_ts = dyn.num_tslots # find the op slots that have been marked for update now # and need recalculation evo_init2t_recomp_now = self.evo_init2t_calc_now & \ self.evo_init2t_recalc evo_t2targ_recomp_now = self.evo_t2targ_calc_now & \ self.evo_t2targ_recalc # to recomupte evo_init2t, will need to start # at a cell that has been computed if np.any(evo_init2t_recomp_now): for k in range(n_ts, 0, -1): if evo_init2t_recomp_now[k] and self.evo_init2t_recalc[k-1]: evo_init2t_recomp_now[k-1] = True # for evo_t2targ, will also need to start # at a cell that has been computed if np.any(evo_t2targ_recomp_now): for k in range(0, n_ts): if evo_t2targ_recomp_now[k] and self.evo_t2targ_recalc[k+1]: evo_t2targ_recomp_now[k+1] = True # determine which dyn gen and prop need recalculating now in order to # calculate the forwrd and onward evolutions prop_recomp_now = (evo_init2t_recomp_now[1:] | evo_t2targ_recomp_now[:-1] | self.prop_calc_now[:]) & self.prop_recalc[:] dyn_gen_recomp_now = (prop_recomp_now[:] | self.dyn_gen_calc_now[:]) \ & self.dyn_gen_recalc[:] if np.any(dyn_gen_recomp_now): time_start = timeit.default_timer() for k in range(n_ts): if dyn_gen_recomp_now[k]: # calculate the dynamics generators dyn.dyn_gen[k] = dyn.compute_dyn_gen(k) self.dyn_gen_recalc[k] = False if dyn.stats is not None: dyn.stats.num_dyn_gen_computes += dyn_gen_recomp_now.sum() dyn.stats.wall_time_dyn_gen_compute += \ timeit.default_timer() - time_start if np.any(prop_recomp_now): time_start = timeit.default_timer() for k in range(n_ts): if prop_recomp_now[k]: # calculate exp(H) and other per 
H computations needed for # the gradient function dyn.prop[k] = dyn._compute_propagator(k) self.prop_recalc[k] = False if dyn.stats is not None: dyn.stats.num_prop_computes += prop_recomp_now.sum() dyn.stats.wall_time_prop_compute += \ timeit.default_timer() - time_start # compute the forward propagation if np.any(evo_init2t_recomp_now): time_start = timeit.default_timer() R = range(1, n_ts + 1) for k in R: if evo_init2t_recomp_now[k]: dyn.evo_init2t[k] = \ dyn.prop[k-1].dot(dyn.evo_init2t[k-1]) self.evo_init2t_recalc[k] = False if dyn.stats is not None: dyn.stats.num_fwd_prop_step_computes += \ evo_init2t_recomp_now.sum() dyn.stats.wall_time_fwd_prop_compute += \ timeit.default_timer() - time_start if np.any(evo_t2targ_recomp_now): time_start = timeit.default_timer() # compute the onward propagation R = range(n_ts-1, -1, -1) for k in R: if evo_t2targ_recomp_now[k]: dyn.evo_t2targ[k] = dyn.evo_t2targ[k+1].dot(dyn.prop[k]) self.evo_t2targ_recalc[k] = False if dyn.stats is not None: dyn.stats.num_onwd_prop_step_computes += \ evo_t2targ_recomp_now.sum() dyn.stats.wall_time_onwd_prop_compute += \ timeit.default_timer() - time_start # Clear calc now flags self.dyn_gen_calc_now[:] = False self.prop_calc_now[:] = False self.evo_init2t_calc_now[:] = False self.evo_t2targ_calc_now[:] = False def get_timeslot_for_fidelity_calc(self): """ Returns the timeslot index that will be used calculate current fidelity value. Attempts to find a timeslot where the least number of propagator calculations will be required. 
class EvoCompSummary(qtrldump.DumpSummaryItem):
    """
    A summary of the most recent time evolution computation.
    Used in stats calculations and for data dumping.

    Attributes
    ----------
    evo_dump_idx : int
        Index of the linked :class:`dump.EvoCompDumpItem`
        None if no linked item

    iter_num : int
        Iteration number of the pulse optimisation
        None if evolution compute outside of a pulse optimisation

    fid_func_call_num : int
        Fidelity function call number of the pulse optimisation
        None if evolution compute outside of a pulse optimisation

    grad_func_call_num : int
        Gradient function call number of the pulse optimisation
        None if evolution compute outside of a pulse optimisation

    num_amps_changed : int
        Number of control timeslot amplitudes changed since previous
        evolution calculation

    num_timeslots_changed : int
        Number of timeslots in which any amplitudes changed since previous
        evolution calculation

    wall_time_dyn_gen_compute : float
        Time spent computing dynamics generators
        (in seconds of elapsed time)

    wall_time_prop_compute : float
        Time spent computing propagators (including and propagator gradients)
        (in seconds of elapsed time)

    wall_time_fwd_prop_compute : float
        Time spent computing the forward evolution of the system
        see :property:`dynamics.fwd_evo`
        (in seconds of elapsed time)

    wall_time_onwd_prop_compute : float
        Time spent computing the 'backward' evolution of the system
        see :property:`dynamics.onwd_evo` and :property:`dynamics.onto_evo`
        (in seconds of elapsed time)
    """

    min_col_width = 11
    summary_property_names = (
        "idx", "evo_dump_idx",
        "iter_num", "fid_func_call_num", "grad_func_call_num",
        "num_amps_changed", "num_timeslots_changed",
        "wall_time_dyn_gen_compute",
        "wall_time_prop_compute",
        "wall_time_fwd_prop_compute",
        "wall_time_onwd_prop_compute")

    summary_property_fmt_type = (
        'd', 'd',
        'd', 'd', 'd',
        'd', 'd',
        'g',
        'g',
        'g',
        'g'
    )

    summary_property_fmt_prec = (
        0, 0,
        0, 0, 0,
        0, 0,
        3,
        3,
        3,
        3
    )

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear the summary back to its pristine state."""
        qtrldump.DumpSummaryItem.reset(self)
        # Identifiers are unset (None) until linked to an optimisation run.
        for attr_name in ('evo_dump_idx', 'iter_num',
                          'fid_func_call_num', 'grad_func_call_num'):
            setattr(self, attr_name, None)
        # Change counters start at zero.
        for attr_name in ('num_amps_changed', 'num_timeslots_changed'):
            setattr(self, attr_name, 0)
        # Elapsed wall-clock timers start at zero seconds.
        for attr_name in ('wall_time_dyn_gen_compute',
                          'wall_time_prop_compute',
                          'wall_time_fwd_prop_compute',
                          'wall_time_onwd_prop_compute'):
            setattr(self, attr_name, 0.0)
Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### __all__ = ['correlation_2op_1t', 'correlation_2op_2t', 'correlation_3op_1t', 'correlation_3op_2t', 'coherence_function_g1', 'coherence_function_g2', 'spectrum', 'spectrum_correlation_fft', 'correlation_ss', 'correlation', 'correlation_4op_1t', 'correlation_4op_2t', 'spectrum_ss', 'spectrum_pi'] from re import sub from warnings import warn import types import numpy as np import scipy.fftpack from qutip.eseries import esval, esspec from qutip.essolve import ode2es from qutip.expect import expect from qutip.mesolve import mesolve from qutip.mcsolve import mcsolve from qutip.operators import qeye from qutip.qobj import Qobj, isket, issuper from qutip.qobjevo import QobjEvo from qutip.rhs_generate import rhs_clear, _td_wrap_array_str from qutip.cy.utilities import _cython_build_cleanup from qutip.settings import debug from qutip.solver import Options, config from qutip.steadystate import steadystate from qutip.states 
import ket2dm from qutip.superoperator import liouvillian, spre, mat2vec from qutip.tensor import tensor if debug: import inspect # ----------------------------------------------------------------------------- # PUBLIC API # ----------------------------------------------------------------------------- # low level correlation def correlation_2op_1t(H, state0, taulist, c_ops, a_op, b_op, solver="me", reverse=False, args={}, options=Options(ntraj=[20, 100])): """ Calculate the two-operator two-time correlation function: :math:`\left` along one time axis using the quantum regression theorem and the evolution solver indicated by the `solver` parameter. Parameters ---------- H : Qobj system Hamiltonian, may be time-dependent for solver choice of `me` or `mc`. state0 : Qobj Initial state density matrix :math:`\\rho(t_0)` or state vector :math:`\\psi(t_0)`. If 'state0' is 'None', then the steady state will be used as the initial state. The 'steady-state' is only implemented for the `me` and `es` solvers. taulist : array_like list of times for :math:`\\tau`. taulist must be positive and contain the element `0`. c_ops : list list of collapse operators, may be time-dependent for solver choice of `me` or `mc`. a_op : Qobj operator A. b_op : Qobj operator B. reverse : bool {False, True} If `True`, calculate :math:`\left` instead of :math:`\left`. solver : str {'me', 'mc', 'es'} choice of solver (`me` for master-equation, `mc` for Monte Carlo, and `es` for exponential series). options : Options Solver options class. `ntraj` is taken as a two-element list because the `mc` correlator calls `mcsolve()` recursively; by default, `ntraj=[20, 100]`. `mc_corr_eps` prevents divide-by-zero errors in the `mc` correlator; by default, `mc_corr_eps=1e-10`. Returns ------- corr_vec : ndarray An array of correlation values for the times specified by `tlist`. References ---------- See, Gardiner, Quantum Noise, Section 5.2. 
""" if debug: print(inspect.stack()[0][3]) if reverse: A_op = a_op B_op = b_op C_op = 1 else: A_op = 1 B_op = a_op C_op = b_op return _correlation_2t(H, state0, [0], taulist, c_ops, A_op, B_op, C_op, solver=solver, args=args, options=options)[0] def correlation_2op_2t(H, state0, tlist, taulist, c_ops, a_op, b_op, solver="me", reverse=False, args={}, options=Options(ntraj=[20, 100])): """ Calculate the two-operator two-time correlation function: :math:`\left` along two time axes using the quantum regression theorem and the evolution solver indicated by the `solver` parameter. Parameters ---------- H : Qobj system Hamiltonian, may be time-dependent for solver choice of `me` or `mc`. state0 : Qobj Initial state density matrix :math:`\\rho_0` or state vector :math:`\\psi_0`. If 'state0' is 'None', then the steady state will be used as the initial state. The 'steady-state' is only implemented for the `me` and `es` solvers. tlist : array_like list of times for :math:`t`. tlist must be positive and contain the element `0`. When taking steady-steady correlations only one tlist value is necessary, i.e. when :math:`t \\rightarrow \\infty`; here tlist is automatically set, ignoring user input. taulist : array_like list of times for :math:`\\tau`. taulist must be positive and contain the element `0`. c_ops : list list of collapse operators, may be time-dependent for solver choice of `me` or `mc`. a_op : Qobj operator A. b_op : Qobj operator B. reverse : bool {False, True} If `True`, calculate :math:`\left` instead of :math:`\left`. solver : str choice of solver (`me` for master-equation, `mc` for Monte Carlo, and `es` for exponential series). options : Options solver options class. `ntraj` is taken as a two-element list because the `mc` correlator calls `mcsolve()` recursively; by default, `ntraj=[20, 100]`. `mc_corr_eps` prevents divide-by-zero errors in the `mc` correlator; by default, `mc_corr_eps=1e-10`. 
Returns ------- corr_mat : ndarray An 2-dimensional array (matrix) of correlation values for the times specified by `tlist` (first index) and `taulist` (second index). If `tlist` is `None`, then a 1-dimensional array of correlation values is returned instead. References ---------- See, Gardiner, Quantum Noise, Section 5.2. """ if debug: print(inspect.stack()[0][3]) if tlist is None: return correlation_2op_1t(H, state0, taulist, c_ops, a_op, b_op, solver=solver, reverse=reverse, args=args, options=options) else: if reverse: A_op = a_op B_op = b_op C_op = 1 else: A_op = 1 B_op = a_op C_op = b_op return _correlation_2t(H, state0, tlist, taulist, c_ops, A_op, B_op, C_op, solver=solver, args=args, options=options) def correlation_3op_1t(H, state0, taulist, c_ops, a_op, b_op, c_op, solver="me", args={}, options=Options(ntraj=[20, 100])): """ Calculate the three-operator two-time correlation function: :math:`\left` along one time axis using the quantum regression theorem and the evolution solver indicated by the `solver` parameter. Note: it is not possibly to calculate a physically meaningful correlation of this form where :math:`\\tau<0`. Parameters ---------- H : Qobj system Hamiltonian, may be time-dependent for solver choice of `me` or `mc`. rho0 : Qobj Initial state density matrix :math:`\\rho(t_0)` or state vector :math:`\\psi(t_0)`. If 'state0' is 'None', then the steady state will be used as the initial state. The 'steady-state' is only implemented for the `me` and `es` solvers. taulist : array_like list of times for :math:`\\tau`. taulist must be positive and contain the element `0`. c_ops : list list of collapse operators, may be time-dependent for solver choice of `me` or `mc`. a_op : Qobj operator A. b_op : Qobj operator B. c_op : Qobj operator C. solver : str choice of solver (`me` for master-equation, `mc` for Monte Carlo, and `es` for exponential series). options : Options solver options class. 
`ntraj` is taken as a two-element list because the `mc` correlator calls `mcsolve()` recursively; by default, `ntraj=[20, 100]`. `mc_corr_eps` prevents divide-by-zero errors in the `mc` correlator; by default, `mc_corr_eps=1e-10`. Returns ------- corr_vec : array An array of correlation values for the times specified by `taulist` References ---------- See, Gardiner, Quantum Noise, Section 5.2. """ if debug: print(inspect.stack()[0][3]) return _correlation_2t(H, state0, [0], taulist, c_ops, a_op, b_op, c_op, solver=solver, args=args, options=options)[0] def correlation_3op_2t(H, state0, tlist, taulist, c_ops, a_op, b_op, c_op, solver="me", args={}, options=Options(ntraj=[20, 100])): """ Calculate the three-operator two-time correlation function: :math:`\left` along two time axes using the quantum regression theorem and the evolution solver indicated by the `solver` parameter. Note: it is not possibly to calculate a physically meaningful correlation of this form where :math:`\\tau<0`. Parameters ---------- H : Qobj system Hamiltonian, may be time-dependent for solver choice of `me` or `mc`. rho0 : Qobj Initial state density matrix :math:`\\rho_0` or state vector :math:`\\psi_0`. If 'state0' is 'None', then the steady state will be used as the initial state. The 'steady-state' is only implemented for the `me` and `es` solvers. tlist : array_like list of times for :math:`t`. tlist must be positive and contain the element `0`. When taking steady-steady correlations only one tlist value is necessary, i.e. when :math:`t \\rightarrow \\infty`; here tlist is automatically set, ignoring user input. taulist : array_like list of times for :math:`\\tau`. taulist must be positive and contain the element `0`. c_ops : list list of collapse operators, may be time-dependent for solver choice of `me` or `mc`. a_op : Qobj operator A. b_op : Qobj operator B. c_op : Qobj operator C. 
solver : str choice of solver (`me` for master-equation, `mc` for Monte Carlo, and `es` for exponential series). options : Options solver options class. `ntraj` is taken as a two-element list because the `mc` correlator calls `mcsolve()` recursively; by default, `ntraj=[20, 100]`. `mc_corr_eps` prevents divide-by-zero errors in the `mc` correlator; by default, `mc_corr_eps=1e-10`. Returns ------- corr_mat : array An 2-dimensional array (matrix) of correlation values for the times specified by `tlist` (first index) and `taulist` (second index). If `tlist` is `None`, then a 1-dimensional array of correlation values is returned instead. References ---------- See, Gardiner, Quantum Noise, Section 5.2. """ if debug: print(inspect.stack()[0][3]) if tlist is None: return correlation_3op_1t(H, state0, taulist, c_ops, a_op, b_op, c_op, solver=solver, args=args, options=options) else: return _correlation_2t(H, state0, tlist, taulist, c_ops, a_op, b_op, c_op, solver=solver, args=args, options=options) # high level correlation def coherence_function_g1(H, state0, taulist, c_ops, a_op, solver="me", args={}, options=Options(ntraj=[20, 100])): """ Calculate the normalized first-order quantum coherence function: .. math:: g^{(1)}(\\tau) = \\frac{\\langle A^\\dagger(\\tau)A(0)\\rangle} {\\sqrt{\\langle A^\\dagger(\\tau)A(\\tau)\\rangle \\langle A^\\dagger(0)A(0)\\rangle}} using the quantum regression theorem and the evolution solver indicated by the `solver` parameter. Parameters ---------- H : Qobj system Hamiltonian, may be time-dependent for solver choice of `me` or `mc`. state0 : Qobj Initial state density matrix :math:`\\rho(t_0)` or state vector :math:`\\psi(t_0)`. If 'state0' is 'None', then the steady state will be used as the initial state. The 'steady-state' is only implemented for the `me` and `es` solvers. taulist : array_like list of times for :math:`\\tau`. taulist must be positive and contain the element `0`. 
c_ops : list list of collapse operators, may be time-dependent for solver choice of `me` or `mc`. a_op : Qobj operator A. solver : str choice of solver (`me` for master-equation and `es` for exponential series). options : Options solver options class. `ntraj` is taken as a two-element list because the `mc` correlator calls `mcsolve()` recursively; by default, `ntraj=[20, 100]`. `mc_corr_eps` prevents divide-by-zero errors in the `mc` correlator; by default, `mc_corr_eps=1e-10`. Returns ------- g1, G1 : tuple The normalized and unnormalized second-order coherence function. """ # first calculate the photon number if state0 is None: state0 = steadystate(H, c_ops) n = np.array([expect(state0, a_op.dag() * a_op)]) else: n = mesolve(H, state0, taulist, c_ops, [a_op.dag() * a_op], options=options).expect[0] # calculate the correlation function G1 and normalize with n to obtain g1 G1 = correlation_2op_1t(H, state0, taulist, c_ops, a_op.dag(), a_op, solver=solver, args=args, options=options) g1 = G1 / np.sqrt(n[0] * n) return g1, G1 def coherence_function_g2(H, state0, taulist, c_ops, a_op, solver="me", args={}, options=Options(ntraj=[20, 100])): """ Calculate the normalized second-order quantum coherence function: .. math:: g^{(2)}(\\tau) = \\frac{\\langle A^\\dagger(0)A^\\dagger(\\tau)A(\\tau)A(0)\\rangle} {\\langle A^\\dagger(\\tau)A(\\tau)\\rangle \\langle A^\\dagger(0)A(0)\\rangle} using the quantum regression theorem and the evolution solver indicated by the `solver` parameter. Parameters ---------- H : Qobj system Hamiltonian, may be time-dependent for solver choice of `me` or `mc`. state0 : Qobj Initial state density matrix :math:`\\rho(t_0)` or state vector :math:`\\psi(t_0)`. If 'state0' is 'None', then the steady state will be used as the initial state. The 'steady-state' is only implemented for the `me` and `es` solvers. taulist : array_like list of times for :math:`\\tau`. taulist must be positive and contain the element `0`. 
c_ops : list list of collapse operators, may be time-dependent for solver choice of `me` or `mc`. a_op : Qobj operator A. args : dict Dictionary of arguments to be passed to solver. solver : str choice of solver (`me` for master-equation and `es` for exponential series). options : Options solver options class. `ntraj` is taken as a two-element list because the `mc` correlator calls `mcsolve()` recursively; by default, `ntraj=[20, 100]`. `mc_corr_eps` prevents divide-by-zero errors in the `mc` correlator; by default, `mc_corr_eps=1e-10`. Returns ------- g2, G2 : tuple The normalized and unnormalized second-order coherence function. """ # first calculate the photon number if state0 is None: state0 = steadystate(H, c_ops) n = np.array([expect(state0, a_op.dag() * a_op)]) else: n = mesolve(H, state0, taulist, c_ops, [a_op.dag() * a_op], args=args).expect[0] # calculate the correlation function G2 and normalize with n to obtain g2 G2 = correlation_3op_1t(H, state0, taulist, c_ops, a_op.dag(), a_op.dag()*a_op, a_op, solver=solver, args=args, options=options) g2 = G2 / (n[0] * n) return g2, G2 # spectrum def spectrum(H, wlist, c_ops, a_op, b_op, solver="es", use_pinv=False): """ Calculate the spectrum of the correlation function :math:`\lim_{t \\to \\infty} \left`, i.e., the Fourier transform of the correlation function: .. math:: S(\omega) = \int_{-\infty}^{\infty} \lim_{t \\to \\infty} \left e^{-i\omega\\tau} d\\tau. using the solver indicated by the `solver` parameter. Note: this spectrum is only defined for stationary statistics (uses steady state rho0) Parameters ---------- H : :class:`qutip.qobj` system Hamiltonian. wlist : array_like list of frequencies for :math:`\\omega`. c_ops : list list of collapse operators. a_op : Qobj operator A. b_op : Qobj operator B. solver : str choice of solver (`es` for exponential series and `pi` for psuedo-inverse). use_pinv : bool For use with the `pi` solver: if `True` use numpy's pinv method, otherwise use a generic solver. 
Returns ------- spectrum : array An array with spectrum :math:`S(\omega)` for the frequencies specified in `wlist`. """ if debug: print(inspect.stack()[0][3]) if solver == "es": return _spectrum_es(H, wlist, c_ops, a_op, b_op) elif solver == "pi": return _spectrum_pi(H, wlist, c_ops, a_op, b_op, use_pinv) else: raise ValueError("Unrecognized choice of solver" + "%s (use es or pi)." % solver) def spectrum_correlation_fft(tlist, y, inverse=False): """ Calculate the power spectrum corresponding to a two-time correlation function using FFT. Parameters ---------- tlist : array_like list/array of times :math:`t` which the correlation function is given. y : array_like list/array of correlations corresponding to time delays :math:`t`. inverse: boolean boolean parameter for using a positive exponent in the Fourier Transform instead. Default is False. Returns ------- w, S : tuple Returns an array of angular frequencies 'w' and the corresponding two-sided power spectrum 'S(w)'. """ if debug: print(inspect.stack()[0][3]) tlist = np.asarray(tlist) N = tlist.shape[0] dt = tlist[1] - tlist[0] if not np.allclose(np.diff(tlist), dt*np.ones(N-1,dtype=float)): raise Exception('tlist must be equally spaced for FFT.') if inverse: F = N * scipy.fftpack.ifft(y) else: F = scipy.fftpack.fft(y) # calculate the frequencies for the components in F f = scipy.fftpack.fftfreq(N, dt) # re-order frequencies from most negative to most positive (centre on 0) idx = np.array([], dtype = 'int') idx = np.append(idx, np.where(f < 0.0)) idx = np.append(idx, np.where(f >= 0.0)) return 2 * np.pi * f[idx], 2 * dt * np.real(F[idx]) # ----------------------------------------------------------------------------- # LEGACY API # ----------------------------------------------------------------------------- # low level correlation def correlation_ss(H, taulist, c_ops, a_op, b_op, solver="me", reverse=False, args={}, options=Options(ntraj=[20, 100])): """ Calculate the two-operator two-time correlation function: .. 
math:: \lim_{t \\to \\infty} \left along one time axis (given steady-state initial conditions) using the quantum regression theorem and the evolution solver indicated by the `solver` parameter. Parameters ---------- H : Qobj system Hamiltonian. taulist : array_like list of times for :math:`\\tau`. taulist must be positive and contain the element `0`. c_ops : list list of collapse operators. a_op : Qobj operator A. b_op : Qobj operator B. reverse : *bool* If `True`, calculate :math:`\lim_{t \\to \\infty} \left` instead of :math:`\lim_{t \\to \\infty} \left`. solver : str choice of solver (`me` for master-equation and `es` for exponential series). options : Options solver options class. `ntraj` is taken as a two-element list because the `mc` correlator calls `mcsolve()` recursively; by default, `ntraj=[20, 100]`. `mc_corr_eps` prevents divide-by-zero errors in the `mc` correlator; by default, `mc_corr_eps=1e-10`. Returns ------- corr_vec : array An array of correlation values for the times specified by `tlist`. References ---------- See, Gardiner, Quantum Noise, Section 5.2. """ warn("correlation_ss() now legacy, please use correlation_2op_1t() with" + "initial state as None", FutureWarning) if debug: print(inspect.stack()[0][3]) return correlation_2op_1t(H, None, taulist, c_ops, a_op, b_op, solver=solver, reverse=reverse, args=args, options=options) def correlation(H, state0, tlist, taulist, c_ops, a_op, b_op, solver="me", reverse=False, args={}, options=Options(ntraj=[20, 100])): """ Calculate the two-operator two-time correlation function: :math:`\left` along two time axes using the quantum regression theorem and the evolution solver indicated by the `solver` parameter. Parameters ---------- H : Qobj system Hamiltonian, may be time-dependent for solver choice of `me` or `mc`. state0 : Qobj Initial state density matrix :math:`\\rho(t_0)` or state vector :math:`\\psi(t_0)`. If 'state0' is 'None', then the steady state will be used as the initial state. 
The 'steady-state' is only implemented for the `me` and `es` solvers. tlist : array_like list of times for :math:`t`. tlist must be positive and contain the element `0`. When taking steady-steady correlations only one tlist value is necessary, i.e. when :math:`t \\rightarrow \\infty`; here tlist is automatically set, ignoring user input. taulist : array_like list of times for :math:`\\tau`. taulist must be positive and contain the element `0`. c_ops : list list of collapse operators, may be time-dependent for solver choice of `me` or `mc`. a_op : Qobj operator A. b_op : Qobj operator B. reverse : *bool* If `True`, calculate :math:`\left` instead of :math:`\left`. solver : str choice of solver (`me` for master-equation, `mc` for Monte Carlo, and `es` for exponential series). options : Options solver options class. `ntraj` is taken as a two-element list because the `mc` correlator calls `mcsolve()` recursively; by default, `ntraj=[20, 100]`. `mc_corr_eps` prevents divide-by-zero errors in the `mc` correlator; by default, `mc_corr_eps=1e-10`. Returns ------- corr_mat : array An 2-dimensional array (matrix) of correlation values for the times specified by `tlist` (first index) and `taulist` (second index). If `tlist` is `None`, then a 1-dimensional array of correlation values is returned instead. References ---------- See, Gardiner, Quantum Noise, Section 5.2. """ warn("correlation() now legacy, please use correlation_2op_2t()", FutureWarning) if debug: print(inspect.stack()[0][3]) return correlation_2op_2t(H, state0, tlist, taulist, c_ops, a_op, b_op, solver=solver, reverse=reverse, args=args, options=options) def correlation_4op_1t(H, state0, taulist, c_ops, a_op, b_op, c_op, d_op, solver="me", args={}, options=Options(ntraj=[20, 100])): """ Calculate the four-operator two-time correlation function: :math:`\left` along one time axis using the quantum regression theorem and the evolution solver indicated by the `solver` parameter. 
Note: it is not possibly to calculate a physically meaningful correlation of this form where :math:`\\tau<0`. Parameters ---------- H : Qobj system Hamiltonian, may be time-dependent for solver choice of `me` or `mc`. rho0 : Qobj Initial state density matrix :math:`\\rho(t_0)` or state vector :math:`\\psi(t_0)`. If 'state0' is 'None', then the steady state will be used as the initial state. The 'steady-state' is only implemented for the `me` and `es` solvers. taulist : array_like list of times for :math:`\\tau`. taulist must be positive and contain the element `0`. c_ops : list list of collapse operators, may be time-dependent for solver choice of `me` or `mc`. a_op : Qobj operator A. b_op : Qobj operator B. c_op : Qobj operator C. d_op : Qobj operator D. solver : str choice of solver (`me` for master-equation, `mc` for Monte Carlo, and `es` for exponential series). options : Options solver options class. `ntraj` is taken as a two-element list because the `mc` correlator calls `mcsolve()` recursively; by default, `ntraj=[20, 100]`. `mc_corr_eps` prevents divide-by-zero errors in the `mc` correlator; by default, `mc_corr_eps=1e-10`. Returns ------- corr_vec : array An array of correlation values for the times specified by `taulist`. References ---------- See, Gardiner, Quantum Noise, Section 5.2. .. note:: Deprecated in QuTiP 3.1 Use correlation_3op_1t() instead. 
""" warn("correlation_4op_1t() now legacy, please use correlation_3op_1t()", FutureWarning) warn("the reverse argument has been removed as it did not contain any" + "new physical information", DeprecationWarning) if debug: print(inspect.stack()[0][3]) return correlation_3op_1t(H, state0, taulist, c_ops, a_op, b_op * c_op, d_op, solver=solver, args=args, options=options) def correlation_4op_2t(H, state0, tlist, taulist, c_ops, a_op, b_op, c_op, d_op, solver="me", args={}, options=Options(ntraj=[20, 100])): """ Calculate the four-operator two-time correlation function: :math:`\left` along two time axes using the quantum regression theorem and the evolution solver indicated by the `solver` parameter. Note: it is not possibly to calculate a physically meaningful correlation of this form where :math:`\\tau<0`. Parameters ---------- H : Qobj system Hamiltonian, may be time-dependent for solver choice of `me` or `mc`. rho0 : Qobj Initial state density matrix :math:`\\rho_0` or state vector :math:`\\psi_0`. If 'state0' is 'None', then the steady state will be used as the initial state. The 'steady-state' is only implemented for the `me` and `es` solvers. tlist : array_like list of times for :math:`t`. tlist must be positive and contain the element `0`. When taking steady-steady correlations only one tlist value is necessary, i.e. when :math:`t \\rightarrow \\infty`; here tlist is automatically set, ignoring user input. taulist : array_like list of times for :math:`\\tau`. taulist must be positive and contain the element `0`. c_ops : list list of collapse operators, may be time-dependent for solver choice of `me` or `mc`. a_op : Qobj operator A. b_op : Qobj operator B. c_op : Qobj operator C. d_op : Qobj operator D. solver : str choice of solver (`me` for master-equation, `mc` for Monte Carlo, and `es` for exponential series). options : Options solver options class. 
`ntraj` is taken as a two-element list because the `mc` correlator calls `mcsolve()` recursively; by default, `ntraj=[20, 100]`. `mc_corr_eps` prevents divide-by-zero errors in the `mc` correlator; by default, `mc_corr_eps=1e-10`. Returns ------- corr_mat : array An 2-dimensional array (matrix) of correlation values for the times specified by `tlist` (first index) and `taulist` (second index). If `tlist` is `None`, then a 1-dimensional array of correlation values is returned instead. References ---------- See, Gardiner, Quantum Noise, Section 5.2. """ warn("correlation_4op_2t() now legacy, please use correlation_3op_2t()", FutureWarning) warn("the reverse argument has been removed as it did not contain any" + "new physical information", DeprecationWarning) if debug: print(inspect.stack()[0][3]) return correlation_3op_2t(H, state0, tlist, taulist, c_ops, a_op, b_op * c_op, d_op, solver=solver, args=args, options=options) # spectrum def spectrum_ss(H, wlist, c_ops, a_op, b_op): """ Calculate the spectrum of the correlation function :math:`\lim_{t \\to \\infty} \left`, i.e., the Fourier transform of the correlation function: .. math:: S(\omega) = \int_{-\infty}^{\infty} \lim_{t \\to \\infty} \left e^{-i\omega\\tau} d\\tau. using an eseries based solver Note: this spectrum is only defined for stationary statistics (uses steady state rho0). Parameters ---------- H : :class:`qutip.qobj` system Hamiltonian. wlist : array_like list of frequencies for :math:`\\omega`. c_ops : *list* of :class:`qutip.qobj` list of collapse operators. a_op : :class:`qutip.qobj` operator A. b_op : :class:`qutip.qobj` operator B. use_pinv : *bool* If `True` use numpy's `pinv` method, otherwise use a generic solver. Returns ------- spectrum : array An array with spectrum :math:`S(\omega)` for the frequencies specified in `wlist`. 
""" warn("spectrum_ss() now legacy, please use spectrum()", FutureWarning) return spectrum(H, wlist, c_ops, a_op, b_op, solver="es") def spectrum_pi(H, wlist, c_ops, a_op, b_op, use_pinv=False): """ Calculate the spectrum of the correlation function :math:`\lim_{t \\to \\infty} \left`, i.e., the Fourier transform of the correlation function: .. math:: S(\omega) = \int_{-\infty}^{\infty} \lim_{t \\to \\infty} \left e^{-i\omega\\tau} d\\tau. using a psuedo-inverse method. Note: this spectrum is only defined for stationary statistics (uses steady state rho0) Parameters ---------- H : :class:`qutip.qobj` system Hamiltonian. wlist : array_like list of frequencies for :math:`\\omega`. c_ops : *list* of :class:`qutip.qobj` list of collapse operators. a_op : :class:`qutip.qobj` operator A. b_op : :class:`qutip.qobj` operator B. use_pinv : *bool* If `True` use numpy's pinv method, otherwise use a generic solver. Returns ------- spectrum : array An array with spectrum :math:`S(\omega)` for the frequencies specified in `wlist`. """ warn("spectrum_pi() now legacy, please use spectrum()", FutureWarning) return spectrum(H, wlist, c_ops, a_op, b_op, solver="pi", use_pinv=use_pinv) # ----------------------------------------------------------------------------- # PRIVATE SOLVER METHODS # ----------------------------------------------------------------------------- # master 2t correlation solver def _correlation_2t(H, state0, tlist, taulist, c_ops, a_op, b_op, c_op, solver="me", args={}, options=Options()): """ Internal function for calling solvers in order to calculate the three-operator two-time correlation function: """ # Note: the current form of the correlator is sufficient for all possible # two-time correlations (incuding those with 2ops vs 3). Ex: to compute a # correlation of the form : a_op = identity, b_op = A, # and c_op = B. 
if debug: print(inspect.stack()[0][3]) if min(tlist) != 0: raise TypeError("tlist must be positive and contain the element 0.") if min(taulist) != 0: raise TypeError("taulist must be positive and contain the element 0.") if config.tdname: _cython_build_cleanup(config.tdname) rhs_clear() H, c_ops, args = _td_wrap_array_str(H, c_ops, args, tlist) if solver == "me": return _correlation_me_2t(H, state0, tlist, taulist, c_ops, a_op, b_op, c_op, args=args, options=options) elif solver == "mc": return _correlation_mc_2t(H, state0, tlist, taulist, c_ops, a_op, b_op, c_op, args=args, options=options) elif solver == "es": return _correlation_es_2t(H, state0, tlist, taulist, c_ops, a_op, b_op, c_op) else: raise ValueError("Unrecognized choice of solver" + "%s (use me, mc, or es)." % solver) # master equation solvers def _correlation_me_2t(H, state0, tlist, taulist, c_ops, a_op, b_op, c_op, args={}, options=Options()): """ Internal function for calculating the three-operator two-time correlation function: using a master equation solver. 
""" # the solvers only work for positive time differences and the correlators # require positive tau if state0 is None: rho0 = steadystate(H, c_ops) tlist = [0] elif isket(state0): rho0 = ket2dm(state0) else: rho0 = state0 if debug: print(inspect.stack()[0][3]) rho_t = mesolve(H, rho0, tlist, c_ops, [], args=args, options=options).states corr_mat = np.zeros([np.size(tlist), np.size(taulist)], dtype=complex) H_shifted, c_ops_shifted, _args = _transform_L_t_shift_new(H, c_ops, args) if config.tdname: _cython_build_cleanup(config.tdname) rhs_clear() for t_idx, rho in enumerate(rho_t): if not isinstance(H, Qobj): _args["_t0"] = tlist[t_idx] corr_mat[t_idx, :] = mesolve( H_shifted, c_op * rho * a_op, taulist, c_ops_shifted, [b_op], args=_args, options=options ).expect[0] if t_idx == 1: options.rhs_reuse = True if config.tdname: _cython_build_cleanup(config.tdname) rhs_clear() return corr_mat # exponential series solvers def _correlation_es_2t(H, state0, tlist, taulist, c_ops, a_op, b_op, c_op): """ Internal function for calculating the three-operator two-time correlation function: using an exponential series solver. """ # the solvers only work for positive time differences and the correlators # require positive tau if state0 is None: rho0 = steadystate(H, c_ops) tlist = [0] elif isket(state0): rho0 = ket2dm(state0) else: rho0 = state0 if debug: print(inspect.stack()[0][3]) # contruct the Liouvillian L = liouvillian(H, c_ops) corr_mat = np.zeros([np.size(tlist), np.size(taulist)], dtype=complex) solES_t = ode2es(L, rho0) # evaluate the correlation function for t_idx in range(len(tlist)): rho_t = esval(solES_t, [tlist[t_idx]]) solES_tau = ode2es(L, c_op * rho_t * a_op) corr_mat[t_idx, :] = esval(expect(b_op, solES_tau), taulist) return corr_mat def _spectrum_es(H, wlist, c_ops, a_op, b_op): """ Internal function for calculating the spectrum of the correlation function :math:`\left`. 
""" if debug: print(inspect.stack()[0][3]) # construct the Liouvillian L = liouvillian(H, c_ops) # find the steady state density matrix and a_op and b_op expecation values rho0 = steadystate(L) a_op_ss = expect(a_op, rho0) b_op_ss = expect(b_op, rho0) # eseries solution for (b * rho0)(t) es = ode2es(L, b_op * rho0) # correlation corr_es = expect(a_op, es) # covariance cov_es = corr_es - a_op_ss * b_op_ss # tidy up covariance (to combine, e.g., zero-frequency components that cancel) cov_es.tidyup() # spectrum spectrum = esspec(cov_es, wlist) return spectrum # Monte Carlo solvers def _correlation_mc_2t(H, state0, tlist, taulist, c_ops, a_op, b_op, c_op, args={}, options=Options()): """ Internal function for calculating the three-operator two-time correlation function: using a Monte Carlo solver. """ if not c_ops: raise TypeError("If no collapse operators are required, use the `me`" + "or `es` solvers") # the solvers only work for positive time differences and the correlators # require positive tau if state0 is None: raise NotImplementedError("steady state not implemented for " + "mc solver, please use `es` or `me`") elif not isket(state0): raise TypeError("state0 must be a state vector.") psi0 = state0 if debug: print(inspect.stack()[0][3]) psi_t_mat = mcsolve( H, psi0, tlist, c_ops, [], args=args, ntraj=options.ntraj[0], options=options, progress_bar=None ).states corr_mat = np.zeros([np.size(tlist), np.size(taulist)], dtype=complex) H_shifted, c_ops_shifted, _args = _transform_L_t_shift_new(H, c_ops, args) if config.tdname: _cython_build_cleanup(config.tdname) rhs_clear() # calculation of from only knowledge of psi0 requires # averaging over both t and tau for t_idx in range(np.size(tlist)): if not isinstance(H, Qobj): _args["_t0"] = tlist[t_idx] for trial_idx in range(options.ntraj[0]): if isinstance(a_op, Qobj) and isinstance(c_op, Qobj): if a_op.dag() == c_op: # A shortcut here, requires only 1/4 the trials chi_0 = (options.mc_corr_eps + c_op) * \ 
psi_t_mat[trial_idx, t_idx] # evolve these states and calculate expectation value of B c_tau = chi_0.norm()**2 * mcsolve( H_shifted, chi_0/chi_0.norm(), taulist, c_ops_shifted, [b_op], args=_args, ntraj=options.ntraj[1], options=options, progress_bar=None ).expect[0] # final correlation vector computed by combining the # averages corr_mat[t_idx, :] += c_tau/options.ntraj[1] else: # otherwise, need four trial wavefunctions # (Ad+C)*psi_t, (Ad+iC)*psi_t, (Ad-C)*psi_t, (Ad-iC)*psi_t if isinstance(a_op, Qobj): a_op_dag = a_op.dag() else: # assume this is a number, ex. i.e. a_op = 1 # if this is not correct, the over-loaded addition # operation will raise errors a_op_dag = a_op chi_0 = [(options.mc_corr_eps + a_op_dag + np.exp(1j*x*np.pi/2)*c_op) * psi_t_mat[trial_idx, t_idx] for x in range(4)] # evolve these states and calculate expectation value of B c_tau = [ chi.norm()**2 * mcsolve( H_shifted, chi/chi.norm(), taulist, c_ops_shifted, [b_op], args=_args, ntraj=options.ntraj[1], options=options, progress_bar=None ).expect[0] for chi in chi_0 ] # final correlation vector computed by combining the averages corr_mat_add = np.asarray( 1.0 / (4*options.ntraj[0]) * (c_tau[0] - c_tau[2] - 1j*c_tau[1] + 1j*c_tau[3]), dtype=corr_mat.dtype ) corr_mat[t_idx, :] += corr_mat_add if t_idx == 1: options.rhs_reuse = True if config.tdname: _cython_build_cleanup(config.tdname) rhs_clear() return corr_mat # pseudo-inverse solvers def _spectrum_pi(H, wlist, c_ops, a_op, b_op, use_pinv=False): """ Internal function for calculating the spectrum of the correlation function :math:`\left`. 
""" L = H if issuper(H) else liouvillian(H, c_ops) tr_mat = tensor([qeye(n) for n in L.dims[0][0]]) N = np.prod(L.dims[0][0]) A = L.full() b = spre(b_op).full() a = spre(a_op).full() tr_vec = np.transpose(mat2vec(tr_mat.full())) rho_ss = steadystate(L) rho = np.transpose(mat2vec(rho_ss.full())) I = np.identity(N * N) P = np.kron(np.transpose(rho), tr_vec) Q = I - P spectrum = np.zeros(len(wlist)) for idx, w in enumerate(wlist): if use_pinv: MMR = np.linalg.pinv(-1.0j * w * I + A) else: MMR = np.dot(Q, np.linalg.solve(-1.0j * w * I + A, Q)) s = np.dot(tr_vec, np.dot(a, np.dot(MMR, np.dot(b, np.transpose(rho))))) spectrum[idx] = -2 * np.real(s[0, 0]) return spectrum # auxiliary def _transform_shift_one_coeff(op, args): if isinstance(op, types.FunctionType): # function-list based time-dependence if isinstance(args, dict): def fn(t, args_i): return op(t + args_i["_t0"], args_i) fn = lambda t, args_i: \ op(t + args_i["_t0"], args_i) else: def fn(t, args_i): return op(t + args_i["_t0"], args_i["_user_args"]) else: fn = sub("(?<=[^0-9a-zA-Z_])t(?=[^0-9a-zA-Z_])", "(t+_t0)", " " + op + " ") return fn def _transform_shift_one_op(op, args={}): if isinstance(op, Qobj): new_op = op elif isinstance(op, QobjEvo): new_op = op new_op._shift elif callable(op): def new_op(t, args_i): return op(t + args_i["_t0"], args_i) elif isinstance(op, list): new_op = [] for block in op: if isinstance(block, list): new_op.append([block[0], _transform_shift_one_coeff(block[1], args)]) else: new_op.append(block) return new_op def _transform_L_t_shift_new(H, c_ops, args={}): H_shifted = _transform_shift_one_op(H, args) c_ops_shifted = [_transform_shift_one_op(op, args) for op in c_ops] if args is None: _args = {"_t0": 0} elif isinstance(args, dict): _args = args.copy() _args["_t0"] = 0 else: _args = {"_user_args": args, "_t0": 0} return H_shifted, c_ops_shifted, _args qutip-4.4.1/qutip/countstat.py000066400000000000000000000253101352460343600164310ustar00rootroot00000000000000# This file is part of 
QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### """ This module contains functions for calculating current and current noise using the counting statistics formalism. 
""" __all__ = ['countstat_current', 'countstat_current_noise'] import numpy as np import scipy.sparse as sp from qutip.expect import expect_rho_vec from qutip.steadystate import pseudo_inverse, steadystate from qutip.superoperator import mat2vec, sprepost, spre from qutip import operator_to_vector, identity, tensor import qutip.settings as settings from qutip.qobj import Qobj, issuper, isoper # Load MKL spsolve if avaiable if settings.has_mkl: from qutip._mkl.spsolve import (mkl_splu, mkl_spsolve) def countstat_current(L, c_ops=None, rhoss=None, J_ops=None): """ Calculate the current corresponding a system Liouvillian `L` and a list of current collapse operators `c_ops` or current superoperators `J_ops` (either must be specified). Optionally the steadystate density matrix `rhoss` and a list of current superoperators `J_ops` can be specified. If either of these are omitted they are computed internally. Parameters ---------- L : :class:`qutip.Qobj` Qobj representing the system Liouvillian. c_ops : array / list (optional) List of current collapse operators. rhoss : :class:`qutip.Qobj` (optional) The steadystate density matrix corresponding the system Liouvillian `L`. J_ops : array / list (optional) List of current superoperators. Returns -------- I : array The currents `I` corresponding to each current collapse operator `c_ops` (or, equivalently, each current superopeator `J_ops`). 
""" if J_ops is None: if c_ops is None: raise ValueError("c_ops must be given if J_ops is not") J_ops = [sprepost(c, c.dag()) for c in c_ops] if rhoss is None: if c_ops is None: raise ValueError("c_ops must be given if rhoss is not") rhoss = steadystate(L, c_ops) rhoss_vec = mat2vec(rhoss.full()).ravel() N = len(J_ops) I = np.zeros(N) for i, Ji in enumerate(J_ops): I[i] = expect_rho_vec(Ji.data, rhoss_vec, 1) return I def countstat_current_noise(L, c_ops, wlist=None, rhoss=None, J_ops=None, sparse=True, method='direct'): """ Compute the cross-current noise spectrum for a list of collapse operators `c_ops` corresponding to monitored currents, given the system Liouvillian `L`. The current collapse operators `c_ops` should be part of the dissipative processes in `L`, but the `c_ops` given here does not necessarily need to be all collapse operators contributing to dissipation in the Liouvillian. Optionally, the steadystate density matrix `rhoss` and the current operators `J_ops` correpsonding to the current collapse operators `c_ops` can also be specified. If either of `rhoss` and `J_ops` are omitted, they will be computed internally. 'wlist' is an optional list of frequencies at which to evaluate the noise spectrum. Note: The default method is a direct solution using dense matrices, as sparse matrix methods fail for some examples of small systems. For larger systems it is reccomended to use the sparse solver with the direct method, as it avoids explicit calculation of the pseudo-inverse, as described in page 67 of "Electrons in nanostructures" C. Flindt, PhD Thesis, available online: http://orbit.dtu.dk/fedora/objects/orbit:82314/datastreams/file_4732600/content Parameters ---------- L : :class:`qutip.Qobj` Qobj representing the system Liouvillian. c_ops : array / list List of current collapse operators. rhoss : :class:`qutip.Qobj` (optional) The steadystate density matrix corresponding the system Liouvillian `L`. 
wlist : array / list (optional) List of frequencies at which to evaluate (if none are given, evaluates at zero frequency) J_ops : array / list (optional) List of current superoperators. sparse : bool Flag that indicates whether to use sparse or dense matrix methods when computing the pseudo inverse. Default is false, as sparse solvers can fail for small systems. For larger systems the sparse solvers are reccomended. Returns -------- I, S : tuple of arrays The currents `I` corresponding to each current collapse operator `c_ops` (or, equivalently, each current superopeator `J_ops`) and the zero-frequency cross-current correlation `S`. """ if rhoss is None: rhoss = steadystate(L, c_ops) if J_ops is None: J_ops = [sprepost(c, c.dag()) for c in c_ops] N = len(J_ops) I = np.zeros(N) if wlist is None: S = np.zeros((N, N,1)) wlist=[0.] else: S = np.zeros((N, N,len(wlist))) if sparse == False: rhoss_vec = mat2vec(rhoss.full()).ravel() for k,w in enumerate(wlist): R = pseudo_inverse(L, rhoss=rhoss, w= w, sparse = sparse, method=method) for i, Ji in enumerate(J_ops): for j, Jj in enumerate(J_ops): if i == j: I[i] = expect_rho_vec(Ji.data, rhoss_vec, 1) S[i, j,k] = I[i] S[i, j,k] -= expect_rho_vec((Ji * R * Jj + Jj * R * Ji).data, rhoss_vec, 1) else: if method == "direct": N = np.prod(L.dims[0][0]) rhoss_vec = operator_to_vector(rhoss) tr_op = tensor([identity(n) for n in L.dims[0][0]]) tr_op_vec = operator_to_vector(tr_op) Pop = sp.kron(rhoss_vec.data, tr_op_vec.data.T, format='csr') Iop = sp.eye(N*N, N*N, format='csr') Q = Iop - Pop for k,w in enumerate(wlist): if w != 0.0: L_temp = 1.0j*w*spre(tr_op) + L else: #At zero frequency some solvers fail for small systems. #Adding a small finite frequency of order 1e-15 #helps prevent the solvers from throwing an exception. 
L_temp = 1.0j*(1e-15)*spre(tr_op) + L if not settings.has_mkl: A = L_temp.data.tocsc() else: A = L_temp.data.tocsr() A.sort_indices() rhoss_vec = mat2vec(rhoss.full()).ravel() for j, Jj in enumerate(J_ops): Qj = Q.dot( Jj.data.dot( rhoss_vec)) try: if settings.has_mkl: X_rho_vec_j = mkl_spsolve(A,Qj) else: X_rho_vec_j = sp.linalg.splu(A, permc_spec ='COLAMD').solve(Qj) except: X_rho_vec_j = sp.linalg.lsqr(A,Qj)[0] for i, Ji in enumerate(J_ops): Qi = Q.dot( Ji.data.dot(rhoss_vec)) try: if settings.has_mkl: X_rho_vec_i = mkl_spsolve(A,Qi) else: X_rho_vec_i = sp.linalg.splu(A, permc_spec ='COLAMD').solve(Qi) except: X_rho_vec_i = sp.linalg.lsqr(A,Qi)[0] if i == j: I[i] = expect_rho_vec(Ji.data, rhoss_vec, 1) S[j, i, k] = I[i] S[j, i, k] -= (expect_rho_vec(Jj.data * Q, X_rho_vec_i, 1) + expect_rho_vec(Ji.data * Q, X_rho_vec_j, 1)) else: rhoss_vec = mat2vec(rhoss.full()).ravel() for k,w in enumerate(wlist): R = pseudo_inverse(L,rhoss=rhoss, w= w, sparse = sparse, method=method) for i, Ji in enumerate(J_ops): for j, Jj in enumerate(J_ops): if i == j: I[i] = expect_rho_vec(Ji.data, rhoss_vec, 1) S[i, j, k] = I[i] S[i, j, k] -= expect_rho_vec((Ji * R * Jj + Jj * R * Ji).data, rhoss_vec, 1) return I, S qutip-4.4.1/qutip/cy/000077500000000000000000000000001352460343600144455ustar00rootroot00000000000000qutip-4.4.1/qutip/cy/__init__.py000077500000000000000000000000421352460343600165550ustar00rootroot00000000000000from qutip.cy.spmatfuncs import * qutip-4.4.1/qutip/cy/br_codegen.py000066400000000000000000000462071352460343600171170ustar00rootroot00000000000000# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, QuSTaR. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. 
Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### import os import numpy as np import qutip.settings as qset from qutip.interpolate import Cubic_Spline _cython_path = os.path.dirname(os.path.abspath(__file__)).replace("\\", "/") _include_string = "'"+_cython_path+"/complex_math.pxi'" __all__ = ['BR_Codegen'] class BR_Codegen(object): """ Class for generating Bloch-Redfield time-dependent code at runtime. 
""" def __init__(self, h_terms=None, h_td_terms=None, h_obj=None, c_terms=None, c_td_terms=None, c_obj=None, a_terms=None, a_td_terms=None, spline_count=[0,0], coupled_ops=[], coupled_lengths=[], coupled_spectra=[], config=None, sparse=False, use_secular=None, sec_cutoff=0.1, args=None, use_openmp=False, omp_thresh=None, omp_threads=None, atol=None): import sys import os sys.path.append(os.getcwd()) # Hamiltonian time-depdendent pieces self.h_terms = h_terms # number of H pieces self.h_td_terms = h_td_terms self.h_obj = h_obj # Collapse operator time-depdendent pieces self.c_terms = c_terms # number of C pieces self.c_td_terms = c_td_terms self.c_obj = c_obj # BR operator time-depdendent pieces self.a_terms = a_terms # number of A pieces self.a_td_terms = a_td_terms self.spline_count = spline_count self.use_secular = int(use_secular) self.sec_cutoff = sec_cutoff self.args = args self.sparse = sparse self.spline = 0 # Code generator properties self.code = [] # strings to be written to file self.level = 0 # indent level self.config = config if atol is None: self.atol = qset.atol else: self.atol = atol self.use_openmp = use_openmp self.omp_thresh = omp_thresh self.omp_threads = omp_threads self.coupled_ops = coupled_ops self.coupled_lengths = coupled_lengths self.coupled_spectra = coupled_spectra def write(self, string): """write lines of code to self.code""" self.code.append(" " * self.level + string + "\n") def file(self, filename): """open file called filename for writing""" self.file = open(filename, "w") def generate(self, filename="rhs.pyx"): """generate the file""" for line in cython_preamble(self.use_openmp)+self.aop_td_funcs(): self.write(line) # write function for Hamiltonian terms (there is always # be at least one term) for line in cython_checks() + self.ODE_func_header(): self.write(line) self.indent() #Reset spline count self.spline = 0 for line in self.func_vars()+self.ham_add_and_eigsolve()+ \ self.br_matvec_terms()+["\n"]: self.write(line) for line in 
self.func_end(): self.write(line) self.dedent() self.file(filename) self.file.writelines(self.code) self.file.close() self.config.cgen_num += 1 def indent(self): """increase indention level by one""" self.level += 1 def dedent(self): """decrease indention level by one""" if self.level == 0: raise SyntaxError("Error in code generator") self.level -= 1 def _get_arg_str(self, args): if len(args) == 0: return '' ret = '' for name, value in self.args.items(): if isinstance(value, np.ndarray): ret += ",\n np.ndarray[np.%s_t, ndim=1] %s" % \ (value.dtype.name, name) else: if isinstance(value, (int, np.int32, np.int64)): kind = 'int' elif isinstance(value, (float, np.float32, np.float64)): kind = 'float' elif isinstance(value, (complex, np.complex128)): kind = 'complex' #kind = type(value).__name__ ret += ",\n " + kind + " " + name return ret def ODE_func_header(self): """Creates function header for time-dependent ODE RHS.""" func_name = "def cy_td_ode_rhs(" # strings for time and vector variables input_vars = ("\n double t" + ",\n complex[::1] vec") for k in range(self.h_terms): input_vars += (",\n " + "complex[::1,:] H%d" % k) #Add array for each Cubic_Spline H term for htd in self.h_td_terms: if isinstance(htd, Cubic_Spline): if not htd.is_complex: input_vars += (",\n " + "double[::1] spline%d" % self.spline) else: input_vars += (",\n " + "complex[::1] spline%d" % self.spline) self.spline += 1 for k in range(self.c_terms): input_vars += (",\n " + "complex[::1,:] C%d" % k) #Add array for each Cubic_Spline c_op term for ctd in self.c_td_terms: if isinstance(ctd, Cubic_Spline): if not ctd.is_complex: input_vars += (",\n " + "double[::1] spline%d" % self.spline) else: input_vars += (",\n " + "complex[::1] spline%d" % self.spline) self.spline += 1 #Add coupled a_op terms for _a in self.a_td_terms: if isinstance(_a, Cubic_Spline): if not _a.is_complex: input_vars += (",\n " + "double[::1] spline%d" % self.spline) else: input_vars += (",\n " + "complex[::1] spline%d" % 
self.spline) self.spline += 1 #Add a_op terms for k in range(self.a_terms): input_vars += (",\n " + "complex[::1,:] A%d" % k) input_vars += (",\n unsigned int nrows") input_vars += self._get_arg_str(self.args) func_end = "):" return [func_name + input_vars + func_end] def func_vars(self): """Writes the variables and their types & spmv parts""" func_vars = ["", "cdef double complex * " + 'out = PyDataMem_NEW_ZEROED(nrows**2,sizeof(complex))'] func_vars.append(" ") return func_vars def aop_td_funcs(self): aop_func_str=[] spline_val = self.spline_count[0] coupled_val = 0 kk = 0 while kk < self.a_terms: if kk not in self.coupled_ops: aa = self.a_td_terms[kk] if isinstance(aa, str): aop_func_str += ["cdef complex spectral{0}(double w, double t): return {1}".format(kk, aa)] elif isinstance(aa, tuple): if isinstance(aa[0],str): str0 = aa[0] elif isinstance(aa[0],Cubic_Spline): if not aa[0].is_complex: aop_func_str += ["cdef double[::1] spline{0} = np.array(".format(spline_val)+np.array2string(aa[0].coeffs,separator=',',precision=16)+",dtype=float)"] str0 = "interp(w, %s, %s, spline%s)" % (aa[0].a, aa[0].b, spline_val) else: aop_func_str += ["cdef complex[::1] spline{0} = np.array(".format(spline_val)+np.array2string(aa[0].coeffs,separator=',',precision=16)+",dtype=complex)"] str0 = "zinterp(w, %s, %s, spline%s)" % (aa[0].a, aa[0].b, spline_val) spline_val += 1 else: raise Exception('Error parsing tuple.') if isinstance(aa[1],str): str1 = aa[1] elif isinstance(aa[1],Cubic_Spline): if not aa[1].is_complex: aop_func_str += ["cdef double[::1] spline{0} = np.array(".format(spline_val)+np.array2string(aa[1].coeffs,separator=',',precision=16)+",dtype=float)"] str1 = "interp(t, %s, %s, spline%s)" % (aa[1].a, aa[1].b, spline_val) else: aop_func_str += ["cdef complex[::1] spline{0} = np.array(".format(spline_val)+np.array2string(aa[1].coeffs,separator=',',precision=16)+",dtype=complex)"] str1 = "zinterp(t, %s, %s, spline%s)" % (aa[1].a, aa[1].b, spline_val) spline_val += 1 else: 
raise Exception('Error parsing tuple.') aop_func_str += ["cdef complex spectral{0}(double w, double t): return ({1})*({2})".format(kk, str0, str1)] else: raise Exception('Invalid a_td_term.') kk += 1 else: aa = self.coupled_spectra[coupled_val] if isinstance(aa, str): aop_func_str += ["cdef complex spectral{0}(double w, double t): return {1}".format(kk, aa)] elif isinstance(aa, Cubic_Spline): if not aa[1].is_complex: aop_func_str += ["cdef double[::1] spline{0} = np.array(".format(spline_val)+np.array2string(aa[1].coeffs,separator=',',precision=16)+",dtype=float)"] str1 = "interp(t, %s, %s, spline%s)" % (aa[1].a, aa[1].b, spline_val) else: aop_func_str += ["cdef complex[::1] spline{0} = np.array(".format(spline_val)+np.array2string(aa[1].coeffs,separator=',',precision=16)+",dtype=complex)"] str1 = "zinterp(t, %s, %s, spline%s)" % (aa[1].a, aa[1].b, spline_val) spline_val += 1 aop_func_str += ["cdef complex spectral{0}(double w, double t): return {1}".format(kk, str1)] kk += self.coupled_lengths[coupled_val] coupled_val += 1 return aop_func_str def ham_add_and_eigsolve(self): ham_str = [] #allocate initial zero-Hamiltonian and eigenvector array in Fortran-order ham_str += ['cdef complex[::1, :] H = farray_alloc(nrows)'] ham_str += ['cdef complex[::1, :] evecs = farray_alloc(nrows)'] #allocate double array for eigenvalues ham_str += ['cdef double * eigvals = PyDataMem_NEW_ZEROED(nrows,sizeof(double))'] for kk in range(self.h_terms): if isinstance(self.h_td_terms[kk], Cubic_Spline): S = self.h_td_terms[kk] if not S.is_complex: td_str = "interp(t, %s, %s, spline%s)" % (S.a, S.b, self.spline) else: td_str = "zinterp(t, %s, %s, spline%s)" % (S.a, S.b, self.spline) ham_str += ["dense_add_mult(H, H{0}, {1})".format(kk,td_str)] self.spline += 1 else: ham_str += ["dense_add_mult(H, H{0}, {1})".format(kk,self.h_td_terms[kk])] #Do the eigensolving ham_str += ["ZHEEVR(H, eigvals, evecs, nrows)"] #Free H as it is no longer needed ham_str += ["PyDataMem_FREE(&H[0,0])"] return 
ham_str def br_matvec_terms(self): br_str = [] # Transform vector eigenbasis br_str += ["cdef double complex * eig_vec = vec_to_eigbasis(vec, evecs, nrows)"] # Do the diagonal liouvillian matvec br_str += ["diag_liou_mult(eigvals, eig_vec, out, nrows)"] # Do the cop_term matvec for each c_term for kk in range(self.c_terms): if isinstance(self.c_td_terms[kk], Cubic_Spline): S = self.c_td_terms[kk] if not S.is_complex: td_str = "interp(t, %s, %s, spline%s)" % (S.a, S.b, self.spline) else: td_str = "zinterp(t, %s, %s, spline%s)" % (S.a, S.b, self.spline) if self.use_openmp: br_str += ["cop_super_mult_openmp(C{0}, evecs, eig_vec, {1}, out, nrows, {2}, {3}, {4})".format(kk, td_str, self.omp_thresh, self.omp_threads, self.atol)] else: br_str += ["cop_super_mult(C{0}, evecs, eig_vec, {1}, out, nrows, {2})".format(kk, td_str, self.atol)] self.spline += 1 else: if self.use_openmp: br_str += ["cop_super_mult_openmp(C{0}, evecs, eig_vec, {1}, out, nrows, {2}, {3}, {4})".format(kk, self.c_td_terms[kk], self.omp_thresh, self.omp_threads, self.atol)] else: br_str += ["cop_super_mult(C{0}, evecs, eig_vec, {1}, out, nrows, {2})".format(kk, self.c_td_terms[kk], self.atol)] if self.a_terms != 0: #Calculate skew and dw_min terms br_str += ["cdef double[:,::1] skew = PyDataMem_NEW_ZEROED(nrows**2,sizeof(double))"] br_str += ["cdef double dw_min = skew_and_dwmin(eigvals, skew, nrows)"] #Compute BR term matvec kk = 0 coupled_val = 0 while kk < self.a_terms: if kk not in self.coupled_ops: if self.use_openmp: br_str += ["br_term_mult_openmp(t, A{0}, evecs, skew, dw_min, spectral{0}, eig_vec, out, nrows, {1}, {2}, {3}, {4}, {5})".format(kk, self.use_secular, self.sec_cutoff, self.omp_thresh, self.omp_threads, self.atol)] else: br_str += ["br_term_mult(t, A{0}, evecs, skew, dw_min, spectral{0}, eig_vec, out, nrows, {1}, {2}, {3})".format(kk, self.use_secular, self.sec_cutoff, self.atol)] kk += 1 else: br_str += ['cdef complex[::1, :] Ac{0} = farray_alloc(nrows)'.format(kk)] for nn in 
range(self.coupled_lengths[coupled_val]): if isinstance(self.a_td_terms[kk+nn], str): br_str += ["dense_add_mult(Ac{0}, A{1}, {2})".format(kk,kk+nn,self.a_td_terms[kk+nn])] elif isinstance(self.a_td_terms[kk+nn], Cubic_Spline): S = self.a_td_terms[kk+nn] if not S.is_complex: td_str = "interp(t, %s, %s, spline%s)" % (S.a, S.b, self.spline) else: td_str = "zinterp(t, %s, %s, spline%s)" % (S.a, S.b, self.spline) br_str += ["dense_add_mult(Ac{0}, A{1}, {2})".format(kk,kk+nn,td_str)] else: raise Exception('Invalid time-dependence fot a_op.') if self.use_openmp: br_str += ["br_term_mult_openmp(t, Ac{0}, evecs, skew, dw_min, spectral{0}, eig_vec, out, nrows, {1}, {2}, {3}, {4}, {5})".format(kk, self.use_secular, self.sec_cutoff, self.omp_thresh, self.omp_threads, self.atol)] else: br_str += ["br_term_mult(t, Ac{0}, evecs, skew, dw_min, spectral{0}, eig_vec, out, nrows, {1}, {2}, {3})".format(kk, self.use_secular, self.sec_cutoff, self.atol)] br_str += ["PyDataMem_FREE(&Ac{0}[0,0])".format(kk)] kk += self.coupled_lengths[coupled_val] coupled_val += 1 return br_str def func_end(self): end_str = [] #Transform out vector back to fock basis end_str += ["cdef np.ndarray[complex, ndim=1, mode='c'] arr_out = vec_to_fockbasis(out, evecs, nrows)"] #Free everything at end if self.a_terms != 0: end_str += ["PyDataMem_FREE(&skew[0,0])"] end_str += ["PyDataMem_FREE(&evecs[0,0])"] end_str += ["PyDataMem_FREE(eigvals)"] end_str += ["PyDataMem_FREE(eig_vec)"] end_str += ["PyDataMem_FREE(out)"] end_str += ["return arr_out"] return end_str def cython_preamble(use_omp=False): if use_omp: call_str = "from qutip.cy.openmp.br_omp cimport (cop_super_mult_openmp, br_term_mult_openmp)" else: call_str = "from qutip.cy.brtools cimport (cop_super_mult, br_term_mult)" """ Returns list of code segments for Cython preamble. """ return ["""#!python #cython: language_level=3 # This file is generated automatically by QuTiP. 
# (C) 2011 and later, QuSTaR import numpy as np cimport numpy as np cimport cython np.import_array() cdef extern from "numpy/arrayobject.h" nogil: void PyDataMem_NEW_ZEROED(size_t size, size_t elsize) void PyArray_ENABLEFLAGS(np.ndarray arr, int flags) void PyDataMem_FREE(void * ptr) from qutip.cy.interpolate cimport interp, zinterp from qutip.cy.math cimport erf, zerf cdef double pi = 3.14159265358979323 from qutip.cy.brtools cimport (dense_add_mult, ZHEEVR, dense_to_eigbasis, vec_to_eigbasis, vec_to_fockbasis, skew_and_dwmin, diag_liou_mult, spec_func, farray_alloc) """ +call_str+ """ include """+_include_string+""" """] def cython_checks(): """ List of strings that turn off Cython checks. """ return [""" @cython.cdivision(True) @cython.boundscheck(False) @cython.wraparound(False)"""] qutip-4.4.1/qutip/cy/br_tensor.pyx000066400000000000000000000166201352460343600172110ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, The QuTiP Project. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. 
# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### import warnings import numpy as np import qutip.settings as qset from qutip.qobj import Qobj cimport numpy as np cimport cython from libcpp cimport bool from qutip.cy.brtools cimport (vec2mat_index, dense_to_eigbasis, ZHEEVR, skew_and_dwmin) from qutip.cy.brtools import (liou_from_diag_ham, cop_super_term) from libc.math cimport fabs include "sparse_routines.pxi" @cython.boundscheck(False) @cython.wraparound(False) def _br_term(complex[::1,:] A, complex[::1,:] evecs, double[:,::1] skew, double dw_min, object spectral, unsigned int nrows, int use_secular, double sec_cutoff, double atol): cdef size_t kk cdef size_t I, J # vector index variables cdef int[2] ab, cd #matrix indexing variables cdef complex[::1,:] A_eig = dense_to_eigbasis(A, evecs, nrows, atol) cdef complex elem, ac_elem, bd_elem cdef vector[int] coo_rows, coo_cols cdef vector[complex] coo_data cdef unsigned int nnz cdef COO_Matrix coo cdef CSR_Matrix csr for I in range(nrows**2): vec2mat_index(nrows, I, ab) for J in range(nrows**2): vec2mat_index(nrows, J, cd) if (not use_secular) or (fabs(skew[ab[0],ab[1]]-skew[cd[0],cd[1]]) < (dw_min * sec_cutoff)): elem = 
(A_eig[ab[0],cd[0]]*A_eig[cd[1],ab[1]]) * 0.5 elem *= (spectral(skew[cd[0],ab[0]])+spectral(skew[cd[1],ab[1]])) if (ab[0]==cd[0]): ac_elem = 0 for kk in range(nrows): ac_elem += A_eig[cd[1],kk]*A_eig[kk,ab[1]] * spectral(skew[cd[1],kk]) elem -= 0.5*ac_elem if (ab[1]==cd[1]): bd_elem = 0 for kk in range(nrows): bd_elem += A_eig[ab[0],kk]*A_eig[kk,cd[0]] * spectral(skew[cd[0],kk]) elem -= 0.5*bd_elem if (elem != 0): coo_rows.push_back(I) coo_cols.push_back(J) coo_data.push_back(elem) PyDataMem_FREE(&A_eig[0,0]) #Number of elements in BR tensor nnz = coo_rows.size() coo.nnz = nnz coo.rows = coo_rows.data() coo.cols = coo_cols.data() coo.data = coo_data.data() coo.nrows = nrows**2 coo.ncols = nrows**2 coo.is_set = 1 coo.max_length = nnz COO_to_CSR(&csr, &coo) return CSR_to_scipy(&csr) @cython.boundscheck(False) @cython.wraparound(False) def bloch_redfield_tensor(object H, list a_ops, spectra_cb=None, list c_ops=[], bool use_secular=True, double sec_cutoff=0.1, double atol = qset.atol): """ Calculates the time-independent Bloch-Redfield tensor for a system given a set of operators and corresponding spectral functions that describes the system's couplingto its environment. Parameters ---------- H : :class:`qutip.qobj` System Hamiltonian. a_ops : list Nested list of system operators that couple to the environment, and the corresponding bath spectra represented as Python functions. spectra_cb : list Depreciated. c_ops : list List of system collapse operators. use_secular : bool {True, False} Flag that indicates if the secular approximation should be used. sec_cutoff : float {0.1} Threshold for secular approximation. atol : float {qutip.settings.atol} Threshold for removing small parameters. Returns ------- R, kets: :class:`qutip.Qobj`, list of :class:`qutip.Qobj` R is the Bloch-Redfield tensor and kets is a list eigenstates of the Hamiltonian. 
""" cdef list _a_ops cdef object a, cop, L cdef int K, kk cdef int nrows = H.shape[0] cdef list op_dims = H.dims cdef list sop_dims = [[op_dims[0], op_dims[0]], [op_dims[1], op_dims[1]]] cdef list ekets, ket_dims ket_dims = [op_dims[0], [1] * len(op_dims[0])] if not (spectra_cb is None): warnings.warn("The use of spectra_cb is depreciated.", DeprecationWarning) _a_ops = [] for kk, a in enumerate(a_ops): _a_ops.append([a,spectra_cb[kk]]) a_ops = _a_ops K = len(a_ops) # Sanity checks for input parameters if not isinstance(H, Qobj): raise TypeError("H must be an instance of Qobj") for a in a_ops: if not isinstance(a[0], Qobj) or not a[0].isherm: raise TypeError("Operators in a_ops must be Hermitian Qobj.") cdef complex[::1,:] H0 = H.full('F') cdef complex[::1,:] evecs = np.zeros((nrows,nrows), dtype=complex, order='F') cdef double[::1] evals = np.zeros(nrows, dtype=float) ZHEEVR(H0, &evals[0], evecs, nrows) L = liou_from_diag_ham(evals) for cop in c_ops: L = L + cop_super_term(cop.full('F'), evecs, 1, nrows, atol) #only lindblad collapse terms if K == 0: ekets = [Qobj(np.asarray(evecs[:,k]), dims=ket_dims) for k in range(nrows)] return Qobj(L, dims=sop_dims, copy=False), ekets #has some br operators and spectra cdef double[:,::1] skew = np.zeros((nrows,nrows), dtype=float) cdef double dw_min = skew_and_dwmin(&evals[0], skew, nrows) for a in a_ops: L = L + _br_term(a[0].full('F'), evecs, skew, dw_min, a[1], nrows, use_secular, sec_cutoff, atol) ekets = [Qobj(np.asarray(evecs[:,k]), dims=ket_dims) for k in range(nrows)] return Qobj(L, dims=sop_dims, copy=False), ekets qutip-4.4.1/qutip/cy/brtools.pxd000066400000000000000000000067671352460343600166660ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, The QuTiP Project. # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### cimport numpy as np #Spectral function with signature (w,t) ctypedef complex (*spec_func)(double, double) cdef complex[::1,:] farray_alloc(int nrows) cpdef void dense_add_mult(complex[::1,:] A, complex[::1,:] B, double complex alpha) nogil cdef void ZHEEVR(complex[::1,:] H, double * eigvals, complex[::1,:] Z, int nrows) cdef complex[::1,:] dense_to_eigbasis(complex[::1,:] A, complex[::1,:] evecs, unsigned int nrows, double atol) cdef void diag_liou_mult(double * diags, double complex * vec, double complex * out, unsigned int nrows) nogil cdef double complex * vec_to_eigbasis(complex[::1] vec, complex[::1,:] evecs, unsigned int nrows) cdef np.ndarray[complex, ndim=1, mode='c'] vec_to_fockbasis(double complex * eig_vec, complex[::1,:] evecs, unsigned int nrows) cdef void cop_super_mult(complex[::1,:] cop, complex[::1,:] evecs, double complex * vec, double complex alpha, double complex * out, unsigned int nrows, double atol) cdef void vec2mat_index(int nrows, int index, int[2] out) nogil cdef double skew_and_dwmin(double * evals, double[:,::1] skew, unsigned int nrows) nogil cdef void br_term_mult(double t, complex[::1,:] A, complex[::1,:] evecs, double[:,::1] skew, double dw_min, spec_func spectral, double complex * vec, double complex * out, unsigned int nrows, int use_secular, double sec_cutoff, double atol) qutip-4.4.1/qutip/cy/brtools.pyx000066400000000000000000000511161352460343600166770ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, The QuTiP Project. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. 
Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### from scipy.linalg.cython_lapack cimport zheevr from scipy.linalg.cython_blas cimport zgemm, zgemv, zaxpy from qutip.cy.spmath cimport (_zcsr_kron_core, _zcsr_kron, _zcsr_add, _zcsr_transpose, _zcsr_adjoint, _zcsr_mult) from qutip.cy.spconvert cimport fdense2D_to_CSR from qutip.cy.spmatfuncs cimport spmvpy from qutip.cy.brtools cimport spec_func from libc.math cimport fabs, fmin from libc.float cimport DBL_MAX from libcpp.vector cimport vector from qutip.cy.sparse_structs cimport (CSR_Matrix, COO_Matrix) include "sparse_routines.pxi" cdef extern from "" namespace "std" nogil: double complex conj(double complex x) double cabs "abs" (double complex x) @cython.boundscheck(False) @cython.wraparound(False) cdef complex[::1,:] farray_alloc(int nrows): """ Allocate a complex zero array in fortran-order for a square matrix. Parameters ---------- nrows : int Number of rows and columns in the matrix. Returns ------- fview : memview A zeroed memoryview in fortran-order. """ cdef double complex * temp = PyDataMem_NEW_ZEROED(nrows*nrows,sizeof(complex)) cdef complex[:,::1] cview = temp cdef complex[::1,:] fview = cview.T return fview @cython.boundscheck(False) @cython.wraparound(False) cpdef void dense_add_mult(complex[::1,:] A, complex[::1,:] B, double complex alpha) nogil: """ Performs the dense matrix multiplication A = A + (alpha*B) where A and B are complex 2D square matrices, and alpha is a complex coefficient. Parameters ---------- A : ndarray Complex matrix in f-order that is to be overwritten B : ndarray Complex matrix in f-order. alpha : complex Coefficient in front of B. """ cdef int nrows2 = A.shape[0]**2 cdef int inc = 1 zaxpy(&nrows2, &alpha, &B[0,0], &inc, &A[0,0], &inc) @cython.boundscheck(False) @cython.wraparound(False) cdef void ZHEEVR(complex[::1,:] H, double * eigvals, complex[::1,:] Z, int nrows): """ Computes the eigenvalues and vectors of a dense Hermitian matrix. 
Eigenvectors are returned in Z. Parameters ---------- H : array_like Input Hermitian matrix. eigvals : array_like Input array to store eigen values. Z : array_like Output array of eigenvectors. nrows : int Number of rows in matrix. """ cdef char jobz = b'V' cdef char rnge = b'A' cdef char uplo = b'L' cdef double vl=1, vu=1, abstol=0 cdef int il=1, iu=1 cdef int lwork = 18 * nrows cdef int lrwork = 24*nrows, liwork = 10*nrows cdef int info=0, M=0 #These nee to be freed at end cdef int * isuppz = PyDataMem_NEW((2*nrows) * sizeof(int)) cdef complex * work = PyDataMem_NEW(lwork * sizeof(complex)) cdef double * rwork = PyDataMem_NEW((24*nrows) * sizeof(double)) cdef int * iwork = PyDataMem_NEW((10*nrows) * sizeof(int)) zheevr(&jobz, &rnge, &uplo, &nrows, &H[0,0], &nrows, &vl, &vu, &il, &iu, &abstol, &M, eigvals, &Z[0,0], &nrows, isuppz, work, &lwork, rwork, &lrwork, iwork, &liwork, &info) PyDataMem_FREE(work) PyDataMem_FREE(rwork) PyDataMem_FREE(isuppz) PyDataMem_FREE(iwork) if info != 0: if info < 0: raise Exception("Error in parameter : %s" & abs(info)) else: raise Exception("Algorithm failed to converge") @cython.boundscheck(False) @cython.wraparound(False) def liou_from_diag_ham(double[::1] diags): cdef unsigned int nrows = diags.shape[0] cdef np.ndarray[complex, ndim=1, mode='c'] data = np.empty(nrows**2, dtype=complex) cdef np.ndarray[int, ndim=1, mode='c'] ind = np.empty(nrows**2, dtype=np.int32) cdef np.ndarray[int, ndim=1, mode='c'] ptr = np.empty(nrows**2+1, dtype=np.int32) cdef unsigned int idx, nnz = 0 cdef size_t ii, jj cdef double complex val1, val2, ans ptr[0] = 0 for ii in range(nrows): val1 = 1j*diags[ii] idx = nrows*ii+1 #Here the +1 is to set the next ptr for jj in range(nrows): val2 = -1j*diags[jj] ans = val1 + val2 if ans != 0: data[nnz] = ans ind[nnz] = nrows*ii+jj ptr[idx+jj] = nnz+1 nnz += 1 else: ptr[idx+jj] = nnz return fast_csr_matrix((data[:nnz],ind[:nnz],ptr), shape=(nrows**2,nrows**2)) @cython.boundscheck(False) @cython.wraparound(False) 
cdef void diag_liou_mult(double * diags, double complex * vec, double complex * out, unsigned int nrows) nogil: """ Multiplies a Liouvillian constructed from a diagonal Hamiltonian onto a vectorized density matrix. Parameters ---------- diags : double ptr Pointer to eigvals of Hamiltonian vec : complex ptr Pointer to density matrix vector out : complex ptr Pointer to vector storing result nrows : int Dimension of Hamiltonian. """ cdef unsigned int nnz = 0 cdef size_t ii, jj cdef double complex val, ans for ii in range(nrows): val = 1j*diags[ii] for jj in range(nrows): ans = val - 1j*diags[jj] out[nnz] += ans*vec[nnz] nnz += 1 @cython.boundscheck(False) @cython.wraparound(False) cdef double complex * ZGEMM(double complex * A, double complex * B, int Arows, int Acols, int Brows, int Bcols, int transA = 0, int transB = 0, double complex alpha = 1, double complex beta = 0): cdef double complex * C = PyDataMem_NEW((Acols*Brows)*sizeof(double complex)) cdef char tA, tB if transA == 0: tA = b'N' elif transA == 1: tA = b'T' elif transA == 2: tA = b'C' else: raise Exception('Invalid transA value.') if transB == 0: tB = b'N' elif transB == 1: tB = b'T' elif transB == 2: tB = b'C' else: raise Exception('Invalid transB value.') zgemm(&tA, &tB, &Arows, &Bcols, &Brows, &alpha, A, &Arows, B, &Brows, &beta, C, &Arows) return C @cython.boundscheck(False) @cython.wraparound(False) cdef void ZGEMV(double complex * A, double complex * vec, double complex * out, int Arows, int Acols, int transA = 0, double complex alpha=1, double complex beta=1): cdef char tA cdef int idx = 1, idy = 1 if transA == 0: tA = b'N' elif transA == 1: tA = b'T' elif transA == 2: tA = b'C' else: raise Exception('Invalid transA value.') zgemv(&tA, &Arows, &Acols, &alpha, A, &Arows, vec, &idx, &beta, out, &idy) @cython.boundscheck(False) @cython.wraparound(False) cdef complex[::1,:] dense_to_eigbasis(complex[::1,:] A, complex[::1,:] evecs, unsigned int nrows, double atol): cdef int kk cdef double complex * temp1 
= ZGEMM(&A[0,0], &evecs[0,0], nrows, nrows, nrows, nrows, 0, 0) cdef double complex * eig_mat = ZGEMM(&evecs[0,0], temp1, nrows, nrows, nrows, nrows, 2, 0) PyDataMem_FREE(temp1) #Get view on ouput # Find all small elements and set to zero for kk in range(nrows**2): if cabs(eig_mat[kk]) < atol: eig_mat[kk] = 0 cdef complex[:,::1] out = eig_mat #This just gets the correct f-ordered view on the data cdef complex[::1,:] out_f = out.T return out_f @cython.boundscheck(False) @cython.wraparound(False) cdef double complex * vec_to_eigbasis(complex[::1] vec, complex[::1,:] evecs, unsigned int nrows): cdef size_t ii, jj cdef double complex * temp1 = ZGEMM(&vec[0], &evecs[0,0], nrows, nrows, nrows, nrows, 0, 0) cdef double complex * eig_vec = ZGEMM(&evecs[0,0], temp1, nrows, nrows, nrows, nrows, 2, 0) PyDataMem_FREE(temp1) return eig_vec @cython.boundscheck(False) @cython.wraparound(False) cdef np.ndarray[complex, ndim=1, mode='c'] vec_to_fockbasis(double complex * eig_vec, complex[::1,:] evecs, unsigned int nrows): cdef size_t ii, jj cdef np.npy_intp nrows2 = nrows**2 cdef double complex * temp1 = ZGEMM(&eig_vec[0], &evecs[0,0], nrows, nrows, nrows, nrows, 0, 2) cdef double complex * fock_vec = ZGEMM(&evecs[0,0], temp1, nrows, nrows, nrows, nrows, 0, 0) PyDataMem_FREE(temp1) cdef np.ndarray[complex, ndim=1, mode='c'] out = \ np.PyArray_SimpleNewFromData(1, &nrows2, np.NPY_COMPLEX128, fock_vec) PyArray_ENABLEFLAGS(out, np.NPY_OWNDATA) return out @cython.boundscheck(False) @cython.wraparound(False) cpdef cop_super_term(complex[::1,:] cop, complex[::1,:] evecs, double complex alpha, unsigned int nrows, double atol): cdef size_t kk cdef CSR_Matrix mat1, mat2, mat3, mat4, mat5 cdef complex[::1,:] cop_eig = dense_to_eigbasis(cop, evecs, nrows, atol) fdense2D_to_CSR(cop_eig, &mat1, nrows, nrows) #Multiply by alpha for time-dependence for kk in range(mat1.nnz): mat1.data[kk] *= alpha #Free data associated with cop_eig as it is no longer needed. 
PyDataMem_FREE(&cop_eig[0,0]) #create temp array of conj data for cop_eig_sparse cdef complex * conj_data = PyDataMem_NEW(mat1.nnz * sizeof(complex)) for kk in range(mat1.nnz): conj_data[kk] = conj(mat1.data[kk]) #mat2 holds data for kron(cop.dag(), c) init_CSR(&mat2, mat1.nnz**2, mat1.nrows**2, mat1.ncols**2) _zcsr_kron_core(conj_data, mat1.indices, mat1.indptr, mat1.data, mat1.indices, mat1.indptr, &mat2, mat1.nrows, mat1.nrows, mat1.ncols) #Free temp conj_data array PyDataMem_FREE(conj_data) #Create identity in mat3 identity_CSR(&mat3, nrows) #Take adjoint cop.H -> mat4 _zcsr_adjoint(&mat1, &mat4) #multiply cop.dag() * c -> mat5 _zcsr_mult(&mat4, &mat1, &mat5) #Free mat1 and mat 4 as we will reuse free_CSR(&mat1) free_CSR(&mat4) # kron(eye, cdc) -> mat1 _zcsr_kron(&mat3, &mat5, &mat1) # Add data from mat2 - 0.5 * cop_sparse -> mat4 _zcsr_add(&mat2, &mat1, &mat4, -0.5) #Free mat1 and mat2 now free_CSR(&mat1) free_CSR(&mat2) #Take traspose of cdc -> mat1 _zcsr_transpose(&mat5, &mat1) free_CSR(&mat5) # kron(cdct, eye) -> mat2 _zcsr_kron(&mat1, &mat3, &mat2) free_CSR(&mat3) # Add data from mat4 - 0.5 * mat2 -> mat1 _zcsr_add(&mat4, &mat2, &mat1, -0.5) free_CSR(&mat4) free_CSR(&mat2) return CSR_to_scipy(&mat1) @cython.boundscheck(False) @cython.wraparound(False) cdef void cop_super_mult(complex[::1,:] cop, complex[::1,:] evecs, double complex * vec, double complex alpha, double complex * out, unsigned int nrows, double atol): cdef size_t kk cdef CSR_Matrix mat1, mat2, mat3, mat4 cdef complex[::1,:] cop_eig = dense_to_eigbasis(cop, evecs, nrows, atol) #Mat1 holds cop_eig in CSR format fdense2D_to_CSR(cop_eig, &mat1, nrows, nrows) #Multiply by alpha for time-dependence for kk in range(mat1.nnz): mat1.data[kk] *= alpha #Free data associated with cop_eig as it is no longer needed. 
PyDataMem_FREE(&cop_eig[0,0]) #create temp array of conj data for cop_eig_sparse cdef complex * conj_data = PyDataMem_NEW(mat1.nnz * sizeof(complex)) for kk in range(mat1.nnz): conj_data[kk] = conj(mat1.data[kk]) #mat2 holds data for kron(cop.dag(), c) init_CSR(&mat2, mat1.nnz**2, mat1.nrows**2, mat1.ncols**2) _zcsr_kron_core(conj_data, mat1.indices, mat1.indptr, mat1.data, mat1.indices, mat1.indptr, &mat2, mat1.nrows, mat1.nrows, mat1.ncols) #Do spmv with kron(cop.dag(), c) spmvpy(mat2.data,mat2.indices,mat2.indptr, &vec[0], 1, out, nrows**2) #Free temp conj_data array PyDataMem_FREE(conj_data) #Free mat2 free_CSR(&mat2) #Create identity in mat3 identity_CSR(&mat3, nrows) #Take adjoint of cop (mat1) -> mat2 _zcsr_adjoint(&mat1, &mat2) #multiply cop.dag() * c (cdc) -> mat4 _zcsr_mult(&mat2, &mat1, &mat4) #Free mat1 and mat2 free_CSR(&mat1) free_CSR(&mat2) # kron(eye, cdc) -> mat1 _zcsr_kron(&mat3, &mat4, &mat1) #Do spmv with -0.5*kron(eye, cdc) spmvpy(mat1.data,mat1.indices,mat1.indptr, vec, -0.5, &out[0], nrows**2) #Free mat1 (mat1 and mat2 are currently free) free_CSR(&mat1) #Take traspose of cdc (mat4) -> mat1 _zcsr_transpose(&mat4, &mat1) #Free mat4 (mat2 and mat4 currently free) free_CSR(&mat4) # kron(cdct, eye) -> mat2 _zcsr_kron(&mat1, &mat3, &mat2) #Do spmv with -0.5*kron(cdct, eye) spmvpy(mat2.data,mat2.indices,mat2.indptr, vec, -0.5, &out[0], nrows**2) #Free mat1, mat2, and mat3 free_CSR(&mat1) free_CSR(&mat2) free_CSR(&mat3) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef inline void vec2mat_index(int nrows, int index, int[2] out) nogil: out[1] = index // nrows out[0] = index - nrows * out[1] @cython.boundscheck(False) @cython.wraparound(False) cdef double skew_and_dwmin(double * evals, double[:,::1] skew, unsigned int nrows) nogil: cdef double diff dw_min = DBL_MAX cdef size_t ii, jj for ii in range(nrows): for jj in range(nrows): diff = evals[ii] - evals[jj] skew[ii,jj] = diff if diff != 0: dw_min = fmin(fabs(diff), 
dw_min) return dw_min @cython.boundscheck(False) @cython.wraparound(False) cdef void br_term_mult(double t, complex[::1,:] A, complex[::1,:] evecs, double[:,::1] skew, double dw_min, spec_func spectral, double complex * vec, double complex * out, unsigned int nrows, int use_secular, double sec_cutoff, double atol): cdef size_t kk cdef size_t I, J # vector index variables cdef int[2] ab, cd #matrix indexing variables cdef complex[::1,:] A_eig = dense_to_eigbasis(A, evecs, nrows, atol) cdef complex elem, ac_elem, bd_elem cdef vector[int] coo_rows, coo_cols cdef vector[complex] coo_data cdef unsigned int nnz cdef COO_Matrix coo cdef CSR_Matrix csr cdef complex[:,::1] non_sec_mat for I in range(nrows**2): vec2mat_index(nrows, I, ab) for J in range(nrows**2): vec2mat_index(nrows, J, cd) if (not use_secular) or (fabs(skew[ab[0],ab[1]]-skew[cd[0],cd[1]]) < (dw_min * sec_cutoff)): elem = (A_eig[ab[0],cd[0]]*A_eig[cd[1],ab[1]]) * 0.5 elem *= (spectral(skew[cd[0],ab[0]],t)+spectral(skew[cd[1],ab[1]],t)) if (ab[0]==cd[0]): ac_elem = 0 for kk in range(nrows): ac_elem += A_eig[cd[1],kk]*A_eig[kk,ab[1]] * spectral(skew[cd[1],kk],t) elem -= 0.5*ac_elem if (ab[1]==cd[1]): bd_elem = 0 for kk in range(nrows): bd_elem += A_eig[ab[0],kk]*A_eig[kk,cd[0]] * spectral(skew[cd[0],kk],t) elem -= 0.5*bd_elem if (elem != 0): coo_rows.push_back(I) coo_cols.push_back(J) coo_data.push_back(elem) PyDataMem_FREE(&A_eig[0,0]) #Number of elements in BR tensor nnz = coo_rows.size() coo.nnz = nnz coo.rows = coo_rows.data() coo.cols = coo_cols.data() coo.data = coo_data.data() coo.nrows = nrows**2 coo.ncols = nrows**2 coo.is_set = 1 coo.max_length = nnz COO_to_CSR(&csr, &coo) spmvpy(csr.data, csr.indices, csr.indptr, vec, 1, out, nrows**2) free_CSR(&csr) @cython.boundscheck(False) @cython.wraparound(False) cdef void sparse_ZHEEVR(complex[::1,:] H, double * eigvals, CSR_Matrix * evecs, int nrows, double atol): """ Computes the eigenvalues and vectors of a dense Hermitian matrix. 
Eigenvectors are returned in Z. Parameters ---------- H : array_like Input Hermitian matrix. eigvals : array_like Input array to store eigen values. evecs : CSR_Matrix Output csr matrix of eigenvectors. nrows : int Number of rows in matrix. """ cdef size_t jj, ii cdef char jobz = b'V' cdef char rnge = b'A' cdef char uplo = b'L' cdef double vl=1, vu=1, abstol=0 cdef int il=1, iu=1 cdef int lwork = 18 * nrows cdef int lrwork = 24*nrows, liwork = 10*nrows cdef int info=0, M=0 #These nee to be freed at end cdef int * isuppz = PyDataMem_NEW((2*nrows) * sizeof(int)) cdef complex * work = PyDataMem_NEW(lwork * sizeof(complex)) cdef double * rwork = PyDataMem_NEW((24*nrows) * sizeof(double)) cdef int * iwork = PyDataMem_NEW((10*nrows) * sizeof(int)) cdef complex[:,::1] cZ = PyDataMem_NEW(nrows**2 * sizeof(complex)) cdef complex[::1,:] Z = cZ.T zheevr(&jobz, &rnge, &uplo, &nrows, &H[0,0], &nrows, &vl, &vu, &il, &iu, &abstol, &M, eigvals, &Z[0,0], &nrows, isuppz, work, &lwork, rwork, &lrwork, iwork, &liwork, &info) PyDataMem_FREE(work) PyDataMem_FREE(rwork) PyDataMem_FREE(isuppz) PyDataMem_FREE(iwork) if info != 0: if info < 0: raise Exception("Error in parameter : %s" & abs(info)) else: raise Exception("Algorithm failed to converge") for jj in range(nrows): for ii in range(nrows): if cabs(Z[ii,jj]) < atol: Z[ii,jj] = 0 fdense2D_to_CSR(Z, evecs, nrows, nrows) PyDataMem_FREE(&Z[0,0]) @cython.boundscheck(False) @cython.wraparound(False) cdef CSR_Matrix sparse_to_eigbasis(complex[::1] Adata, int[::1] Aind, int[::1] Aptr, CSR_Matrix * evecs, unsigned int nrows): cdef CSR_Matrix A, B, C, A_eig A.data = &Adata[0] A.indices = &Aind[0] A.indptr = &Aptr[0] A.nnz = A.indptr[nrows] A.nrows = nrows A.ncols = nrows A.max_length = A.nnz A.numpy_lock = 1 A.is_set = 1 _zcsr_mult(&A, evecs, &B) _zcsr_adjoint(evecs, &C) _zcsr_mult(&C, &B, &A_eig) sort_indices(&A_eig) free_CSR(&B) free_CSR(&C) return A_eig 
qutip-4.4.1/qutip/cy/brtools_checks.pyx000066400000000000000000000131151352460343600202140ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, The QuTiP Project. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### import numpy as np cimport numpy as np cimport cython import qutip.settings as qset from qutip.cy.brtools cimport (ZHEEVR, diag_liou_mult, dense_to_eigbasis, vec_to_eigbasis, vec_to_fockbasis, cop_super_mult, br_term_mult, skew_and_dwmin) include "sparse_routines.pxi" @cython.boundscheck(False) def _test_zheevr(complex[::1,:] H, double[::1] evals): cdef np.ndarray[complex, ndim=2, mode='fortran'] Z = np.zeros((H.shape[0],H.shape[0]),dtype=complex, order='f') ZHEEVR(H, &evals[0], Z, H.shape[0]) return Z @cython.boundscheck(False) def _test_diag_liou_mult(double[::1] evals, complex[::1] vec, complex[::1] out, int nrows): diag_liou_mult(&evals[0], &vec[0], &out[0], nrows) @cython.boundscheck(False) def _test_dense_to_eigbasis(complex[::1,:] A, complex[::1,:] evecs, unsigned int nrows, double atol): cdef complex[::1,:] out = dense_to_eigbasis(A, evecs, nrows, atol) cdef np.ndarray[complex, ndim=2] mat cdef np.npy_intp dims[2] dims[:] = [A.shape[0], A.shape[1]] #We cannot build simple in fortran-order, so build c-order and return transpose mat = np.PyArray_SimpleNewFromData(2, dims, np.NPY_COMPLEX128, &out[0,0]) PyArray_ENABLEFLAGS(mat, np.NPY_OWNDATA) return mat.T @cython.boundscheck(False) def _test_vec_to_eigbasis(complex[::1,:] H, complex[::1] vec): cdef np.ndarray[complex, ndim=2, mode='fortran'] Z = np.zeros((H.shape[0],H.shape[0]), dtype=complex, order='f') cdef double[::1] evals = np.zeros(H.shape[0],dtype=float) ZHEEVR(H, &evals[0], Z, H.shape[0]) cdef double complex * eig_vec = vec_to_eigbasis(vec,Z, H.shape[0]) cdef np.npy_intp dim = H.shape[0]**2 cdef np.ndarray[complex, ndim=1, mode='c'] out out = np.PyArray_SimpleNewFromData(1, &dim, np.NPY_COMPLEX128, eig_vec) PyArray_ENABLEFLAGS(out, np.NPY_OWNDATA) return out @cython.boundscheck(False) def _test_eigvec_to_fockbasis(complex[::1] eig_vec, complex[::1,:] evecs, int nrows): cdef np.ndarray[complex, ndim=1, mode='c'] out out = 
vec_to_fockbasis(&eig_vec[0], evecs, nrows) return out @cython.boundscheck(False) def _test_vector_roundtrip(complex[::1,:] H, complex[::1] vec): cdef np.ndarray[complex, ndim=2, mode='fortran'] Z = np.zeros((H.shape[0],H.shape[0]), dtype=complex, order='f') cdef double[::1] evals = np.zeros(H.shape[0],dtype=float) ZHEEVR(H, &evals[0], Z, H.shape[0]) cdef double complex * eig_vec = vec_to_eigbasis(vec, Z, H.shape[0]) cdef np.ndarray[complex, ndim=1, mode='c'] out out = vec_to_fockbasis(eig_vec, Z, H.shape[0]) PyDataMem_FREE(eig_vec) return out @cython.boundscheck(False) def _cop_super_mult(complex[::1,:] cop, complex[::1,:] evecs, complex[::1] vec, double complex alpha, complex[::1] out, unsigned int nrows, double atol): cop_super_mult(cop, evecs, &vec[0], alpha, &out[0], nrows, atol) #Test spectral function cdef complex spectral(double w, double t): return 1.0 def _test_br_term_mult(double t, complex[::1,:] A, complex[::1, :] evecs, double[::1] evals, complex[::1] vec, complex[::1] out, int use_secular, double sec_cutoff, double atol): cdef unsigned int nrows = A.shape[0] cdef double * _temp = PyDataMem_NEW((nrows**2) * sizeof(double)) cdef double[:,::1] skew = _temp cdef double dw_min = skew_and_dwmin(&evals[0], skew, nrows) br_term_mult(t, A, evecs, skew, dw_min, spectral, &vec[0], &out[0], nrows, use_secular, sec_cutoff, atol) PyDataMem_FREE(&skew[0,0]) qutip-4.4.1/qutip/cy/checks.pyx000066400000000000000000000073121352460343600164520ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, The QuTiP Project. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. 
Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### import numpy as np import scipy.sparse as sp from qutip.fastsparse import fast_csr_matrix cimport numpy as cnp cimport cython include "sparse_routines.pxi" def _test_coo2csr_struct(object A): cdef COO_Matrix mat = COO_from_scipy(A) cdef CSR_Matrix out COO_to_CSR(&out, &mat) return CSR_to_scipy(&out) def _test_sorting(object A): cdef complex[::1] data = A.data cdef int[::1] ind = A.indices cdef int[::1] ptr = A.indptr cdef int nrows = A.shape[0] cdef int ncols = A.shape[1] cdef CSR_Matrix out out.data = &data[0] out.indices = &ind[0] out.indptr = &ptr[0] out.nrows = nrows out.ncols = ncols out.is_set = 1 out.numpy_lock = 0 sort_indices(&out) def _test_coo2csr_inplace_struct(object A, int sorted = 0): cdef complex[::1] data = A.data cdef int[::1] rows = A.row cdef int[::1] cols = A.col cdef int nrows = A.shape[0] cdef int ncols = A.shape[1] cdef int nnz = data.shape[0] cdef size_t kk #We need to make copies here to test the inplace conversion #as we cannot use numpy data due to ownership issues. cdef complex * _data = PyDataMem_NEW(nnz * sizeof(complex)) cdef int * _rows = PyDataMem_NEW(nnz * sizeof(int)) cdef int * _cols = PyDataMem_NEW(nnz * sizeof(int)) for kk in range(nnz): _data[kk] = data[kk] _rows[kk] = rows[kk] _cols[kk] = cols[kk] cdef COO_Matrix mat mat.data = _data mat.rows = _rows mat.cols = _cols mat.nrows = nrows mat.ncols = ncols mat.nnz = nnz mat.max_length = mat.nnz mat.is_set = 1 mat.numpy_lock = 0 cdef CSR_Matrix out COO_to_CSR_inplace(&out, &mat) if sorted: sort_indices(&out) return CSR_to_scipy(&out) def _test_csr2coo_struct(object A): cdef CSR_Matrix mat = CSR_from_scipy(A) cdef COO_Matrix out CSR_to_COO(&out, &mat) return COO_to_scipy(&out) qutip-4.4.1/qutip/cy/codegen.py000066400000000000000000000423351352460343600164320ustar00rootroot00000000000000# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. 
Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### import os import numpy as np from qutip.interpolate import Cubic_Spline _cython_path = os.path.dirname(os.path.abspath(__file__)).replace("\\", "/") _include_string = "'"+_cython_path+"/complex_math.pxi'" __all__ = ['Codegen'] class Codegen(): """ Class for generating cython code files at runtime. 
""" def __init__(self, h_terms=None, h_tdterms=None, h_td_inds=None, args=None, c_terms=None, c_tdterms=[], c_td_inds=None, c_td_splines=[], c_td_spline_flags=[], type='me', config=None, use_openmp=False, omp_components=None, omp_threads=None): import sys import os sys.path.append(os.getcwd()) # Hamiltonian time-depdendent pieces self.type = type if isinstance(h_terms, int): h_terms = range(h_terms) self.h_terms = h_terms # number of H pieces self.h_tdterms = h_tdterms # list of time-dependent strings self.h_td_inds = h_td_inds # indicies of time-dependnt terms self.args = args # args for strings # Collapse operator time-depdendent pieces self.c_terms = c_terms # number of C pieces self.c_tdterms = c_tdterms # list of time-dependent strings self.c_td_inds = c_td_inds # indicies of time-dependent terms self.c_td_splines = c_td_splines #List of c_op spline arrays self.c_td_spline_flags = c_td_spline_flags #flags for oper or super # Code generator properties self.code = [] # strings to be written to file self.level = 0 # indent level self.config = config #openmp settings self.use_openmp = use_openmp self.omp_components = omp_components self.omp_threads = omp_threads def write(self, string): """write lines of code to self.code""" self.code.append(" " * self.level + string + "\n") def file(self, filename): """open file called filename for writing""" self.file = open(filename, "w") def generate(self, filename="rhs.pyx"): """generate the file""" for line in cython_preamble(self.use_openmp): self.write(line) # write function for Hamiltonian terms (there is always at least one # term) for line in cython_checks() + self.ODE_func_header(): self.write(line) self.indent() for line in self.func_vars(): self.write(line) self.write(self.func_end()) self.dedent() # generate collapse operator functions if any c_terms if any(self.c_tdterms): for line in (cython_checks() + self.col_spmv_header() + cython_col_spmv()): self.write(line) self.indent() for line in self.func_which(): 
self.write(line) self.write(self.func_end()) self.dedent() for line in (cython_checks() + self.col_expect_header() + cython_col_expect(self.args)): self.write(line) self.indent() for line in self.func_which_expect(): self.write(line) self.write(self.func_end_real()) self.dedent() self.file(filename) self.file.writelines(self.code) self.file.close() self.config.cgen_num += 1 def indent(self): """increase indention level by one""" self.level += 1 def dedent(self): """decrease indention level by one""" if self.level == 0: raise SyntaxError("Error in code generator") self.level -= 1 def _get_arg_str(self, args): if len(args) == 0: return '' ret = '' for name, value in self.args.items(): if isinstance(value, np.ndarray): ret += ",\n np.ndarray[np.%s_t, ndim=1] %s" % \ (value.dtype.name, name) else: if isinstance(value, (int, np.int32, np.int64)): kind = 'int' elif isinstance(value, (float, np.float32, np.float64)): kind = 'float' elif isinstance(value, (complex, np.complex128)): kind = 'complex' #kind = type(value).__name__ ret += ",\n " + kind + " " + name return ret def ODE_func_header(self): """Creates function header for time-dependent ODE RHS.""" func_name = "def cy_td_ode_rhs(" # strings for time and vector variables input_vars = ("\n double t" + ",\n complex[::1] vec") for k in self.h_terms: input_vars += (",\n " + "complex[::1] data%d," % k + "int[::1] idx%d," % k + "int[::1] ptr%d" % k) kk = len(self.h_tdterms) for jj in range(len(self.c_td_splines)): input_vars += (",\n " + "complex[::1] data%d," % (jj+kk) + "int[::1] idx%d," % (jj+kk) + "int[::1] ptr%d" % (jj+kk)) if any(self.c_tdterms): for k in range(len(self.h_terms), len(self.h_terms) + len(self.c_tdterms)): input_vars += (",\n " + "complex[::1] data%d," % k + "int[::1] idx%d," % k + "int[::1] ptr%d" % k) #Add array for each Cubic_Spline term spline = 0 for htd in (self.h_tdterms+self.c_td_splines): if isinstance(htd, Cubic_Spline): if not htd.is_complex: input_vars += (",\n " + "double[::1] spline%d" % 
spline) else: input_vars += (",\n " + "complex[::1] spline%d" % spline) spline += 1 input_vars += self._get_arg_str(self.args) func_end = "):" return [func_name + input_vars + func_end] def col_spmv_header(self): """ Creates function header for time-dependent collapse operator terms. """ func_name = "def col_spmv(" input_vars = ("int which, double t, complex[::1] " + "data, int[::1] idx, int[::1] " + "ptr, complex[::1] vec") input_vars += self._get_arg_str(self.args) func_end = "):" return [func_name + input_vars + func_end] def col_expect_header(self): """ Creates function header for time-dependent collapse expectation values. """ func_name = "def col_expect(" input_vars = ("int which, double t, complex[::1] " + "data, int[::1] idx, int[::1] " + "ptr, complex[::1] vec") input_vars += self._get_arg_str(self.args) func_end = "):" return [func_name + input_vars + func_end] def func_vars(self): """Writes the variables and their types & spmv parts""" func_vars = ["", 'cdef size_t row', 'cdef unsigned int num_rows = vec.shape[0]', "cdef double complex * " + 'out = PyDataMem_NEW_ZEROED(num_rows,sizeof(complex))'] func_vars.append(" ") tdterms = self.h_tdterms hinds = 0 spline = 0 for ht in self.h_terms: hstr = str(ht) # Monte-carlo evolution if self.type == 'mc': if ht in self.h_td_inds: if isinstance(tdterms[hinds], str): td_str= tdterms[hinds] elif isinstance(tdterms[hinds], Cubic_Spline): S = tdterms[hinds] if not S.is_complex: td_str = "interp(t, %s, %s, spline%s)" % (S.a, S.b, spline) else: td_str = "zinterp(t, %s, %s, spline%s)" % (S.a, S.b, spline) spline += 1 hinds += 1 else: td_str = "1.0" str_out = "spmvpy(&data%s[0], &idx%s[0], &ptr%s[0], &vec[0], %s, &out[0], num_rows)" % ( ht, ht, ht, td_str) func_vars.append(str_out) # Master and Schrodinger evolution else: if self.h_tdterms[ht] == "1.0": if self.use_openmp and self.omp_components[ht]: str_out = "spmvpy_openmp(&data%s[0], &idx%s[0], &ptr%s[0], &vec[0], 1.0, out, num_rows, %s)" % ( ht, ht, ht, 
self.omp_threads) else: str_out = "spmvpy(&data%s[0], &idx%s[0], &ptr%s[0], &vec[0], 1.0, out, num_rows)" % ( ht, ht, ht) else: if isinstance(self.h_tdterms[ht], str): if self.use_openmp and self.omp_components[ht]: str_out = "spmvpy_openmp(&data%s[0], &idx%s[0], &ptr%s[0], &vec[0], %s, out, num_rows, %s)" % ( ht, ht, ht, self.h_tdterms[ht], self.omp_threads) else: str_out = "spmvpy(&data%s[0], &idx%s[0], &ptr%s[0], &vec[0], %s, out, num_rows)" % ( ht, ht, ht, self.h_tdterms[ht]) elif isinstance(self.h_tdterms[ht], Cubic_Spline): S = self.h_tdterms[ht] if not S.is_complex: interp_str = "interp(t, %s, %s, spline%s)" % (S.a, S.b, spline) else: interp_str = "zinterp(t, %s, %s, spline%s)" % (S.a, S.b, spline) spline += 1 if self.use_openmp and self.omp_components[ht]: str_out = "spmvpy_openmp(&data%s[0], &idx%s[0], &ptr%s[0], &vec[0], %s, out, num_rows, %s)" % ( ht, ht, ht, interp_str, self.omp_threads) else: str_out = "spmvpy(&data%s[0], &idx%s[0], &ptr%s[0], &vec[0], %s, out, num_rows)" % ( ht, ht, ht, interp_str) #Do nothing if not a specified type else: str_out= '' func_vars.append(str_out) cstr = 0 if len(self.c_tdterms) > 0: # add a spacer line between Hamiltonian components and collapse # components. 
func_vars.append(" ") terms = len(self.c_tdterms) tdterms = self.c_tdterms cinds = 0 for ct in range(terms): cstr = str(ct + hinds + 1) str_out = "spmvpy(&data%s[0], &idx%s[0], &ptr%s[0], &vec[0], %s, out, num_rows)" % ( cstr, cstr, cstr, " (" + tdterms[ct] + ")**2") cinds += 1 func_vars.append(str_out) #Collapse operators have cubic spline td-coeffs if len(self.c_td_splines) > 0: func_vars.append(" ") for ct in range(len(self.c_td_splines)): S = self.c_td_splines[ct] c_idx = self.c_td_spline_flags[ct] if not S.is_complex: interp_str = "interp(t, %s, %s, spline%s)" % (S.a, S.b, spline) else: interp_str = "zinterp(t, %s, %s, spline%s)" % (S.a, S.b, spline) spline += 1 #check if need to wrap string with ()**2 if c_idx > 0: interp_str = "("+interp_str+")**2" c_idx = abs(c_idx) if self.use_openmp and self.omp_components[ht]: str_out = "spmvpy_openmp(&data%s[0], &idx%s[0], &ptr%s[0], &vec[0], %s, out, num_rows, %s)" % ( c_idx, c_idx, c_idx, interp_str, self.omp_threads) else: str_out = "spmvpy(&data%s[0], &idx%s[0], &ptr%s[0], &vec[0], %s, out, num_rows)" % ( c_idx, c_idx, c_idx, interp_str) func_vars.append(str_out) return func_vars def func_which(self): """Writes 'else-if' statements forcollapse operator eval function""" out_string = [] ind = 0 out_string.append("cdef size_t kk") out_string.append("cdef complex ctd = %s" % self.c_tdterms[ind]) for k in self.c_td_inds: out_string.append("if which == " + str(k) + ":") out_string.append("""\ for kk in range(num_rows): out[kk] *= ctd """) ind += 1 return out_string def func_which_expect(self): """Writes 'else-if' statements for collapse expect function """ out_string = [] ind = 0 for k in self.c_td_inds: out_string.append("if which == " + str(k) + ":") out_string.append(" out *= conj(" + self.c_tdterms[ind] + ")") ind += 1 return out_string def func_end(self): return """\ cdef np.npy_intp dims = num_rows cdef np.ndarray[complex, ndim=1, mode='c'] arr_out = np.PyArray_SimpleNewFromData(1, &dims, np.NPY_COMPLEX128, out) 
PyArray_ENABLEFLAGS(arr_out, np.NPY_OWNDATA) return arr_out """ def func_end_real(self): return "return real(out)" def cython_preamble(use_openmp=False): """ Returns list of code segments for Cython preamble. """ if use_openmp: openmp_string='from qutip.cy.openmp.parfuncs cimport spmvpy_openmp' else: openmp_string='' return ["""#!python #cython: language_level=3 # This file is generated automatically by QuTiP. # (C) 2011 and later, QuSTaR import numpy as np cimport numpy as np cimport cython np.import_array() cdef extern from "numpy/arrayobject.h" nogil: void PyDataMem_NEW_ZEROED(size_t size, size_t elsize) void PyArray_ENABLEFLAGS(np.ndarray arr, int flags) """ +openmp_string+ """ from qutip.cy.spmatfuncs cimport spmvpy from qutip.cy.interpolate cimport interp, zinterp from qutip.cy.math cimport erf, zerf cdef double pi = 3.14159265358979323 include """+_include_string+""" """] def cython_checks(): """ List of strings that turn off Cython checks. """ return [""" @cython.cdivision(True) @cython.boundscheck(False) @cython.wraparound(False)"""] def cython_col_spmv(): """ Writes col_SPMV vars. """ return ["""\ cdef size_t row cdef unsigned int jj, row_start, row_end cdef unsigned int num_rows = vec.shape[0] cdef complex dot cdef complex * out = PyDataMem_NEW_ZEROED(num_rows,sizeof(complex)) for row in range(num_rows): dot = 0.0 row_start = ptr[row] row_end = ptr[row+1] for jj in range(row_start,row_end): dot = dot + data[jj] * vec[idx[jj]] out[row] = dot """] def cython_col_expect(args): """ Writes col_expect vars. 
""" return ["""\ cdef size_t row cdef int num_rows = vec.shape[0] cdef complex out = 0.0 cdef np.ndarray[complex, ndim=1, mode='c'] dot = col_spmv(which, t, data, idx, ptr, vec%s) for row in range(num_rows): out += conj(vec[row]) * dot[row] """ % "".join(["," + str(td_const[0]) for td_const in args.items()]) if args else ""] qutip-4.4.1/qutip/cy/complex_math.pxi000066400000000000000000000017671352460343600176620ustar00rootroot00000000000000cdef extern from "" namespace "std" nogil: double abs(double complex x) double complex acos(double complex x) double complex acosh(double complex x) double arg(double complex x) double complex asin(double complex x) double complex asinh(double complex x) double complex atan(double complex x) double complex atanh(double complex x) double complex conj(double complex x) double complex cos(double complex x) double complex cosh(double complex x) double complex exp(double complex x) double imag(double complex x) double complex log(double complex x) double complex log10(double complex x) double norm(double complex x) double complex proj(double complex x) double real(double complex x) double complex sin(double complex x) double complex sinh(double complex x) double complex sqrt(double complex x) double complex tan(double complex x) double complex tanh(double complex x) qutip-4.4.1/qutip/cy/cqobjevo.pxd000066400000000000000000000077021352460343600170000ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. 
Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### from qutip.cy.sparse_structs cimport CSR_Matrix, COO_Matrix from qutip.cy.cqobjevo_factor cimport CoeffFunc cdef class CQobjEvo: cdef int shape0, shape1 cdef object dims cdef int super cdef int num_ops cdef int dyn_args #cdef void (*factor_ptr)(double, complex*) cdef object factor_func cdef CoeffFunc factor_cobj cdef int factor_use_cobj # prepared buffer cdef complex[::1] coeff cdef complex* coeff_ptr cdef void _factor(self, double t) cdef void _factor_dyn(self, double t, complex* state, int[::1] state) cdef void _mul_vec(self, double t, complex* vec, complex* out) cdef void _mul_matf(self, double t, complex* mat, complex* out, int nrow, int ncols) cdef void _mul_matc(self, double t, complex* mat, complex* out, int nrow, int ncols) cpdef complex expect(self, double t, complex[::1] vec) cdef complex _expect(self, double t, complex* vec) cdef complex _expect_super(self, double t, complex* rho) cdef complex _overlapse(self, double t, complex* oper) cdef class CQobjCte(CQobjEvo): cdef int total_elem # pointer to data cdef CSR_Matrix cte cdef class CQobjCteDense(CQobjEvo): # pointer to data cdef complex[:, ::1] cte cdef class CQobjEvoTd(CQobjEvo): cdef long total_elem # pointer to data cdef CSR_Matrix cte cdef CSR_Matrix ** ops cdef long[::1] sum_elem cdef void _call_core(self, CSR_Matrix * out, complex* coeff) cdef class CQobjEvoTdDense(CQobjEvo): # data as array cdef complex[:, ::1] cte cdef complex[:, :, ::1] ops # prepared buffer cdef complex[:, ::1] data_t cdef complex* data_ptr cdef void _factor(self, double t) cdef void _call_core(self, complex[:,::1] out, complex* coeff) cdef class CQobjEvoTdMatched(CQobjEvo): cdef int nnz # data as array cdef int[::1] indptr cdef int[::1] indices cdef complex[::1] cte cdef complex[:, ::1] ops # prepared buffer cdef complex[::1] data_t cdef complex* data_ptr cdef void _factor(self, double t) cdef void _call_core(self, complex[::1] out, complex* 
coeff) qutip-4.4.1/qutip/cy/cqobjevo.pyx000066400000000000000000001236101352460343600170220ustar00rootroot00000000000000#!python #cython: language_level=3 # distutils: language = c++ # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### """ Contain the cython interface of QobjEvo. 
The parent class "CQobjEvo" set the interface. CQobjCte: QobjEvo that does not depend on times. sparse matrix CQobjCteDense: QobjEvo that does not depend on times. dense matrix - Hidden feature in the sense that it's not really documented and need to be explicitly used. Does not seems to results in significant speedup. CQobjEvoTd: QobjEvo that does depend on times. sparse matrix CQobjEvoTdDense: QobjEvo that does depend on times. dense matrix - Hidden feature in the sense that it's not really documented and need to be explicitly used. Does not seems to results in significant speedup. CQobjEvoTdMatched: QobjEvo that does depend on times. sparse matrix with 0s - Use sparce matrices that all have the same "filling". Therefore addition of such matrices become a vector addition. - Hidden feature/ experimental. It reasult in a speedup in some rare cases. In omp/cqobjevo_omp: Variantes which use parallel mat*vec and mat*mat product - CQobjCteOmp - CQobjEvoTdOmp - CQobjEvoTdMatchedOmp """ import numpy as np import scipy.sparse as sp cimport numpy as np import cython cimport cython from qutip.qobj import Qobj from qutip.cy.spmath cimport _zcsr_add_core from qutip.cy.spmatfuncs cimport spmvpy, _spmm_c_py, _spmm_f_py from qutip.cy.spmath import zcsr_add from qutip.cy.cqobjevo_factor cimport CoeffFunc, zptr2array1d cimport libc.math include "complex_math.pxi" include "sparse_routines.pxi" cdef extern from "Python.h": object PyLong_FromVoidPtr(void *) void* PyLong_AsVoidPtr(object) cdef extern from "numpy/arrayobject.h" nogil: void PyArray_ENABLEFLAGS(np.ndarray arr, int flags) void PyDataMem_FREE(void * ptr) void PyDataMem_RENEW(void * ptr, size_t size) void PyDataMem_NEW_ZEROED(size_t size, size_t elsize) void PyDataMem_NEW(size_t size) def _zcsr_match(sparses_list): """ For a list of csr sparse matrice A, set them so the their indptr and indices be all equal. Require keeping 0s in the data, but summation can be done in vector form. 
""" full_shape = sparses_list[0].copy() for sparse_elem in sparses_list[1:]: full_shape.data *= 0. full_shape.data += 1. if sparse_elem.indptr[-1] != 0: full_shape = zcsr_add( full_shape.data, full_shape.indices, full_shape.indptr, sparse_elem.data, sparse_elem.indices, sparse_elem.indptr, full_shape.shape[0], full_shape.shape[1], full_shape.indptr[-1], sparse_elem.indptr[-1], 0.) out = [] for sparse_elem in sparses_list[:]: full_shape.data *= 0. if sparse_elem.indptr[-1] != 0: out.append(zcsr_add( full_shape.data, full_shape.indices, full_shape.indptr, sparse_elem.data, sparse_elem.indices, sparse_elem.indptr, full_shape.shape[0], full_shape.shape[1], full_shape.indptr[-1], sparse_elem.indptr[-1], 1.)) else: out.append(full_shape.copy()) return out @cython.boundscheck(False) @cython.wraparound(False) cdef _shallow_get_state(CSR_Matrix* mat): """ Converts a CSR sparse matrix to a tuples for pickling. No deep copy of the data, pointer are passed. """ long_data = PyLong_FromVoidPtr(&mat.data[0]) long_indices = PyLong_FromVoidPtr(&mat.indices[0]) long_indptr = PyLong_FromVoidPtr(&mat.indptr[0]) return (long_data, long_indices, long_indptr, mat.nrows, mat.ncols, mat.nnz, mat.max_length, mat.is_set, mat.numpy_lock) @cython.boundscheck(False) @cython.wraparound(False) cdef _shallow_set_state(CSR_Matrix* mat, state): """ Converts a CSR sparse matrix to a tuples for pickling. No deep copy of the data, pointer are passed. """ mat.data = PyLong_AsVoidPtr(state[0]) mat.indices = PyLong_AsVoidPtr(state[1]) mat.indptr = PyLong_AsVoidPtr(state[2]) mat.nrows = state[3] mat.ncols = state[4] mat.nnz = state[5] mat.max_length = state[6] mat.is_set = state[7] mat.numpy_lock = state[8] cdef class CQobjEvo: """ Interface for the CQobjEvo's variantes Python Methods -------------- mul_vec(double t, complex[::1] vec) return self @ vec mul_mat(double t, np.ndarray[complex, ndim=2] mat) return self @ mat mat can be both "C" or "F" continuous. 
expect(double t, complex[::1] vec) return expectation value, knows to use the super version or not. ode_mul_mat_f_vec(double t, complex[::1] mat) return self @ mat mat is in a 1d, F ordered form. Used with scipy solver which only accept vector. call(double t, int data=0) return this at time t call_with_coeff(complex[::1] coeff, int data=0) return this with the given coefficients set_data(cte, [ops]) build the object from data from QobjEvo set_factor(self, func=None, ptr=False, obj=None) get the coefficient function from QobjEvo Cython Methods -------------- _mul_vec(double t, complex* vec, complex* out): out += self * vec _mul_matf(double t, complex* mat, complex* out, int nrow, int ncols): out += self * dense mat fortran ordered _mul_matc(double t, complex* mat, complex* out, int nrow, int ncols): out += self * dense mat c ordered _expect(double t, complex* vec): return _expect_super(double t, complex* rho): return tr( self * rho ) """ cdef void _mul_vec(self, double t, complex* vec, complex* out): """self * vec""" pass cdef void _mul_matf(self, double t, complex* mat, complex* out, int nrow, int ncols): """self * dense mat fortran ordered """ pass cdef void _mul_matc(self, double t, complex* mat, complex* out, int nrow, int ncols): """self * dense mat c ordered""" pass cdef complex _expect(self, double t, complex* vec): """""" return 0. cdef complex _expect_super(self, double t, complex* rho): """tr( self_L * rho * self_R )""" return 0. cdef complex _overlapse(self, double t, complex* oper): """tr( self * oper )""" return 0. 
def mul_vec(self, double t, complex[::1] vec): cdef np.ndarray[complex, ndim=1] out = np.zeros(self.shape0, dtype=complex) self._mul_vec(t, &vec[0], &out[0]) return out def mul_mat(self, double t, np.ndarray[complex, ndim=2] mat): cdef np.ndarray[complex, ndim=2] out cdef unsigned int sp_rows = self.shape0 cdef unsigned int nrows = mat.shape[0] cdef unsigned int ncols = mat.shape[1] if mat.flags["F_CONTIGUOUS"]: out = np.zeros((sp_rows,ncols), dtype=complex, order="F") self._mul_matf(t, &mat[0,0], &out[0,0], nrows, ncols) else: out = np.zeros((sp_rows,ncols), dtype=complex) self._mul_matc(t, &mat[0,0], &out[0,0], nrows, ncols) return out cpdef complex expect(self, double t, complex[::1] vec): if self.super: return self._expect_super(t, &vec[0]) else: return self._expect(t, &vec[0]) def overlapse(self, double t, complex[::1] oper): """ Compute the overlapse of operator as tr(this @ oper) """ cdef complex* vec = &oper[0] return self._overlapse(t, vec) def ode_mul_mat_f_vec(self, double t, complex[::1] mat): cdef np.ndarray[complex, ndim=1] out = np.zeros(self.shape1*self.shape1, dtype=complex) self._mul_matf(t, &mat[0], &out[0], self.shape1, self.shape1) return out def call(self, double t, int data=0): return None def call_with_coeff(self, complex[::1] coeff, int data=0): return None def has_dyn_args(self, int dyn_args): self.dyn_args = dyn_args cdef void _factor(self, double t): cdef int i if self.factor_use_cobj: self.factor_cobj._call_core(t, self.coeff_ptr) else: coeff = self.factor_func(t) for i in range(self.num_ops): self.coeff_ptr[i] = coeff[i] cdef void _factor_dyn(self, double t, complex* state, int[::1] shape): cdef int len_ if self.dyn_args: if self.factor_use_cobj: # print("factor_use_cobj") self.factor_cobj._dyn_args(t, state, shape) else: len_ = shape[0] * shape[1] # print(len_, shape.shape[0]) self.factor_func.dyn_args(t, np.array( state), np.array(shape)) self._factor(t) def set_data(self, cte): pass def __getstate__(self): return None def 
__setstate__(self, state): pass cdef class CQobjCte(CQobjEvo): def set_data(self, cte): self.shape0 = cte.shape[0] self.shape1 = cte.shape[1] self.dims = cte.dims self.cte = CSR_from_scipy(cte.data) self.total_elem = cte.data.data.shape[0] self.super = cte.issuper def __getstate__(self): CSR_info = _shallow_get_state(&self.cte) return (self.shape0, self.shape1, self.dims, self.total_elem, self.super, CSR_info) def __setstate__(self, state): self.shape0 = state[0] self.shape1 = state[1] self.dims = state[2] self.total_elem = state[3] self.super = state[4] _shallow_set_state(&self.cte, state[5]) def call(self, double t, int data=0): cdef CSR_Matrix out out.is_set = 0 copy_CSR(&out, &self.cte) scipy_obj = CSR_to_scipy(&out) # free_CSR(&out)? data is own by the scipy_obj? if data: return scipy_obj else: return Qobj(scipy_obj, dims=self.dims) def call_with_coeff(self, complex[::1] coeff, int data=0): cdef CSR_Matrix out out.is_set = 0 copy_CSR(&out, &self.cte) scipy_obj = CSR_to_scipy(&out) # free_CSR(&out)? data is own by the scipy_obj? 
if data: return scipy_obj else: return Qobj(scipy_obj) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _mul_vec(self, double t, complex* vec, complex* out): spmvpy(self.cte.data, self.cte.indices, self.cte.indptr, vec, 1., out, self.shape0) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _mul_matf(self, double t, complex* mat, complex* out, int nrow, int ncol): _spmm_f_py(self.cte.data, self.cte.indices, self.cte.indptr, mat, 1., out, self.shape0, nrow, ncol) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _mul_matc(self, double t, complex* mat, complex* out, int nrow, int ncol): _spmm_c_py(self.cte.data, self.cte.indices, self.cte.indptr, mat, 1., out, self.shape0, nrow, ncol) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef complex _expect(self, double t, complex* vec): cdef complex[::1] y = np.zeros(self.shape0, dtype=complex) spmvpy(self.cte.data, self.cte.indices, self.cte.indptr, vec, 1., &y[0], self.shape0) cdef int row cdef complex dot = 0 for row from 0 <= row < self.shape0: dot += conj(vec[row])*y[row] return dot @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef complex _expect_super(self, double t, complex* vec): cdef int row cdef int jj, row_start, row_end cdef int num_rows = self.shape0 cdef int n = libc.math.sqrt(num_rows) cdef complex dot = 0.0 for row from 0 <= row < num_rows by n+1: row_start = self.cte.indptr[row] row_end = self.cte.indptr[row+1] for jj from row_start <= jj < row_end: dot += self.cte.data[jj]*vec[self.cte.indices[jj]] return dot @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef complex _overlapse(self, double t, complex* oper): """tr( self * oper )""" cdef int row cdef int jj, row_start, row_end cdef int num_rows = self.shape0 cdef complex tr = 0.0 for row in range(num_rows): row_start = self.cte.indptr[row] row_end = 
self.cte.indptr[row+1] for jj from row_start <= jj < row_end: tr += self.cte.data[jj]*oper[num_rows*jj + row] return tr cdef class CQobjCteDense(CQobjEvo): def set_data(self, cte): self.shape0 = cte.shape[0] self.shape1 = cte.shape[1] self.dims = cte.dims self.cte = cte.data.toarray() self.super = cte.issuper def __getstate__(self): return (self.shape0, self.shape1, self.dims, self.super, np.array(self.cte)) def __setstate__(self, state): self.shape0 = state[0] self.shape1 = state[1] self.dims = state[2] self.super = state[3] self.cte = state[4] def call(self, double t, int data=0): if data: return sp.csr_matrix(self.cte, dtype=complex, copy=True) else: return Qobj(self.cte, dims = self.dims) def call_with_coeff(self, complex[::1] coeff, int data=0): if data: return sp.csr_matrix(self.cte, dtype=complex, copy=True) else: return Qobj(self.cte, dims = self.dims) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _mul_vec(self, double t, complex* vec, complex* out): cdef int i, j cdef complex* ptr for i in range(self.shape0): ptr = &self.cte[i,0] for j in range(self.shape1): out[i] += ptr[j]*vec[j] @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _mul_matf(self, double t, complex* mat, complex* out, int nrow, int ncol): cdef int i, j, k cdef complex* ptr = &self.cte[0,0] for i in range(self.shape0): for j in range(ncol): for k in range(nrow): out[i + j*self.shape0] += ptr[i*nrow + k]*mat[k + j*nrow] @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _mul_matc(self, double t, complex* mat, complex* out, int nrow, int ncol): cdef int i, j, k cdef complex* ptr = &self.cte[0,0] for i in range(self.shape0): for j in range(ncol): for k in range(nrow): out[i*ncol + j] += ptr[i*nrow + k]*mat[k*ncol + j] @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef complex _expect(self, double t, complex* vec): cdef int i, j cdef complex dot = 0 for 
i in range(self.shape0): for j in range(self.shape1): dot += conj(vec[i])*self.cte[i,j]*vec[j] return dot @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef complex _expect_super(self, double t, complex* vec): cdef int row, i cdef int num_rows = self.shape0 cdef int n = libc.math.sqrt(num_rows) cdef complex dot = 0.0 for row from 0 <= row < num_rows by n+1: for i in range(self.shape1): dot += self.cte[row,i]*vec[i] return dot @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef complex _overlapse(self, double t, complex* oper): """tr( self * oper )""" cdef int i, j cdef complex tr = 0.0 for i in range(self.shape0): for j in range(self.shape0): tr += self.cte.data[i*self.shape0 + j] * oper[j + i*self.shape0] return tr cdef class CQobjEvoTd(CQobjEvo): def __init__(self): self.num_ops = 0 self.ops = PyDataMem_NEW(0 * sizeof(CSR_Matrix*)) def __del__(self): for i in range(self.num_ops): PyDataMem_FREE(self.ops[i]) PyDataMem_FREE(self.ops) def set_data(self, cte, ops): cdef int i self.shape0 = cte.shape[0] self.shape1 = cte.shape[1] self.dims = cte.dims self.cte = CSR_from_scipy(cte.data) cummulative_op = cte.data self.super = cte.issuper self.num_ops = len(ops) self.coeff = np.empty((self.num_ops,), dtype=complex) self.coeff_ptr = &self.coeff[0] PyDataMem_FREE(self.ops) self.ops = PyDataMem_NEW(self.num_ops * sizeof(CSR_Matrix*)) self.sum_elem = np.zeros(self.num_ops, dtype=int) for i, op in enumerate(ops): self.ops[i] = PyDataMem_NEW(sizeof(CSR_Matrix)) CSR_from_scipy_inplace(op[0].data, self.ops[i]) cummulative_op += op[0].data self.sum_elem[i] = cummulative_op.data.shape[0] self.total_elem = self.sum_elem[self.num_ops-1] def set_factor(self, func=None, ptr=False, obj=None): self.factor_use_cobj = 0 if func is not None: self.factor_func = func elif obj is not None: self.factor_use_cobj = 1 self.factor_cobj = obj else: raise Exception("Could not set coefficient function") def __getstate__(self): cte_info = 
_shallow_get_state(&self.cte) ops_info = () sum_elem = () for i in range(self.num_ops): ops_info += (_shallow_get_state(self.ops[i]),) sum_elem += (self.sum_elem[i],) return (self.shape0, self.shape1, self.dims, self.total_elem, self.super, self.factor_use_cobj, self.factor_cobj, self.factor_func, self.num_ops, sum_elem, cte_info, ops_info) def __setstate__(self, state): self.shape0 = state[0] self.shape1 = state[1] self.dims = state[2] self.total_elem = state[3] self.super = state[4] self.factor_use_cobj = state[5] if self.factor_use_cobj: self.factor_cobj = state[6] self.factor_func = state[7] self.num_ops = state[8] _shallow_set_state(&self.cte, state[10]) self.sum_elem = np.zeros(self.num_ops, dtype=int) self.ops = PyDataMem_NEW(self.num_ops * sizeof(CSR_Matrix*)) for i in range(self.num_ops): self.ops[i] = PyDataMem_NEW(sizeof(CSR_Matrix)) self.sum_elem[i] = state[9][i] _shallow_set_state(self.ops[i], state[11][i]) self.coeff = np.empty((self.num_ops,), dtype=complex) self.coeff_ptr = &self.coeff[0] @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _call_core(self, CSR_Matrix * out, complex* coeff): cdef int i cdef CSR_Matrix previous, next if(self.num_ops ==1): _zcsr_add_core(self.cte.data, self.cte.indices, self.cte.indptr, self.ops[0].data, self.ops[0].indices, self.ops[0].indptr, coeff[0], out, self.shape0, self.shape1) else: # Ugly with a loop for 1 to N-2... 
# It save the copy of data from cte and out # no init/free to cte, out init_CSR(&next, self.sum_elem[0], self.shape0, self.shape1) _zcsr_add_core(self.cte.data, self.cte.indices, self.cte.indptr, self.ops[0].data, self.ops[0].indices, self.ops[0].indptr, coeff[0], &next, self.shape0, self.shape1) previous, next = next, previous for i in range(1,self.num_ops-1): init_CSR(&next, self.sum_elem[i], self.shape0, self.shape1) _zcsr_add_core(previous.data, previous.indices, previous.indptr, self.ops[i].data, self.ops[i].indices, self.ops[i].indptr, coeff[i], &next, self.shape0, self.shape1) free_CSR(&previous) previous, next = next, previous _zcsr_add_core(previous.data, previous.indices, previous.indptr, self.ops[self.num_ops-1].data, self.ops[self.num_ops-1].indices, self.ops[self.num_ops-1].indptr, coeff[self.num_ops-1], out, self.shape0, self.shape1) free_CSR(&previous) def call(self, double t, int data=0): cdef CSR_Matrix out init_CSR(&out, self.total_elem, self.shape0, self.shape1) self._factor(t) self._call_core(&out, self.coeff_ptr) scipy_obj = CSR_to_scipy(&out) # free_CSR(&out)? data is own by the scipy_obj? if data: return scipy_obj else: return Qobj(scipy_obj, dims=self.dims) def call_with_coeff(self, complex[::1] coeff, int data=0): cdef CSR_Matrix out init_CSR(&out, self.total_elem, self.shape0, self.shape1) self._call_core(&out, &coeff[0]) scipy_obj = CSR_to_scipy(&out) # free_CSR(&out)? data is own by the scipy_obj? 
if data: return scipy_obj else: return Qobj(scipy_obj) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _mul_vec(self, double t, complex* vec, complex* out): cdef int[2] shape shape[0] = self.shape1 shape[1] = 1 self._factor_dyn(t, vec, shape) cdef int i spmvpy(self.cte.data, self.cte.indices, self.cte.indptr, vec, 1., out, self.shape0) for i in range(self.num_ops): spmvpy(self.ops[i].data, self.ops[i].indices, self.ops[i].indptr, vec, self.coeff_ptr[i], out, self.shape0) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _mul_matf(self, double t, complex* mat, complex* out, int nrow, int ncol): cdef int[2] shape shape[0] = nrow shape[1] = ncol self._factor_dyn(t, mat, shape) cdef int i _spmm_f_py(self.cte.data, self.cte.indices, self.cte.indptr, mat, 1., out, self.shape0, nrow, ncol) for i in range(self.num_ops): _spmm_f_py(self.ops[i].data, self.ops[i].indices, self.ops[i].indptr, mat, self.coeff_ptr[i], out, self.shape0, nrow, ncol) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _mul_matc(self, double t, complex* mat, complex* out, int nrow, int ncol): cdef int[2] shape shape[0] = nrow shape[1] = ncol self._factor_dyn(t, mat, shape) cdef int i _spmm_c_py(self.cte.data, self.cte.indices, self.cte.indptr, mat, 1., out, self.shape0, nrow, ncol) for i in range(self.num_ops): _spmm_c_py(self.ops[i].data, self.ops[i].indices, self.ops[i].indptr, mat, self.coeff_ptr[i], out, self.shape0, nrow, ncol) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef complex _expect(self, double t, complex* vec): cdef complex [::1] y = np.zeros(self.shape0, dtype=complex) cdef int row cdef complex dot = 0 self._mul_vec(t, &vec[0], &y[0]) for row from 0 <= row < self.shape0: dot += conj(vec[row]) * y[row] return dot @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef complex _expect_super(self, double t, complex* 
vec): cdef int[2] shape cdef int row, i cdef int jj, row_start, row_end cdef int num_rows = self.shape0 cdef int n = libc.math.sqrt(num_rows) cdef complex dot = 0.0 shape[0] = n shape[1] = n self._factor_dyn(t, vec, shape) for row from 0 <= row < num_rows by n+1: row_start = self.cte.indptr[row] row_end = self.cte.indptr[row+1] for jj from row_start <= jj < row_end: dot += self.cte.data[jj]*vec[self.cte.indices[jj]] for i in range(self.num_ops): for row from 0 <= row < num_rows by n+1: row_start = self.ops[i].indptr[row] row_end = self.ops[i].indptr[row+1] for jj from row_start <= jj < row_end: dot += self.ops[i].data[jj] * \ vec[self.ops[i].indices[jj]] * self.coeff_ptr[i] return dot @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef complex _overlapse(self, double t, complex* oper): """tr( self * oper )""" cdef int jj, row_start, row_end, row cdef int num_rows = self.shape0 cdef complex tr = 0.0 cdef int[2] shape shape[0] = self.shape0 shape[1] = self.shape0 self._factor_dyn(t, oper, shape) for row in range(num_rows): row_start = self.cte.indptr[row] row_end = self.cte.indptr[row+1] for jj from row_start <= jj < row_end: tr += self.cte.data[jj] * oper[num_rows*jj + row] for i in range(self.num_ops): for row in range(num_rows): row_start = self.ops[i].indptr[row] row_end = self.ops[i].indptr[row+1] for jj from row_start <= jj < row_end: tr += self.ops[i].data[jj] * oper[num_rows*jj + row] * self.coeff_ptr[i] return tr cdef class CQobjEvoTdDense(CQobjEvo): def set_data(self, cte, ops): cdef int i, j, k self.shape0 = cte.shape[0] self.shape1 = cte.shape[1] self.dims = cte.dims self.super = cte.issuper self.num_ops = len(ops) self.cte = cte.data.toarray() self.ops = np.zeros((self.num_ops, self.shape0, self.shape1), dtype=complex) self.data_t = np.empty((self.shape0, self.shape1), dtype=complex) self.data_ptr = &self.data_t[0,0] self.coeff = np.empty((self.num_ops,), dtype=complex) self.coeff_ptr = &self.coeff[0] for i, op in 
enumerate(ops): oparray = op[0].data.toarray() for j in range(self.shape0): for k in range(self.shape1): self.ops[i,j,k] = oparray[j,k] def set_factor(self, func=None, ptr=False, obj=None): self.factor_use_cobj = 0 if func is not None: self.factor_func = func elif obj is not None: self.factor_use_cobj = 1 self.factor_cobj = obj else: raise Exception("Could not set coefficient function") def __getstate__(self): return (self.shape0, self.shape1, self.dims, self.super, self.factor_use_cobj, self.factor_cobj, self.factor_func, self.num_ops, np.array(self.cte), np.array(self.ops)) def __setstate__(self, state): self.shape0 = state[0] self.shape1 = state[1] self.dims = state[2] self.super = state[3] self.factor_use_cobj = state[4] if self.factor_use_cobj: self.factor_cobj = state[5] self.factor_func = state[6] self.num_ops = state[7] self.cte = state[8] self.ops = state[9] self.data_t = np.empty((self.shape0, self.shape1), dtype=complex) self.data_ptr = &self.data_t[0,0] self.coeff = np.empty((self.num_ops,), dtype=complex) self.coeff_ptr = &self.coeff[0] @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _call_core(self, complex[:,::1] out, complex* coeff): cdef int i, j cdef complex* ptr cdef complex* out_ptr #copy(self.cte, out) ptr = &self.cte[0,0] out_ptr = &out[0,0] for i in range(self.shape0 * self.shape0): out_ptr[i] = ptr[i] for i in range(self.num_ops): ptr = &self.ops[i,0,0] for j in range(self.shape0 * self.shape0): out_ptr[j] += ptr[j]*coeff[i] def call(self, double t, int data=0): cdef np.ndarray[complex, ndim=2] data_t = \ np.empty((self.shape0, self.shape1), dtype=complex) self._factor(t) self._call_core(data_t, self.coeff_ptr) if data: return sp.csr_matrix(data_t, dtype=complex, copy=True) else: return Qobj(data_t, dims = self.dims) def call_with_coeff(self, complex[::1] coeff, int data=0): cdef np.ndarray[complex, ndim=2] data_t = \ np.empty((self.shape0, self.shape1), dtype=complex) self._call_core(data_t, &coeff[0]) 
if data: return sp.csr_matrix(data_t, dtype=complex, copy=True) else: return Qobj(data_t, dims = self.dims) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _mul_vec(self, double t, complex* vec, complex* out): cdef int[2] shape shape[0] = self.shape1 shape[1] = 1 self._factor_dyn(t, vec, shape) self._call_core(self.data_t, self.coeff_ptr) cdef int i, j for i in range(self.shape0): for j in range(self.shape1): out[i] += self.data_t[i,j]*vec[j] @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _mul_matf(self, double t, complex* mat, complex* out, int nrow, int ncol): cdef int i, j, k cdef int[2] shape shape[0] = nrow shape[1] = ncol self._factor_dyn(t, mat, shape) self._call_core(self.data_t, self.coeff_ptr) for i in range(self.shape0): for j in range(nrow): for k in range(ncol): out[i + j*self.shape0] += self.data_ptr[i*nrow + k] *\ mat[k + j*nrow] @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _mul_matc(self, double t, complex* mat, complex* out, int nrow, int ncol): cdef int i, j, k cdef int[2] shape shape[0] = nrow shape[1] = ncol self._factor_dyn(t, mat, shape) self._call_core(self.data_t, self.coeff_ptr) for i in range(self.shape0): for j in range(ncol): for k in range(nrow): out[i*ncol + j] += self.data_ptr[i*nrow + k]*mat[k*ncol + j] @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef complex _expect(self, double t, complex* vec): cdef int row cdef complex dot = 0 cdef int[2] shape shape[0] = self.shape1 shape[1] = 1 self._factor_dyn(t, vec, shape) self._call_core(self.data_t, self.coeff_ptr) for i in range(self.shape0): for j in range(self.shape1): dot += conj(vec[i])*self.data_t[i,j]*vec[j] return dot @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef complex _expect_super(self, double t, complex* vec): cdef int row, i cdef int num_rows = self.shape0 cdef int n = 
libc.math.sqrt(num_rows) cdef complex dot = 0.0 cdef int[2] shape shape[0] = n shape[1] = n self._factor_dyn(t, vec, shape) self._call_core(self.data_t, self.coeff_ptr) for row from 0 <= row < num_rows by n+1: for i in range(self.shape1): dot += self.data_t[row,i]*vec[i] return dot @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef complex _overlapse(self, double t, complex* oper): cdef int i, j cdef int[2] shape shape[0] = self.shape0 shape[1] = self.shape0 self._factor_dyn(t, oper, shape) self._call_core(self.data_t, self.coeff_ptr) cdef complex tr = 0.0 for i in range(self.shape0): for j in range(self.shape0): tr += self.data_t[i*self.shape0, j] * oper[j*self.shape0 + i] return tr cdef class CQobjEvoTdMatched(CQobjEvo): def set_data(self, cte, ops): cdef int i, j self.shape0 = cte.shape[0] self.shape1 = cte.shape[1] self.dims = cte.dims self.super = cte.issuper self.num_ops = len(ops) self.coeff = np.zeros((self.num_ops), dtype=complex) self.coeff_ptr = &self.coeff[0] sparse_list = [] for op in ops: sparse_list.append(op[0].data) sparse_list += [cte.data] matched = _zcsr_match(sparse_list) self.indptr = matched[0].indptr self.indices = matched[0].indices self.cte = matched[-1].data self.nnz = len(self.cte) self.data_t = np.zeros((self.nnz), dtype=complex) self.data_ptr = &self.data_t[0] self.ops = np.zeros((self.num_ops, self.nnz), dtype=complex) for i, op in enumerate(matched[:-1]): for j in range(self.nnz): self.ops[i,j] = op.data[j] def set_factor(self, func=None, ptr=False, obj=None): self.factor_use_cobj = 0 if func is not None: self.factor_func = func elif obj is not None: self.factor_use_cobj = 1 self.factor_cobj = obj else: raise Exception("Could not set coefficient function") def __getstate__(self): return (self.shape0, self.shape1, self.dims, self.nnz, self.super, self.factor_use_cobj, self.factor_cobj, self.factor_func, self.num_ops, np.array(self.indptr), np.array(self.indices), np.array(self.cte), np.array(self.ops)) 
def __setstate__(self, state): self.shape0 = state[0] self.shape1 = state[1] self.dims = state[2] self.nnz = state[3] self.super = state[4] self.factor_use_cobj = state[5] if self.factor_use_cobj: self.factor_cobj = state[6] self.factor_func = state[7] self.num_ops = state[8] self.indptr = state[9] self.indices = state[10] self.cte = state[11] self.ops = state[12] self.coeff = np.zeros((self.num_ops), dtype=complex) self.coeff_ptr = &self.coeff[0] self.data_t = np.zeros((self.nnz), dtype=complex) self.data_ptr = &self.data_t[0] @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _call_core(self, complex[::1] out, complex* coeff): cdef int i, j cdef complex * ptr ptr = &self.cte[0] for j in range(self.nnz): out[j] = ptr[j] for i in range(self.num_ops): ptr = &self.ops[i,0] for j in range(self.nnz): out[j] += ptr[j] * coeff[i] def call(self, double t, int data=0): cdef int i cdef complex[::1] data_t = np.empty(self.nnz, dtype=complex) self._factor(t) self._call_core(data_t, self.coeff_ptr) cdef CSR_Matrix out_csr init_CSR(&out_csr, self.nnz, self.shape0, self.shape1) for i in range(self.nnz): out_csr.data[i] = data_t[i] out_csr.indices[i] = self.indices[i] for i in range(self.shape0+1): out_csr.indptr[i] = self.indptr[i] scipy_obj = CSR_to_scipy(&out_csr) # free_CSR(&out)? data is own by the scipy_obj? if data: return scipy_obj else: return Qobj(scipy_obj, dims=self.dims) def call_with_coeff(self, complex[::1] coeff, int data=0): cdef complex[::1] out = np.empty(self.nnz, dtype=complex) self._call_core(out, &coeff[0]) cdef CSR_Matrix out_csr init_CSR(&out_csr, self.nnz, self.shape0, self.shape1) for i in range(self.nnz): out_csr.data[i] = out[i] out_csr.indices[i] = self.indices[i] for i in range(self.shape0+1): out_csr.indptr[i] = self.indptr[i] scipy_obj = CSR_to_scipy(&out_csr) # free_CSR(&out)? data is own by the scipy_obj? 
if data: return scipy_obj else: return Qobj(scipy_obj, dims=self.dims) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _mul_vec(self, double t, complex* vec, complex* out): cdef int[2] shape shape[0] = self.shape1 shape[1] = 1 self._factor_dyn(t, vec, shape) self._call_core(self.data_t, self.coeff_ptr) spmvpy(self.data_ptr, &self.indices[0], &self.indptr[0], vec, 1., out, self.shape0) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _mul_matf(self, double t, complex* mat, complex* out, int nrow, int ncol): cdef int[2] shape shape[0] = nrow shape[1] = ncol self._factor_dyn(t, mat, shape) self._call_core(self.data_t, self.coeff_ptr) _spmm_f_py(self.data_ptr, &self.indices[0], &self.indptr[0], mat, 1., out, self.shape0, nrow, ncol) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _mul_matc(self, double t, complex* mat, complex* out, int nrow, int ncol): cdef int[2] shape shape[0] = nrow shape[1] = ncol self._factor_dyn(t, mat, shape) self._call_core(self.data_t, self.coeff_ptr) _spmm_c_py(self.data_ptr, &self.indices[0], &self.indptr[0], mat, 1., out, self.shape0, nrow, ncol) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef complex _expect(self, double t, complex* vec): cdef complex [::1] y = np.zeros(self.shape0, dtype=complex) cdef int row cdef complex dot = 0 self._mul_vec(t, &vec[0], &y[0]) for row from 0 <= row < self.shape0: dot += conj(vec[row]) * y[row] return dot @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef complex _expect_super(self, double t, complex* vec): cdef int row cdef int jj, row_start, row_end cdef int num_rows = self.shape0 cdef int n = libc.math.sqrt(num_rows) cdef complex dot = 0.0 cdef int[2] shape shape[0] = n shape[1] = n self._factor_dyn(t, vec, shape) self._call_core(self.data_t, self.coeff_ptr) for row from 0 <= row < num_rows by n+1: row_start = 
self.indptr[row] row_end = self.indptr[row+1] for jj from row_start <= jj < row_end: dot += self.data_ptr[jj]*vec[self.indices[jj]] return dot @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef complex _overlapse(self, double t, complex* oper): """tr( self * oper )""" cdef int row cdef int jj, row_start, row_end cdef int num_rows = self.shape0 cdef complex tr = 0.0 cdef int[2] shape shape[0] = self.shape0 shape[1] = self.shape0 self._factor_dyn(t, oper, shape) self._call_core(self.data_t, self.coeff_ptr) for row in range(num_rows): row_start = self.cte.indptr[row] row_end = self.cte.indptr[row+1] for jj from row_start <= jj < row_end: tr += self.data_ptr[jj]*oper[num_rows*jj + row] return tr qutip-4.4.1/qutip/cy/cqobjevo_factor.pxd000066400000000000000000000012121352460343600203240ustar00rootroot00000000000000#!python #cython: language_level=3 cimport numpy as np cdef np.ndarray[complex, ndim=1] zptr2array1d(complex* ptr, int N) cdef np.ndarray[complex, ndim=2] zptr2array2d(complex* ptr, int R, int C) cdef np.ndarray[int, ndim=1] iprt2array(int* ptr, int N) cdef class CoeffFunc: cdef dict _args cdef int _num_ops cdef void _call_core(self, double t, complex* coeff) cdef void _dyn_args(self, double t, complex* state, int[::1] shape) cdef class StrCoeff(CoeffFunc): cdef list _dyn_args_list cdef int _num_expect cdef int[2] _mat_shape cdef list _expect_op cdef complex[::1] _expect_vec cdef complex[::1] _vec qutip-4.4.1/qutip/cy/cqobjevo_factor.pyx000066400000000000000000000276251352460343600203710ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, The QuTiP Project. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. 
Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### import numpy as np cimport numpy as np import cython cimport cython cimport libc.math from qutip.cy.inter import _prep_cubic_spline from qutip.cy.inter cimport (_spline_complex_cte_second, _spline_complex_t_second, _step_complex_t, _step_complex_cte) from qutip.cy.interpolate cimport (interp, zinterp) from qutip.cy.cqobjevo cimport CQobjEvo include "complex_math.pxi" """ Support cqobjevo's array and str based coefficient. By using inheritance, it is possible to 'cimport' coefficient compiled at runtime. Pure array based, (inter.pyx or interpolate.pyx) are defined here. 
str inherite from StrCoeff and just add the _call_core method. """ cdef np.ndarray[complex, ndim=1] zptr2array1d(complex* ptr, int N): cdef np.npy_intp Ns[1] Ns[0] = N return np.PyArray_SimpleNewFromData(1, Ns, np.NPY_COMPLEX128, ptr) cdef np.ndarray[complex, ndim=2] zptr2array2d(complex* ptr, int R, int C): cdef np.npy_intp Ns[2] Ns[0] = R Ns[1] = C return np.PyArray_SimpleNewFromData(2, Ns, np.NPY_COMPLEX128, ptr) cdef np.ndarray[int, ndim=1] iprt2array(int* ptr, int N): cdef np.npy_intp Ns[1] Ns[0] = N return np.PyArray_SimpleNewFromData(1, Ns, np.NPY_INT32, ptr) cdef class CoeffFunc: def __init__(self, ops, args, tlist): self._args = {} def __call__(self, double t, args={}): cdef np.ndarray[ndim=1, dtype=complex] coeff = \ np.zeros(self._num_ops, dtype=complex) self._call_core(t, &coeff[0]) return coeff def set_args(self, args): pass cdef void _call_core(self, double t, complex* coeff): pass cdef void _dyn_args(self, double t, complex* state, int[::1] shape): pass def __getstate__(self): pass def __setstate__(self, state): pass def get_args(self): return self._args cdef class InterpolateCoeff(CoeffFunc): cdef double a, b cdef complex[:,::1] c def __init__(self, ops, args, tlist): cdef int i, j, l self._args = {} self._num_ops = len(ops) self.a = ops[0][2].a self.b = ops[0][2].b l = len(ops[0][2].coeffs) self.c = np.zeros((self._num_ops, l), dtype=complex) for i in range(self._num_ops): for j in range(l): self.c[i,j] = ops[i][2].coeffs[j] def __call__(self, t, args={}): cdef np.ndarray[ndim=1, dtype=complex] coeff = \ np.zeros(self._num_ops, dtype=complex) self._call_core(t, &coeff[0]) return coeff cdef void _call_core(self, double t, complex* coeff): cdef int i for i in range(self._num_ops): coeff[i] = zinterp(t, self.a, self.b, self.c[i,:]) def set_args(self, args): pass def __getstate__(self): return (self._num_ops, self.a, self.b, np.array(self.c)) def __setstate__(self, state): self._num_ops = state[0] self.a = state[1] self.b = state[2] self.c = state[3] 
cdef class InterCoeffCte(CoeffFunc): cdef int n_t cdef double dt cdef double[::1] tlist cdef complex[:,::1] y, M def __init__(self, ops, args, tlist): cdef int i, j self._args = {} self._num_ops = len(ops) self.tlist = tlist self.n_t = len(tlist) self.dt = tlist[1]-tlist[0] self.y = np.zeros((self._num_ops, self.n_t), dtype=complex) self.M = np.zeros((self._num_ops, self.n_t), dtype=complex) for i in range(self._num_ops): m, cte = _prep_cubic_spline(ops[i][2], tlist) if not cte: raise Exception("tlist not sampled uniformly") for j in range(self.n_t): self.y[i,j] = ops[i][2][j] self.M[i,j] = m[j] cdef void _call_core(self, double t, complex* coeff): cdef int i for i in range(self._num_ops): coeff[i] = _spline_complex_cte_second(t, self.tlist, self.y[i,:], self.M[i,:], self.n_t, self.dt) def set_args(self, args): pass def __getstate__(self): return (self._num_ops, self.n_t, self.dt, np.array(self.tlist), np.array(self.y), np.array(self.M)) def __setstate__(self, state): self._num_ops = state[0] self.n_t = state[1] self.dt = state[2] self.tlist = state[3] self.y = state[4] self.M = state[5] cdef class InterCoeffT(CoeffFunc): cdef int n_t cdef double dt cdef double[::1] tlist cdef complex[:,::1] y, M def __init__(self, ops, args, tlist): cdef int i, j self._args = {} self._num_ops = len(ops) self.tlist = tlist self.n_t = len(tlist) self.y = np.zeros((self._num_ops, self.n_t), dtype=complex) self.M = np.zeros((self._num_ops, self.n_t), dtype=complex) for i in range(self._num_ops): m, cte = _prep_cubic_spline(ops[i][2], tlist) if cte: print("tlist not uniform?") for j in range(self.n_t): self.y[i,j] = ops[i][2][j] self.M[i,j] = m[j] cdef void _call_core(self, double t, complex* coeff): cdef int i for i in range(self._num_ops): coeff[i] = _spline_complex_t_second(t, self.tlist, self.y[i,:], self.M[i,:], self.n_t) def set_args(self, args): pass def __getstate__(self): return (self._num_ops, self.n_t, None, np.array(self.tlist), np.array(self.y), np.array(self.M)) def 
__setstate__(self, state): self._num_ops = state[0] self.n_t = state[1] self.tlist = state[3] self.y = state[4] self.M = state[5] cdef class StepCoeff(CoeffFunc): cdef int n_t cdef double[::1] tlist cdef complex[:,::1] y def __init__(self, ops, args, tlist): cdef int i, j self._args = {} self._num_ops = len(ops) self.tlist = tlist self.n_t = len(tlist) self.y = np.zeros((self._num_ops, self.n_t), dtype=complex) for i in range(self._num_ops): for j in range(self.n_t): self.y[i,j] = ops[i][2][j] def set_arg(self, args): pass def __getstate__(self): return (self._num_ops, self.n_t, None, np.array(self.tlist), np.array(self.y)) def __setstate__(self, state): self._num_ops = state[0] self.n_t = state[1] self.tlist = state[3] self.y = state[4] cdef class StepCoeffT(StepCoeff): cdef void _call_core(self, double t, complex* coeff): cdef int i for i in range(self._num_ops): coeff[i] = _step_complex_t(t, self.tlist, self.y[i, :], self.n_t) cdef class StepCoeffCte(StepCoeff): cdef void _call_core(self, double t, complex* coeff): cdef int i for i in range(self._num_ops): coeff[i] = _step_complex_cte(t, self.tlist, self.y[i, :], self.n_t) cdef class StrCoeff(CoeffFunc): def __init__(self, ops, args, tlist, dyn_args=[]): self._num_ops = len(ops) self._args = args self._dyn_args_list = dyn_args self.set_args(args) self._set_dyn_args(dyn_args) def _set_dyn_args(self, dyn_args): self._num_expect = 0 self._expect_op = [] expect_def = [] self._mat_shape[0] = 0 self._mat_shape[1] = 0 if dyn_args: for name, what, op in dyn_args: if what == "expect": self._expect_op.append(op.compiled_qobjevo) expect_def.append(self._args[name]) self._num_expect += 1 elif what == "vec": self._vec = self._args[name] elif what == "mat": self._vec = self._args[name].ravel("F") self._mat_shape[0] = self._args[name].shape[0] self._mat_shape[1] = self._args[name].shape[0] elif what == "Qobj": self._vec = self._args[name].full().ravel("F") self._mat_shape[0] = self._args[name].shape[0] self._mat_shape[1] = 
self._args[name].shape[0] self._expect_vec = np.array(expect_def, dtype=complex) cdef void _dyn_args(self, double t, complex* state, int[::1] shape): cdef int ii, nn = shape[0] * shape[1] self._vec = state self._mat_shape[0] = shape[0] self._mat_shape[1] = shape[1] cdef CQobjEvo cop for ii in range(self._num_expect): cop = self._expect_op[ii] if cop.shape1 != nn: self._expect_vec[ii] = cop._overlapse(t, state) elif cop.super: self._expect_vec[ii] = cop._expect_super(t, state) else: self._expect_vec[ii] = cop._expect(t, state) def __call__(self, double t, args={}, vec=None): cdef np.ndarray[ndim=1, dtype=complex] coeff = \ np.zeros(self._num_ops, dtype=complex) cdef int[2] shape if vec is not None: if isinstance(vec, np.ndarray): self._vec = vec.ravel("F") shape[0] = vec.shape[0] shape[1] = vec.shape[1] else: full = vec.full() self._vec = full.ravel("F") shape[0] = full.shape[0] shape[1] = full.shape[1] self._dyn_args(t, &self._vec[0], shape) if args: now_args = self.args.copy() now_args.update(args) self.set_args(now_args) self._call_core(t, &coeff[0]) self.set_args(self._args) else: self._call_core(t, &coeff[0]) return coeff def __getstate__(self): return (self._num_ops, self._args, self._dyn_args_list) def __setstate__(self, state): self._num_ops = state[0] self._args = state[1] self._dyn_args_list = state[2] self.set_args(self._args) self._set_dyn_args(self._dyn_args_list) qutip-4.4.1/qutip/cy/graph_utils.pyx000066400000000000000000000360571352460343600175430ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. 
Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### import numpy as np cimport numpy as cnp cimport cython from libcpp.algorithm cimport sort from libcpp.vector cimport vector cnp.import_array() include "parameters.pxi" cdef extern from "numpy/arrayobject.h" nogil: void PyArray_ENABLEFLAGS(cnp.ndarray arr, int flags) void PyDataMem_FREE(void * ptr) void PyDataMem_RENEW(void * ptr, size_t size) void PyDataMem_NEW_ZEROED(size_t size, size_t elsize) void PyDataMem_NEW(size_t size) #Struct used for arg sorting cdef struct _int_pair: int data int idx ctypedef _int_pair int_pair ctypedef int (*cfptr)(int_pair, int_pair) @cython.boundscheck(False) @cython.wraparound(False) cdef int int_sort(int_pair x, int_pair y): return x.data < y.data @cython.boundscheck(False) @cython.wraparound(False) cdef int * int_argsort(int * x, int nrows): cdef vector[int_pair] pairs cdef cfptr cfptr_ = &int_sort cdef size_t kk pairs.resize(nrows) for kk in range(nrows): pairs[kk].data = x[kk] pairs[kk].idx = kk sort(pairs.begin(),pairs.end(),cfptr_) cdef int * out = PyDataMem_NEW(nrows *sizeof(int)) for kk in range(nrows): out[kk] = pairs[kk].idx return out @cython.boundscheck(False) @cython.wraparound(False) cpdef int[::1] _node_degrees(int[::1] ind, int[::1] ptr, unsigned int num_rows): cdef size_t ii, jj cdef int[::1] degree = np.zeros(num_rows, dtype=np.int32) for ii in range(num_rows): degree[ii] = ptr[ii + 1] - ptr[ii] for jj in range(ptr[ii], ptr[ii + 1]): if ind[jj] == ii: # add one if the diagonal is in row ii degree[ii] += 1 break return degree @cython.boundscheck(False) @cython.wraparound(False) def _breadth_first_search( cnp.ndarray[ITYPE_t, ndim=1, mode="c"] ind, cnp.ndarray[ITYPE_t, ndim=1, mode="c"] ptr, int num_rows, int seed): """ Does a breath first search (BSF) of a graph in sparse CSR format matrix form starting at a given seed node. 
""" cdef unsigned int i, j, ii, jj, N = 1 cdef unsigned int level_start = 0 cdef unsigned int level_end = N cdef unsigned int current_level = 1 cdef cnp.ndarray[ITYPE_t] order = -1 * np.ones(num_rows, dtype=ITYPE) cdef cnp.ndarray[ITYPE_t] level = -1 * np.ones(num_rows, dtype=ITYPE) level[seed] = 0 order[0] = seed while level_start < level_end: # for nodes of the last level for ii in range(level_start, level_end): i = order[ii] # add unvisited neighbors to queue for jj in range(ptr[i], ptr[i + 1]): j = ind[jj] if level[j] == -1: order[N] = j level[j] = current_level N += 1 level_start = level_end level_end = N current_level += 1 return order, level @cython.boundscheck(False) @cython.wraparound(False) def _reverse_cuthill_mckee(int[::1] ind, int[::1] ptr, int num_rows): """ Reverse Cuthill-McKee ordering of a sparse csr or csc matrix. """ cdef unsigned int N = 0, N_old, seed, level_start, level_end cdef unsigned int zz, i, j, ii, jj, kk, ll, level_len, temp, temp2 cdef cnp.ndarray[int, ndim=1] order = np.zeros(num_rows, dtype=np.int32) cdef int[::1] degree = _node_degrees(ind, ptr, num_rows) cdef int * inds = int_argsort(°ree[0], num_rows) cdef int * rev_inds = int_argsort(inds, num_rows) cdef int * temp_degrees = NULL # loop over zz takes into account possible disconnected graph. 
for zz in range(num_rows): if inds[zz] != -1: # Do BFS with seed=inds[zz] seed = inds[zz] order[N] = seed N += 1 inds[rev_inds[seed]] = -1 level_start = N - 1 level_end = N while level_start < level_end: for ii in range(level_start, level_end): i = order[ii] N_old = N # add unvisited neighbors for jj in range(ptr[i], ptr[i + 1]): # j is node number connected to i j = ind[jj] if inds[rev_inds[j]] != -1: inds[rev_inds[j]] = -1 order[N] = j N += 1 # Add values to temp_degrees array for insertion sort temp_degrees = PyDataMem_RENEW(temp_degrees, (N-N_old)*sizeof(int)) level_len = 0 for kk in range(N_old, N): temp_degrees[level_len] = degree[order[kk]] level_len += 1 # Do insertion sort for nodes from lowest to highest degree for kk in range(1, level_len): temp = temp_degrees[kk] temp2 = order[N_old+kk] ll = kk while (ll > 0) and (temp < temp_degrees[ll-1]): temp_degrees[ll] = temp_degrees[ll-1] order[N_old+ll] = order[N_old+ll-1] ll -= 1 temp_degrees[ll] = temp order[N_old+ll] = temp2 # set next level start and end ranges level_start = level_end level_end = N if N == num_rows: break PyDataMem_FREE(inds) PyDataMem_FREE(rev_inds) PyDataMem_FREE(temp_degrees) # return reversed order for RCM ordering return order[::-1] @cython.boundscheck(False) @cython.wraparound(False) def _pseudo_peripheral_node( cnp.ndarray[ITYPE_t, ndim=1, mode="c"] ind, cnp.ndarray[ITYPE_t, ndim=1, mode="c"] ptr, int num_rows): """ Find a pseudo peripheral node of a graph represented by a sparse csr_matrix. 
""" cdef unsigned int ii, jj, delta, flag, node, start cdef int maxlevel, minlevel, minlastnodesdegree cdef cnp.ndarray[cnp.intp_t] lastnodes cdef cnp.ndarray[cnp.intp_t] lastnodesdegree cdef cnp.ndarray[cnp.intp_t] degree = np.zeros(num_rows, dtype=ITYPE) degree = _node_degrees(ind, ptr, num_rows).astype(ITYPE) start = 0 delta = 0 flag = 1 while flag: # do a level-set traversal from x order, level = _breadth_first_search(ind, ptr, num_rows, start) # select node in last level with min degree maxlevel = max(level) lastnodes = np.where(level == maxlevel)[0] lastnodesdegree = degree[lastnodes] minlastnodesdegree = min(lastnodesdegree) node = np.where(lastnodesdegree == minlastnodesdegree)[0][0] node = lastnodes[node] # if d(x,y) > delta, set, and do another BFS from this minimal node if level[node] > delta: start = node delta = level[node] else: flag = 0 return start, order, level @cython.boundscheck(False) @cython.wraparound(False) def _maximum_bipartite_matching( cnp.ndarray[ITYPE_t, ndim=1, mode="c"] inds, cnp.ndarray[ITYPE_t, ndim=1, mode="c"] ptrs, int n): cdef cnp.ndarray[ITYPE_t] visited = np.zeros(n, dtype=ITYPE) cdef cnp.ndarray[ITYPE_t] queue = np.zeros(n, dtype=ITYPE) cdef cnp.ndarray[ITYPE_t] previous = np.zeros(n, dtype=ITYPE) cdef cnp.ndarray[ITYPE_t] match = -1 * np.ones(n, dtype=ITYPE) cdef cnp.ndarray[ITYPE_t] row_match = -1 * np.ones(n, dtype=ITYPE) cdef int queue_ptr, queue_col, ptr, i, j, queue_size cdef int row, col, temp, eptr, next_num = 1 for i in range(n): if match[i] == -1 and (ptrs[i] != ptrs[i + 1]): queue[0] = i queue_ptr = 0 queue_size = 1 while (queue_size > queue_ptr): queue_col = queue[queue_ptr] queue_ptr += 1 eptr = ptrs[queue_col + 1] for ptr in range(ptrs[queue_col], eptr): row = inds[ptr] temp = visited[row] if (temp != next_num and temp != -1): previous[row] = queue_col visited[row] = next_num col = row_match[row] if (col == -1): while (row != -1): col = previous[row] temp = match[col] match[col] = row row_match[row] = col row = 
temp next_num += 1 queue_size = 0 break else: queue[queue_size] = col queue_size += 1 if match[i] == -1: for j in range(1, queue_size): visited[match[queue[j]]] = -1 return match @cython.boundscheck(False) @cython.wraparound(False) def _max_row_weights( double[::1] data, int[::1] inds, int[::1] ptrs, int ncols): """ Finds the largest abs value in each matrix column and the max. total number of elements in the cols (given by weights[-1]). Here we assume that the user already took the ABS value of the data. This keeps us from having to call abs over and over. """ cdef cnp.ndarray[DTYPE_t] weights = np.zeros(ncols + 1, dtype=DTYPE) cdef int ln, mx, ii, jj cdef DTYPE_t weight, current mx = 0 for jj in range(ncols): ln = (ptrs[jj + 1] - ptrs[jj]) if ln > mx: mx = ln weight = data[ptrs[jj]] for ii in range(ptrs[jj] + 1, ptrs[jj + 1]): current = data[ii] if current > weight: weight = current weights[jj] = weight weights[ncols] = mx return weights @cython.boundscheck(False) @cython.wraparound(False) def _weighted_bipartite_matching( double[::1] data, int[::1] inds, int[::1] ptrs, int n): """ Here we assume that the user already took the ABS value of the data. This keeps us from having to call abs over and over. 
""" cdef cnp.ndarray[ITYPE_t] visited = np.zeros(n, dtype=ITYPE) cdef cnp.ndarray[ITYPE_t] queue = np.zeros(n, dtype=ITYPE) cdef cnp.ndarray[ITYPE_t] previous = np.zeros(n, dtype=ITYPE) cdef cnp.ndarray[ITYPE_t] match = -1 * np.ones(n, dtype=ITYPE) cdef cnp.ndarray[ITYPE_t] row_match = -1 * np.ones(n, dtype=ITYPE) cdef cnp.ndarray[DTYPE_t] weights = _max_row_weights(data, inds, ptrs, n) cdef cnp.ndarray[ITYPE_t] order = np.argsort(-weights[0:n]).astype(ITYPE) cdef cnp.ndarray[ITYPE_t] row_order = np.zeros(int(weights[n]), dtype=ITYPE) cdef cnp.ndarray[DTYPE_t] temp_weights = np.zeros(int(weights[n]), dtype=DTYPE) cdef int queue_ptr, queue_col, queue_size, next_num cdef int i, j, zz, ll, kk, row, col, temp, eptr, temp2 next_num = 1 for i in range(n): zz = order[i] # cols with largest abs values first if (match[zz] == -1 and (ptrs[zz] != ptrs[zz + 1])): queue[0] = zz queue_ptr = 0 queue_size = 1 while (queue_size > queue_ptr): queue_col = queue[queue_ptr] queue_ptr += 1 eptr = ptrs[queue_col + 1] # get row inds in current column temp = ptrs[queue_col] for kk in range(eptr - ptrs[queue_col]): row_order[kk] = inds[temp] temp_weights[kk] = data[temp] temp += 1 # linear sort by row weight for kk in range(1, (eptr - ptrs[queue_col])): val = temp_weights[kk] row_val = row_order[kk] ll = kk - 1 while (ll >= 0) and (temp_weights[ll] > val): temp_weights[ll + 1] = temp_weights[ll] row_order[ll + 1] = row_order[ll] ll -= 1 temp_weights[ll + 1] = val row_order[ll + 1] = row_val # go through rows by decending weight temp2 = (eptr - ptrs[queue_col]) - 1 for kk in range(eptr - ptrs[queue_col]): row = row_order[temp2 - kk] temp = visited[row] if temp != next_num and temp != -1: previous[row] = queue_col visited[row] = next_num col = row_match[row] if col == -1: while row != -1: col = previous[row] temp = match[col] match[col] = row row_match[row] = col row = temp next_num += 1 queue_size = 0 break else: queue[queue_size] = col queue_size += 1 if match[zz] == -1: for j in range(1, 
queue_size): visited[match[queue[j]]] = -1 return match qutip-4.4.1/qutip/cy/heom.pyx000066400000000000000000000066471352460343600161540ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### import numpy as np cimport numpy as cnp cimport cython @cython.boundscheck(False) @cython.wraparound(False) def cy_pad_csr(object A, int row_scale, int col_scale, int insertrow=0, int insertcol=0): cdef int nrowin = A.shape[0] cdef int ncolin = A.shape[1] cdef int nnz = A.indptr[nrowin] cdef int nrowout = nrowin*row_scale cdef int ncolout = ncolin*col_scale cdef size_t kk cdef int temp, temp2 cdef int[::1] ind = A.indices cdef int[::1] ptr_in = A.indptr cdef cnp.ndarray[int, ndim=1, mode='c'] ptr_out = np.zeros(nrowout+1,dtype=np.int32) A._shape = (nrowout, ncolout) if insertcol == 0: pass elif insertcol > 0 and insertcol < col_scale: temp = insertcol*ncolin for kk in range(nnz): ind[kk] += temp else: raise ValueError("insertcol must be >= 0 and < col_scale") if insertrow == 0: temp = ptr_in[nrowin] for kk in range(nrowin): ptr_out[kk] = ptr_in[kk] for kk in range(nrowin, nrowout+1): ptr_out[kk] = temp elif insertrow == row_scale-1: temp = (row_scale - 1) * nrowin for kk in range(temp, nrowout+1): ptr_out[kk] = ptr_in[kk-temp] elif insertrow > 0 and insertrow < row_scale - 1: temp = insertrow*nrowin for kk in range(temp, temp+nrowin): ptr_out[kk] = ptr_in[kk-temp] temp = kk+1 temp2 = ptr_in[nrowin] for kk in range(temp, nrowout+1): ptr_out[kk] = temp2 else: raise ValueError("insertrow must be >= 0 and < row_scale") A.indptr = ptr_out return A qutip-4.4.1/qutip/cy/inter.pxd000066400000000000000000000055151352460343600163110ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. 
Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### cdef complex _spline_complex_t_second(double x, double[::1] t, complex[::1] y, complex[::1] M, int N) cdef complex _spline_complex_cte_second(double x, double[::1] t, complex[::1] y, complex[::1] M, int N, double dt) cdef double _spline_float_t_second(double x, double[::1] t, double[::1] y, double[::1] M, int N) cdef double _spline_float_cte_second(double x, double[::1] t, double[::1] y, double[::1] M, int N, double dt) cdef double _step_float_cte(double x, double[::1] t, double[::1] y, int n_t) cdef complex _step_complex_cte(double x, double[::1] t, complex[::1] y, int n_t) cdef double _step_float_t(double x, double[::1] t, double[::1] y, int n_t) cdef complex _step_complex_t(double x, double[::1] t, complex[::1] y, int n_t)qutip-4.4.1/qutip/cy/inter.pyx000066400000000000000000000177721352460343600163460ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. 
# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### """ Second version of cublicspline interpolation. (in parallel with interpolate) - Accept non-uniformely sampled data. - Faster but use more memory than interpolate - No python interface, used by QobjEvo. """ import cython cimport cython import numpy as np cimport numpy as cnp import scipy.linalg def _prep_cubic_spline(array, tlist): """ Prepare coefficients for interpalation of array. boudary conditions assumed: second derivative null at the extremities. Parameters ---------- array : nd.array of double / complex Array to interpolate tlist : nd.array of double times or x of the array, must be inscreasing. The step size do not need to be constant. Returns ------- np.array the second derivative at each time """ n_t = len(tlist) M = np.zeros((3,n_t), dtype=array.dtype) x = np.zeros(n_t, dtype=array.dtype) M[1,:] = 2. 
dt_cte = True dt0 = tlist[1]-tlist[0] for i in range(1,n_t-1): dt1 = tlist[i]-tlist[i-1] dt2 = tlist[i+1]-tlist[i] if ((dt2 - dt0) > 10e-10): dt_cte = False M[0,i+1] = dt1 / (dt1+dt2) M[2,i-1] = dt2 / (dt1+dt2) x[i] = ((array[i-1] - array[i]) / dt1 - (array[i] - array[i+1]) / dt2) \ * 6 / (dt1+dt2) Ms = scipy.linalg.solve_banded((1,1), M, x, True, True) / 6 return (Ms, dt_cte) @cython.wraparound(False) @cython.boundscheck(False) @cython.cdivision(True) cdef int _binary_search(double x, double[::1] t, int n): #Binary search for the interval cdef int low = 0 cdef int high = n cdef int middle cdef int count = 0 while low+1 != high and count < 30: middle = (low+high)//2 if x < t[middle]: high = middle else: low = middle count += 1 return low @cython.wraparound(False) @cython.boundscheck(False) @cython.cdivision(True) cdef double _spline_float_cte_second(double x, double[::1] t, double[::1] y, double[::1] M, int n_t, double dt): # inbound? if x < t[0]: return y[0] elif x > t[n_t-1]: return y[n_t-1] cdef double xx = (x-t[0])/dt cdef int p = xx cdef double tb = (xx - p) cdef double te = 1 - tb cdef double dt2 = dt * dt cdef double Me = M[p+1] * dt2 cdef double Mb = M[p] * dt2 return te * (Mb * te * te + (y[p] - Mb)) + \ tb * (Me * tb * tb + (y[p+1] - Me)) @cython.wraparound(False) @cython.boundscheck(False) @cython.cdivision(True) cdef double _spline_float_t_second(double x, double[::1] t, double[::1] y, double[::1] M, int n_t): # inbound? 
if x < t[0]: return y[0] elif x > t[n_t-1]: return y[n_t-1] cdef int p = _binary_search(x, t, n_t) cdef double dt = t[p+1] - t[p] cdef double tb = (x - t[p]) / dt cdef double te = 1 - tb cdef double dt2 = dt * dt cdef double Me = M[p+1] * dt2 cdef double Mb = M[p] * dt2 return te * (Mb * te * te + (y[p] - Mb)) + \ tb * (Me * tb * tb + (y[p+1] - Me)) @cython.wraparound(False) @cython.boundscheck(False) @cython.cdivision(True) cdef complex _spline_complex_cte_second(double x, double[::1] t, complex[::1] y, complex[::1] M, int n_t, double dt): # inbound? if x < t[0]: return y[0] elif x > t[n_t-1]: return y[n_t-1] cdef double xx = (x-t[0])/dt cdef int p = xx cdef double tb = (xx - p) cdef double te = 1 - tb cdef double dt2 = dt * dt cdef complex Me = M[p+1] * dt2 cdef complex Mb = M[p] * dt2 return te * (Mb * te * te + (y[p] - Mb)) + \ tb * (Me * tb * tb + (y[p+1] - Me)) @cython.wraparound(False) @cython.boundscheck(False) @cython.cdivision(True) cdef complex _spline_complex_t_second(double x, double[::1] t, complex[::1] y, complex[::1] M, int n_t): # inbound? 
if x < t[0]: return y[0] elif x > t[n_t-1]: return y[n_t-1] cdef int p = _binary_search(x, t, n_t) cdef double dt = t[p+1] - t[p] cdef double tb = (x - t[p]) / dt cdef double te = 1 - tb cdef double dt2 = dt * dt cdef complex Me = M[p+1] * dt2 cdef complex Mb = M[p] * dt2 return te * (Mb * te * te + (y[p] - Mb)) + \ tb * (Me * tb * tb + (y[p+1] - Me)) @cython.wraparound(False) @cython.boundscheck(False) @cython.cdivision(True) cdef double _step_float_cte(double x, double[::1] t, double[::1] y, int n_t): if x < t[0]: return y[0] elif x >= t[n_t-1]: return y[n_t-1] cdef int p = ((x-t[0]) / (t[n_t-1]-t[0]) * (n_t-1)) return y[p] @cython.wraparound(False) @cython.boundscheck(False) @cython.cdivision(True) cdef complex _step_complex_cte(double x, double[::1] t, complex[::1] y, int n_t): if x < t[0]: return y[0] elif x >= t[n_t-1]: return y[n_t-1] cdef int p = ((x-t[0]) / (t[n_t-1]-t[0]) * (n_t-1)) return y[p] @cython.wraparound(False) @cython.boundscheck(False) @cython.cdivision(True) cdef double _step_float_t(double x, double[::1] t, double[::1] y, int n_t): if x < t[0]: return y[0] elif x >= t[n_t-1]: return y[n_t-1] # TODO this can be moved out for better performance cdef int p = _binary_search(x, t, n_t) return y[p] @cython.wraparound(False) @cython.boundscheck(False) @cython.cdivision(True) cdef complex _step_complex_t(double x, double[::1] t, complex[::1] y, int n_t): if x < t[0]: return y[0] elif x >= t[n_t-1]: return y[n_t-1] # TODO this can be moved out for better performance cdef int p = _binary_search(x, t, n_t) return y[p] qutip-4.4.1/qutip/cy/interpolate.pxd000066400000000000000000000037011352460343600175110ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### cpdef double interp(double x, double a, double b, double[::1] c) cpdef complex zinterp(double x, double a, double b, complex[::1] c) qutip-4.4.1/qutip/cy/interpolate.pyx000066400000000000000000000107211352460343600175360ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, The QuTiP Project. # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### import numpy as np cimport numpy as cnp from libc.math cimport (fabs, fmin) cimport cython @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef inline double phi(double t): cdef double abs_t = fabs(t) if abs_t <= 1: return 4 - 6 * abs_t**2 + 3 * abs_t**3 elif abs_t <= 2: return (2-abs_t)**3 else: return 0 @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cpdef double interp(double x, double a, double b, double[::1] c): cdef int n = c.shape[0] - 3 cdef double h = (b-a) / n cdef int l = ((x-a)/h) + 1 cdef int m = (fmin(l+3, n+3)) cdef size_t ii cdef double s = 0, _tmp cdef double pos = (x-a)/h + 2 for ii in range(l, m+1): _tmp = phi(pos - ii) if _tmp: s += c[ii-1] * _tmp return s @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cpdef complex zinterp(double x, double a, double b, complex[::1] c): cdef int n = c.shape[0] - 3 cdef double h = (b-a) / n cdef int l = ((x-a)/h) + 1 cdef int m = (fmin(l+3, n+3)) cdef size_t ii cdef complex s = 0 cdef double _tmp, pos = (x-a)/h + 2 for ii in range(l, m+1): _tmp = phi(pos - ii) if _tmp: s += c[ii-1] * _tmp return s @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) def arr_interp(double[::1] x, double a, double b, double[::1] c): cdef int lenx = x.shape[0] cdef int lenc = c.shape[0] cdef int n = lenc - 3 cdef double h = (b-a) / n cdef size_t ii, jj cdef int l, m cdef double pos, _tmp cdef cnp.ndarray[double, ndim=1, mode="c"] out = np.zeros(lenx, dtype=float) for jj in range(lenx): l = ((x[jj]-a)/h) + 1 m = (fmin(l+3, n+3)) pos = (x[jj]-a)/h + 2 for ii in range(l, m+1): _tmp = phi(pos - ii) if _tmp: out[jj] += c[ii-1] * _tmp return out @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) def arr_zinterp(double[::1] x, double a, double b, complex[::1] c): cdef int lenx = x.shape[0] cdef int lenc = c.shape[0] cdef int 
n = lenc - 3 cdef double h = (b-a) / n cdef size_t ii, jj cdef int l, m cdef double pos, _tmp cdef cnp.ndarray[complex, ndim=1, mode="c"] out = np.zeros(lenx, dtype=complex) for jj in range(lenx): l = ((x[jj]-a)/h) + 1 m = (fmin(l+3, n+3)) pos = (x[jj]-a)/h + 2 for ii in range(l, m+1): _tmp = phi(pos - ii) if _tmp: out[jj] = out[jj] + c[ii-1] * _tmp return out qutip-4.4.1/qutip/cy/math.pxd000066400000000000000000000035541352460343600161220ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, The QuTiP Project. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### cdef double erf(double x) cdef double complex zerf(double complex Z) qutip-4.4.1/qutip/cy/math.pyx000066400000000000000000000141671352460343600161510ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, The QuTiP Project. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### cimport cython from libc.math cimport (fabs, sinh, cosh, exp, pi, sqrt, cos, sin, copysign) cdef extern from "" namespace "std" nogil: double real(double complex x) double imag(double complex x) @cython.boundscheck(False) @cython.cdivision(True) cdef double erf(double x): """ A Cython version of the erf function from the cdflib in SciPy. """ cdef double c = 0.564189583547756 cdef double a[5] a[:] = [0.771058495001320e-4, -0.133733772997339e-2, 0.323076579225834e-1, 0.479137145607681e-1, 0.128379167095513] cdef double b[3] b[:] = [0.301048631703895e-2, 0.538971687740286e-1, .375795757275549] cdef double p[8] p[:] = [-1.36864857382717e-7, 5.64195517478974e-1, 7.21175825088309, 4.31622272220567e1, 1.52989285046940e2, 3.39320816734344e2, 4.51918953711873e2, 3.00459261020162e2] cdef double q[8] q[:]= [1.0, 1.27827273196294e1, 7.70001529352295e1, 2.77585444743988e2, 6.38980264465631e2, 9.31354094850610e2, 7.90950925327898e2, 3.00459260956983e2] cdef double r[5] r[:] = [2.10144126479064, 2.62370141675169e1, 2.13688200555087e1, 4.65807828718470, 2.82094791773523e-1] cdef double s[4] s[:] = [9.41537750555460e1, 1.87114811799590e2, 9.90191814623914e1, 1.80124575948747e1] cdef double ax = fabs(x) cdef double t, x2, top, bot, erf if ax <= 0.5: t = x*x top = ((((a[0]*t+a[1])*t+a[2])*t+a[3])*t+a[4]) + 1.0 bot = ((b[0]*t+b[1])*t+b[2])*t + 1.0 erf = x * (top/bot) return erf elif ax 
<= 4.0: x2 = x*x top = ((((((p[0]*ax+p[1])*ax+p[2])*ax+p[3])*ax+p[4])*ax+p[5])*ax+p[6])*ax + p[7] bot = ((((((q[0]*ax+q[1])*ax+q[2])*ax+q[3])*ax+q[4])*ax+q[5])*ax+q[6])*ax + q[7] erf = 0.5 + (0.5-exp(-x2)*top/bot) if x < 0.0: erf = -erf return erf elif ax < 5.8: x2 = x*x t = 1.0/x2 top = (((r[0]*t+r[1])*t+r[2])*t+r[3])*t + r[4] bot = (((s[0]*t+s[1])*t+s[2])*t+s[3])*t + 1.0 erf = (c-top/(x2*bot))/ax erf = 0.5 + (0.5-exp(-x2)*erf) if x < 0.0: erf = -erf return erf else: erf = copysign(1.0, x) return erf # COMPUTATION OF SPECIAL FUNCTIONS # # Shanjie Zhang and Jianming Jin # # Copyrighted but permission granted to use code in programs. # Buy their book "Computation of Special Functions", 1996, John Wiley & Sons, Inc. @cython.cdivision(True) @cython.boundscheck(False) cdef double complex zerf(double complex Z): """ Parameters ---------- Z : double complex Input parameter. X : double Real part of Z. Y : double Imag part of Z. Returns ------- erf(z) : double complex """ cdef double EPS = 1e-12 cdef double X = real(Z) cdef double Y = imag(Z) cdef double X2 = X * X cdef double ER, R, W, C0, ER0, ERR, ERI, CS, SS, ER1, EI1, ER2, W1 cdef size_t K, N if X < 3.5: ER = 1.0 R = 1.0 W = 0.0 for K in range(1, 100): R = R*X2/(K+0.5) ER = ER+R if (fabs(ER-W) < EPS*fabs(ER)): break W = ER C0 = 2.0/sqrt(pi)*X*exp(-X2) ER0 = C0*ER else: ER = 1.0 R=1.0 for K in range(1, 12): R = -R*(K-0.5)/X2 ER = ER+R C0 = exp(-X2)/(X*sqrt(pi)) ER0 = 1.0-C0*ER if Y == 0.0: ERR = ER0 ERI = 0.0 else: CS = cos(2.0*X*Y) SS = sin(2.0*X*Y) ER1 = exp(-X2)*(1.0-CS)/(2.0*pi*X) EI1 = exp(-X2)*SS/(2.0*pi*X) ER2 = 0.0 W1 = 0.0 for N in range(1,100): ER2 = ER2+exp(-.25*N*N)/(N*N+4.0*X2)*(2.0*X-2.0*X*cosh(N*Y)*CS+N*sinh(N*Y)*SS) if (fabs((ER2-W1)/ER2) < EPS): break W1 = ER2 C0 = 2.0*exp(-X2)/pi ERR = ER0+ER1+C0*ER2 EI2 = 0.0 W2 = 0.0 for N in range(1,100): EI2 = EI2+exp(-.25*N*N)/(N*N+4.0*X2)*(2.0*X*cosh(N*Y)*SS+N*sinh(N*Y)*CS) if (fabs((EI2-W2)/EI2) < EPS): break W2 = EI2 ERI = EI1+C0*EI2 return ERR + 
1j*ERIqutip-4.4.1/qutip/cy/mcsolve.pyx000066400000000000000000000423311352460343600166620ustar00rootroot00000000000000#!python #cython: language_level=3 ## cython: profile=True ## cython: linetrace=True # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### import numpy as np cimport numpy as np cimport cython import scipy.sparse as sp from scipy.linalg.cython_blas cimport dznrm2 as raw_dznrm2 from qutip.qobj import Qobj from qutip.cy.cqobjevo cimport CQobjEvo from qutip.cy.spmatfuncs cimport cy_expect_psi # from qutip.cy.dopri5 import ode_td_dopri #from qutip.cy.complex_math cimport conj include "complex_math.pxi" cdef int ONE = 1 cdef double dznrm2(complex[::1] psi): cdef int l = psi.shape[0] return raw_dznrm2(&l, &psi[0], &ONE) @cython.cdivision(True) @cython.boundscheck(False) @cython.wraparound(False) cdef np.ndarray[complex, ndim=1] normalize(complex[::1] psi): cdef int i, l = psi.shape[0] cdef double norm = dznrm2(psi) cdef np.ndarray[ndim=1, dtype=complex] out = np.empty(l, dtype=complex) for i in range(l): out[i] = psi[i] / norm return out cdef class CyMcOde: cdef: int steady_state, store_states, col_args int norm_steps, l_vec, num_ops double norm_t_tol, norm_tol list collapses list collapses_args list c_ops list n_ops complex[:,::1] states_out complex[:,::1] ss_out double[::1] n_dp def __init__(self, ss, opt): self.c_ops = ss.td_c_ops self.n_ops = ss.td_n_ops self.norm_steps = opt.norm_steps self.norm_t_tol = opt.norm_t_tol self.norm_tol = opt.norm_tol self.steady_state = opt.steady_state_average self.store_states = opt.store_states or opt.average_states self.collapses = [] self.l_vec = self.c_ops[0].cte.shape[0] self.num_ops = len(ss.td_n_ops) self.n_dp = np.zeros(self.num_ops) if ss.col_args: self.col_args = 1 self.collapses_args = ss.args[ss.col_args] if ss.type == "QobjEvo": ss.H_td.coeff_get.get_args()[ss.col_args] = self.collapses_args for c in ss.td_c_ops: c.coeff_get.get_args()[ss.col_args] = self.collapses_args for c in ss.td_n_ops: c.coeff_get.get_args()[ss.col_args] = self.collapses_args else: self.col_args = 0 if self.steady_state: self.ss_out = np.zeros((self.l_vec, self.l_vec), dtype=complex) else: self.ss_out = 
np.zeros((0, 0), dtype=complex) @cython.boundscheck(False) @cython.wraparound(False) cdef void sumsteadystate(self, complex[::1] state): cdef int ii, jj, l_vec l_vec = state.shape[0] for ii in range(l_vec): for jj in range(l_vec): self.ss_out[ii,jj] += state[ii]*conj(state[jj]) @cython.boundscheck(False) @cython.wraparound(False) def run_ode(self, ODE, tlist_, e_call, prng): cdef np.ndarray[double, ndim=1] rand_vals cdef np.ndarray[double, ndim=1] tlist = np.array(tlist_, dtype=np.double) cdef np.ndarray[complex, ndim=1] y_prev cdef np.ndarray[complex, ndim=1] out_psi = ODE._y cdef int num_times = tlist.shape[0] cdef int ii, which, k cdef double norm2_prev, norm2_psi cdef double t_prev if self.steady_state: self.sumsteadystate(out_psi) if self.store_states: self.states_out = np.zeros((num_times, self.l_vec), dtype=complex) for ii in range(self.l_vec): self.states_out[0, ii] = out_psi[ii] else: self.states_out = np.zeros((1, self.l_vec), dtype=complex) e_call.step(0, out_psi) rand_vals = prng.rand(2) # RUN ODE UNTIL EACH TIME IN TLIST norm2_prev = dznrm2(ODE._y) ** 2 for k in range(1, num_times): # ODE WHILE LOOP FOR INTEGRATE UP TO TIME TLIST[k] t_prev = ODE.t y_prev = ODE.y while t_prev < tlist[k]: # integrate up to tlist[k], one step at a time. ODE.integrate(tlist[k], step=1) if not ODE.successful(): print(ODE.t, t_prev, tlist[k]) print(ODE._integrator.call_args) raise Exception("ZVODE failed!") norm2_psi = dznrm2(ODE._y) ** 2 if norm2_psi <= rand_vals[0]: # collapse has occured: self._find_collapse(ODE, norm2_psi, t_prev, y_prev, norm2_prev, rand_vals[0]) t_prev = ODE.t y_prev = ODE.y which = self._which_collapse(t_prev, y_prev, rand_vals[1]) y_prev = self._collapse(t_prev, which, y_prev) ODE.set_initial_value(y_prev, t_prev) self.collapses.append((t_prev, which)) if self.col_args: self.collapses_args.append((t_prev, which)) rand_vals = prng.rand(2) norm2_prev = 1. 
# dznrm2(ODE._y)**2 else: norm2_prev = norm2_psi t_prev = ODE.t y_prev = ODE.y # after while loop # ---------------- out_psi = normalize(ODE._y) e_call.step(k, out_psi) if self.steady_state: self.sumsteadystate(out_psi) if self.store_states: for ii in range(self.l_vec): self.states_out[k, ii] = out_psi[ii] if not self.store_states: for ii in range(self.l_vec): self.states_out[0, ii] = out_psi[ii] return np.array(self.states_out), np.array(self.ss_out), self.collapses @cython.cdivision(True) @cython.boundscheck(False) @cython.wraparound(False) cdef void _find_collapse(self, ODE, double norm2_psi, double t_prev, np.ndarray[complex, ndim=1] y_prev, double norm2_prev, double target_norm): # find collapse time to within specified tolerance cdef int ii = 0 cdef double t_final = ODE.t cdef double t_guess, norm2_guess while ii < self.norm_steps: ii += 1 if (t_final - t_prev) < self.norm_t_tol: t_prev = t_final y_prev = ODE.y break t_guess = (t_prev + (log(norm2_prev / target_norm)).real / (log(norm2_prev / norm2_psi)).real * (t_final - t_prev)) if (t_guess - t_prev) < self.norm_t_tol: t_guess = t_prev + self.norm_t_tol ODE.t = t_prev ODE._y = y_prev ODE._integrator.call_args[3] = 1 ODE.integrate(t_guess, step=0) if not ODE.successful(): raise Exception("ZVODE failed after adjusting step size!") norm2_guess = dznrm2(ODE._y)**2 if (np.abs(target_norm - norm2_guess) < self.norm_tol * target_norm): break elif (norm2_guess < target_norm): # t_guess is still > t_jump t_final = t_guess norm2_psi = norm2_guess else: # t_guess < t_jump t_prev = t_guess y_prev = ODE.y norm2_prev = norm2_guess if ii > self.norm_steps: raise Exception("Norm tolerance not reached. 
" + "Increase accuracy of ODE solver or " + "Options.norm_steps.") @cython.cdivision(True) @cython.boundscheck(False) @cython.wraparound(False) cdef int _which_collapse(self, double t, complex[::1] y, double rand): # determine which operator does collapse cdef int ii, j = self.num_ops cdef double e, sum_ = 0 cdef CQobjEvo cobj for ii in range(self.num_ops): cobj = self.n_ops[ii].compiled_qobjevo e = real(cobj._expect(t, &y[0])) self.n_dp[ii] = e sum_ += e rand *= sum_ for ii in range(self.num_ops): if rand <= self.n_dp[ii]: j = ii break else: rand -= self.n_dp[ii] return j @cython.cdivision(True) @cython.boundscheck(False) @cython.wraparound(False) cdef np.ndarray[complex, ndim=1] _collapse(self, double t, int j, complex[::1] y): # np.ndarray[complex, ndim=1] y): cdef CQobjEvo cobj cdef np.ndarray[complex, ndim=1] state cobj = self.c_ops[j].compiled_qobjevo state = cobj.mul_vec(t, y) state = normalize(state) return state cdef class CyMcOdeDiag(CyMcOde): cdef: complex[::1] diag complex[::1] diag_dt complex[::1] psi complex[::1] psi_temp double t object prng def __init__(self, ss, opt): self.c_ops = ss.td_c_ops self.n_ops = ss.td_n_ops self.diag = ss.H_diag self.norm_steps = opt.norm_steps self.norm_t_tol = opt.norm_t_tol self.norm_tol = opt.norm_tol self.steady_state = opt.steady_state_average self.store_states = opt.store_states or opt.average_states self.collapses = [] self.l_vec = self.c_ops[0].cte.shape[0] self.num_ops = len(ss.td_n_ops) self.n_dp = np.zeros(self.num_ops) self.col_args = 0 if self.steady_state: self.ss_out = np.zeros((self.l_vec, self.l_vec), dtype=complex) else: self.ss_out = np.zeros((0, 0), dtype=complex) @cython.boundscheck(False) @cython.wraparound(False) cdef void qode(self, complex[::1] psi_new): cdef int i for i in range(self.l_vec): psi_new[i] = self.diag_dt[i] * self.psi[i] @cython.boundscheck(False) @cython.wraparound(False) cdef void ode(self, double t_new, complex[::1] psi_new): cdef int i cdef double dt = t_new - self.t for i in 
range(self.l_vec): psi_new[i] = exp(self.diag[i]*dt) * self.psi[i] @cython.cdivision(True) @cython.boundscheck(False) @cython.wraparound(False) cdef double advance(self, double t_target, double norm2_prev, double[::1] rand_vals, int use_quick): target_norm = rand_vals[0] if use_quick: self.qode(self.psi_temp) else: self.ode(t_target, self.psi_temp) norm2_psi = dznrm2(self.psi_temp) ** 2 if norm2_psi <= target_norm: # collapse has occured: self._find_collapse_diag(norm2_psi, t_target, self.psi_temp, norm2_prev, target_norm) which = self._which_collapse(self.t, self.psi, rand_vals[1]) self.psi = self._collapse(self.t, which, self.psi) self.collapses.append((self.t, which)) prn = self.prng.rand(2) rand_vals[0] = prn[0] rand_vals[1] = prn[1] norm2_psi = 1. else: self.t = t_target for ii in range(self.l_vec): self.psi[ii] = self.psi_temp[ii] return norm2_psi @cython.boundscheck(False) @cython.wraparound(False) def run_ode(self, initial_vector, tlist_, e_call, prng): cdef np.ndarray[double, ndim=1] rand_vals cdef np.ndarray[double, ndim=1] tlist = np.array(tlist_) cdef np.ndarray[complex, ndim=1] out_psi = initial_vector.copy() cdef int ii, which, k, use_quick, num_times = tlist.shape[0] cdef double norm2_prev dt = tlist_[1]-tlist_[0] if np.allclose(np.diff(tlist_), dt): use_quick = 1 self.diag_dt = np.zeros(self.l_vec, dtype=complex) for ii in range(self.l_vec): self.diag_dt[ii] = exp(self.diag[ii]*dt) else: use_quick = 0 if self.steady_state: self.sumsteadystate(out_psi) if self.store_states: self.states_out = np.zeros((num_times, self.l_vec), dtype=complex) for ii in range(self.l_vec): self.states_out[0, ii] = out_psi[ii] else: self.states_out = np.zeros((1, self.l_vec), dtype=complex) e_call.step(0, out_psi) rand_vals = prng.rand(2) self.prng = prng # RUN ODE UNTIL EACH TIME IN TLIST self.psi = initial_vector.copy() self.psi_temp = initial_vector.copy() self.t = tlist[0] norm2_prev = dznrm2(self.psi) ** 2 for k in range(1, num_times): #print(self.t, tlist[k], 
norm2_prev, rand_vals[0]) norm2_prev = self.advance(tlist[k], norm2_prev, rand_vals, use_quick) while self.t < tlist[k]: #print(self.t, tlist[k], norm2_prev, rand_vals[0]) norm2_prev = self.advance(tlist[k], norm2_prev, rand_vals, 0) # after while loop # ---------------- out_psi = normalize(self.psi) e_call.step(k, out_psi) if self.steady_state: self.sumsteadystate(out_psi) if self.store_states: for ii in range(self.l_vec): self.states_out[k, ii] = out_psi[ii] if not self.store_states: for ii in range(self.l_vec): self.states_out[0, ii] = out_psi[ii] return np.array(self.states_out), np.array(self.ss_out), self.collapses @cython.cdivision(True) @cython.boundscheck(False) @cython.wraparound(False) cpdef void _find_collapse_diag(self, double norm2_psi, double t_final, complex[::1] y_new, double norm2_prev, double target_norm): # find collapse time to within specified tolerance cdef int ii = 0, jj cdef double t_guess, norm2_guess # cdef double t_final = ODE.t #print("before", self.t, norm2_prev) #print("after", t_final, norm2_psi) #print("target", target_norm) while ii < self.norm_steps: ii += 1 if (t_final - self.t) < self.norm_t_tol: self.t = t_final for jj in range(self.l_vec): self.psi[jj] = y_new[jj] break t_guess = (self.t + (log(norm2_prev / target_norm)).real / (log(norm2_prev / norm2_psi)).real * (t_final - self.t)) if (t_guess - self.t) < self.norm_t_tol: t_guess = self.t + self.norm_t_tol self.ode(t_guess, y_new) norm2_guess = dznrm2(y_new)**2 #print(ii, "guess", t_guess, norm2_guess) if (np.abs(target_norm - norm2_guess) < self.norm_tol * target_norm): self.t = t_guess for jj in range(self.l_vec): self.psi[jj] = y_new[jj] #print("found") break elif (norm2_guess < target_norm): # t_guess is still > t_jump t_final = t_guess norm2_psi = norm2_guess else: # t_guess < t_jump self.t = t_guess for jj in range(self.l_vec): self.psi[jj] = y_new[jj] norm2_prev = norm2_guess #print("finish", ii, self.norm_steps) if ii > self.norm_steps: raise Exception("Norm 
tolerance not reached. " + "Increase accuracy of ODE solver or " + "Options.norm_steps.") qutip-4.4.1/qutip/cy/openmp/000077500000000000000000000000001352460343600157435ustar00rootroot00000000000000qutip-4.4.1/qutip/cy/openmp/__init__.py000077500000000000000000000000011352460343600200460ustar00rootroot00000000000000 qutip-4.4.1/qutip/cy/openmp/bench_openmp.py000066400000000000000000000115101352460343600207500ustar00rootroot00000000000000import numpy as np from qutip.cy.openmp.benchmark import _spmvpy, _spmvpy_openmp from timeit import default_timer as timer def _min_timer(function, *args, **kwargs): min_time = 1e6 for kk in range(10000): t0 = timer() function(*args, **kwargs) t1 = timer() min_time = min(min_time, t1-t0) return min_time def system_bench(func, dims): from qutip.random_objects import rand_ket ratio = 0 ratio_old = 0 nnz_old = 0 for N in dims: L = func(N).data vec = rand_ket(L.shape[0],0.25).full().ravel() nnz = L.nnz out = np.zeros_like(vec) ser = _min_timer(_spmvpy, L.data, L.indices, L.indptr, vec, 1, out) out = np.zeros_like(vec) par = _min_timer(_spmvpy_openmp, L.data, L.indices, L.indptr, vec, 1, out, 2) ratio = ser/par if ratio > 1: break nnz_old = nnz ratio_old = ratio if ratio > 1: rate = (ratio-ratio_old)/(nnz-nnz_old) return int((1.0-ratio_old)/rate+nnz_old) else: return -1 def calculate_openmp_thresh(): jc_dims = np.arange(2,60,dtype=int) jc_result = system_bench(_jc_liouvillian, jc_dims) opto_dims = np.arange(2,60,dtype=int) opto_result = system_bench(_opto_liouvillian, opto_dims) spin_dims = np.arange(2,15,dtype=int) spin_result = system_bench(_spin_hamiltonian, spin_dims) # Double result to be conservative thresh = 2*int(max([jc_result,opto_result,spin_result])) if thresh < 0: thresh = np.iinfo(np.int32).max return thresh def _jc_liouvillian(N): from qutip.tensor import tensor from qutip.operators import destroy, qeye from qutip.superoperator import liouvillian wc = 1.0 * 2 * np.pi # cavity frequency wa = 1.0 * 2 * np.pi # atom frequency 
g = 0.05 * 2 * np.pi # coupling strength kappa = 0.005 # cavity dissipation rate gamma = 0.05 # atom dissipation rate n_th_a = 1 # temperature in frequency units use_rwa = 0 # operators a = tensor(destroy(N), qeye(2)) sm = tensor(qeye(N), destroy(2)) # Hamiltonian if use_rwa: H = wc * a.dag() * a + wa * sm.dag() * sm + g * (a.dag() * sm + a * sm.dag()) else: H = wc * a.dag() * a + wa * sm.dag() * sm + g * (a.dag() + a) * (sm + sm.dag()) c_op_list = [] rate = kappa * (1 + n_th_a) if rate > 0.0: c_op_list.append(np.sqrt(rate) * a) rate = kappa * n_th_a if rate > 0.0: c_op_list.append(np.sqrt(rate) * a.dag()) rate = gamma if rate > 0.0: c_op_list.append(np.sqrt(rate) * sm) return liouvillian(H, c_op_list) def _opto_liouvillian(N): from qutip.tensor import tensor from qutip.operators import destroy, qeye from qutip.superoperator import liouvillian Nc = 5 # Number of cavity states Nm = N # Number of mech states kappa = 0.3 # Cavity damping rate E = 0.1 # Driving Amplitude g0 = 2.4*kappa # Coupling strength Qm = 1e4 # Mech quality factor gamma = 1/Qm # Mech damping rate n_th = 1 # Mech bath temperature delta = -0.43 # Detuning a = tensor(destroy(Nc), qeye(Nm)) b = tensor(qeye(Nc), destroy(Nm)) num_b = b.dag()*b num_a = a.dag()*a H = -delta*(num_a)+num_b+g0*(b.dag()+b)*num_a+E*(a.dag()+a) cc = np.sqrt(kappa)*a cm = np.sqrt(gamma*(1.0 + n_th))*b cp = np.sqrt(gamma*n_th)*b.dag() c_ops = [cc,cm,cp] return liouvillian(H, c_ops) def _spin_hamiltonian(N): from qutip.tensor import tensor from qutip.operators import qeye, sigmax, sigmay, sigmaz # array of spin energy splittings and coupling strengths. 
here we use # uniform parameters, but in general we don't have too h = 1.0 * 2 * np.pi * np.ones(N) Jz = 0.1 * 2 * np.pi * np.ones(N) Jx = 0.1 * 2 * np.pi * np.ones(N) Jy = 0.1 * 2 * np.pi * np.ones(N) # dephasing rate gamma = 0.01 * np.ones(N) si = qeye(2) sx = sigmax() sy = sigmay() sz = sigmaz() sx_list = [] sy_list = [] sz_list = [] for n in range(N): op_list = [] for m in range(N): op_list.append(si) op_list[n] = sx sx_list.append(tensor(op_list)) op_list[n] = sy sy_list.append(tensor(op_list)) op_list[n] = sz sz_list.append(tensor(op_list)) # construct the hamiltonian H = 0 # energy splitting terms for n in range(N): H += - 0.5 * h[n] * sz_list[n] # interaction terms for n in range(N-1): H += - 0.5 * Jx[n] * sx_list[n] * sx_list[n+1] H += - 0.5 * Jy[n] * sy_list[n] * sy_list[n+1] H += - 0.5 * Jz[n] * sz_list[n] * sz_list[n+1] return H qutip-4.4.1/qutip/cy/openmp/benchmark.pyx000077500000000000000000000050671352460343600204520ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. 
# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### cimport cython from qutip.cy.spmatfuncs cimport spmvpy from qutip.cy.openmp.parfuncs cimport spmvpy_openmp @cython.boundscheck(False) @cython.wraparound(False) def _spmvpy(complex[::1] data, int[::1] ind, int[::1] ptr, complex[::1] vec, complex a, complex[::1] out): spmvpy(&data[0], &ind[0], &ptr[0], &vec[0], a, &out[0], vec.shape[0]) @cython.boundscheck(False) @cython.wraparound(False) def _spmvpy_openmp(complex[::1] data, int[::1] ind, int[::1] ptr, complex[::1] vec, complex a, complex[::1] out, unsigned int num_threads): spmvpy_openmp(&data[0], &ind[0], &ptr[0], &vec[0], a, &out[0], vec.shape[0], num_threads) qutip-4.4.1/qutip/cy/openmp/br_omp.pxd000066400000000000000000000051731352460343600177440ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, The QuTiP Project. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. 
Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### cimport numpy as cnp #Spectral function with signature (w,t) ctypedef complex (*spec_func)(double, double) cdef void cop_super_mult_openmp(complex[::1,:] cop, complex[::1,:] evecs, double complex * vec, double complex alpha, double complex * out, unsigned int nrows, unsigned int omp_thresh, unsigned int nthr, double atol) cdef void br_term_mult_openmp(double t, complex[::1,:] A, complex[::1,:] evecs, double[:,::1] skew, double dw_min, spec_func spectral, double complex * vec, double complex * out, unsigned int nrows, int use_secular, double sec_cutoff, unsigned int omp_thresh, unsigned int nthr, double atol) qutip-4.4.1/qutip/cy/openmp/br_omp.pyx000066400000000000000000000207421352460343600177700ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, The QuTiP Project. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### from scipy.linalg.cython_blas cimport zgemv from qutip.cy.spmath cimport (_zcsr_kron_core, _zcsr_kron, _zcsr_add, _zcsr_transpose, _zcsr_adjoint, _zcsr_mult) from qutip.cy.spconvert cimport fdense2D_to_CSR from qutip.cy.spmatfuncs cimport spmvpy from qutip.cy.openmp.parfuncs cimport spmvpy_openmp from qutip.cy.brtools cimport (spec_func, vec2mat_index, dense_to_eigbasis) from libc.math cimport fabs, fmin from libc.float cimport DBL_MAX from libcpp.vector cimport vector from qutip.cy.sparse_structs cimport (CSR_Matrix, COO_Matrix) include "../sparse_routines.pxi" cdef extern from "" namespace "std" nogil: double complex conj(double complex x) double cabs "abs" (double complex x) @cython.boundscheck(False) @cython.wraparound(False) cdef void ZGEMV(double complex * A, double complex * vec, double complex * out, int Arows, int Acols, int transA = 0, double complex alpha=1, double complex beta=1): cdef char tA cdef int idx = 1, idy = 1 if transA == 0: tA = b'N' elif transA == 1: tA = b'T' elif transA == 2: tA = b'C' else: raise Exception('Invalid transA value.') zgemv(&tA, &Arows, &Acols, &alpha, A, &Arows, vec, &idx, &beta, out, &idy) @cython.boundscheck(False) @cython.wraparound(False) cdef void cop_super_mult_openmp(complex[::1,:] cop, complex[::1,:] evecs, double complex * vec, double complex alpha, double complex * out, unsigned int nrows, unsigned int omp_thresh, 
unsigned int nthr, double atol): cdef size_t kk cdef CSR_Matrix mat1, mat2, mat3, mat4 cdef complex[::1,:] cop_eig = dense_to_eigbasis(cop, evecs, nrows, atol) #Mat1 holds cop_eig in CSR format fdense2D_to_CSR(cop_eig, &mat1, nrows, nrows) #Multiply by alpha for time-dependence for kk in range(mat1.nnz): mat1.data[kk] *= alpha #Free data associated with cop_eig as it is no longer needed. PyDataMem_FREE(&cop_eig[0,0]) #create temp array of conj data for cop_eig_sparse cdef complex * conj_data = PyDataMem_NEW(mat1.nnz * sizeof(complex)) for kk in range(mat1.nnz): conj_data[kk] = conj(mat1.data[kk]) #mat2 holds data for kron(cop.dag(), c) init_CSR(&mat2, mat1.nnz**2, mat1.nrows**2, mat1.ncols**2) _zcsr_kron_core(conj_data, mat1.indices, mat1.indptr, mat1.data, mat1.indices, mat1.indptr, &mat2, mat1.nrows, mat1.nrows, mat1.ncols) #Do spmv with kron(cop.dag(), c) if mat2.nnz >= omp_thresh: spmvpy_openmp(mat2.data,mat2.indices,mat2.indptr, &vec[0], 1, out, nrows**2, nthr) else: spmvpy(mat2.data,mat2.indices,mat2.indptr, &vec[0], 1, out, nrows**2) #Free temp conj_data array PyDataMem_FREE(conj_data) #Free mat2 free_CSR(&mat2) #Create identity in mat3 identity_CSR(&mat3, nrows) #Take adjoint of cop (mat1) -> mat2 _zcsr_adjoint(&mat1, &mat2) #multiply cop.dag() * c (cdc) -> mat4 _zcsr_mult(&mat2, &mat1, &mat4) #Free mat1 and mat2 free_CSR(&mat1) free_CSR(&mat2) # kron(eye, cdc) -> mat1 _zcsr_kron(&mat3, &mat4, &mat1) #Do spmv with -0.5*kron(eye, cdc) if mat1.nnz >= omp_thresh: spmvpy_openmp(mat1.data,mat1.indices,mat1.indptr, vec, -0.5, &out[0], nrows**2, nthr) else: spmvpy(mat1.data,mat1.indices,mat1.indptr, vec, -0.5, &out[0], nrows**2) #Free mat1 (mat1 and mat2 are currently free) free_CSR(&mat1) #Take traspose of cdc (mat4) -> mat1 _zcsr_transpose(&mat4, &mat1) #Free mat4 (mat2 and mat4 currently free) free_CSR(&mat4) # kron(cdct, eye) -> mat2 _zcsr_kron(&mat1, &mat3, &mat2) #Do spmv with -0.5*kron(cdct, eye) if mat2.nnz >= omp_thresh: 
spmvpy_openmp(mat2.data,mat2.indices,mat2.indptr, vec, -0.5, &out[0], nrows**2, nthr) else: spmvpy(mat2.data,mat2.indices,mat2.indptr, vec, -0.5, &out[0], nrows**2) #Free mat1, mat2, and mat3 free_CSR(&mat1) free_CSR(&mat2) free_CSR(&mat3) @cython.boundscheck(False) @cython.wraparound(False) cdef void br_term_mult_openmp(double t, complex[::1,:] A, complex[::1,:] evecs, double[:,::1] skew, double dw_min, spec_func spectral, double complex * vec, double complex * out, unsigned int nrows, int use_secular, double sec_cutoff, unsigned int omp_thresh, unsigned int nthr, double atol): cdef size_t kk cdef size_t I, J # vector index variables cdef int[2] ab, cd #matrix indexing variables cdef complex[::1,:] A_eig = dense_to_eigbasis(A, evecs, nrows, atol) cdef complex elem, ac_elem, bd_elem cdef vector[int] coo_rows, coo_cols cdef vector[complex] coo_data cdef unsigned int nnz cdef COO_Matrix coo cdef CSR_Matrix csr cdef complex[:,::1] non_sec_mat for I in range(nrows**2): vec2mat_index(nrows, I, ab) for J in range(nrows**2): vec2mat_index(nrows, J, cd) if (not use_secular) or (fabs(skew[ab[0],ab[1]]-skew[cd[0],cd[1]]) < (dw_min * sec_cutoff)): elem = (A_eig[ab[0],cd[0]]*A_eig[cd[1],ab[1]]) * 0.5 elem *= (spectral(skew[cd[0],ab[0]],t)+spectral(skew[cd[1],ab[1]],t)) if (ab[0]==cd[0]): ac_elem = 0 for kk in range(nrows): ac_elem += A_eig[cd[1],kk]*A_eig[kk,ab[1]] * spectral(skew[cd[1],kk],t) elem -= 0.5*ac_elem if (ab[1]==cd[1]): bd_elem = 0 for kk in range(nrows): bd_elem += A_eig[ab[0],kk]*A_eig[kk,cd[0]] * spectral(skew[cd[0],kk],t) elem -= 0.5*bd_elem if (elem != 0): coo_rows.push_back(I) coo_cols.push_back(J) coo_data.push_back(elem) PyDataMem_FREE(&A_eig[0,0]) #Number of elements in BR tensor nnz = coo_rows.size() coo.nnz = nnz coo.rows = coo_rows.data() coo.cols = coo_cols.data() coo.data = coo_data.data() coo.nrows = nrows**2 coo.ncols = nrows**2 coo.is_set = 1 coo.max_length = nnz COO_to_CSR(&csr, &coo) if csr.nnz > omp_thresh: spmvpy_openmp(csr.data, csr.indices, 
csr.indptr, vec, 1, out, nrows**2, nthr) else: spmvpy(csr.data, csr.indices, csr.indptr, vec, 1, out, nrows**2) free_CSR(&csr) qutip-4.4.1/qutip/cy/openmp/cqobjevo_omp.pyx000066400000000000000000000207041352460343600211730ustar00rootroot00000000000000#!python #cython: language_level=3 # distutils: language = c++ # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### """ Parallel version of the cqobjevo's See ../cqobjevo.pyx for more details """ from qutip.cy.cqobjevo cimport CQobjCte, CQobjEvoTd, CQobjEvoTdMatched from qutip.cy.openmp.parfuncs cimport spmvpy_openmp import numpy as np import scipy.sparse as sp cimport numpy as np import cython cimport cython from cython.parallel import prange include "../complex_math.pxi" @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _spmmcpy_par(complex* data, int* ind, int* ptr, complex* mat, complex a, complex* out, int sp_rows, unsigned int nrows, unsigned int ncols, int nthr): """ sparse*dense "C" ordered. """ cdef int row, col, ii, jj, row_start, row_end for row in prange(sp_rows, nogil=True, num_threads=nthr): row_start = ptr[row] row_end = ptr[row+1] for jj from row_start <= jj < row_end: for col in range(ncols): out[row * ncols + col] += a*data[jj]*mat[ind[jj] * ncols + col] @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _spmmfpy_omp(complex* data, int* ind, int* ptr, complex* mat, complex a, complex* out, unsigned int sp_rows, unsigned int nrows, unsigned int ncols, int nthr): """ sparse*dense "F" ordered. 
""" cdef int col for col in range(ncols): spmvpy_openmp(data, ind, ptr, mat+nrows*col, a, out+sp_rows*col, sp_rows, nthr) cdef class CQobjCteOmp(CQobjCte): cdef int nthr def set_threads(self, nthr): self.nthr = nthr @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _mul_vec(self, double t, complex* vec, complex* out): spmvpy_openmp(self.cte.data, self.cte.indices, self.cte.indptr, vec, 1., out, self.shape0, self.nthr) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef complex _expect(self, double t, complex* vec): cdef complex[::1] y = np.zeros(self.shape0, dtype=complex) spmvpy_openmp(self.cte.data, self.cte.indices, self.cte.indptr, vec, 1., &y[0], self.shape0, self.nthr) cdef int row cdef complex dot = 0 for row from 0 <= row < self.shape0: dot += conj(vec[row])*y[row] return dot @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _mul_matf(self, double t, complex* mat, complex* out, int nrow, int ncol): _spmmfpy_omp(self.cte.data, self.cte.indices, self.cte.indptr, mat, 1., out, self.shape0, nrow, ncol, self.nthr) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _mul_matc(self, double t, complex* mat, complex* out, int nrow, int ncol): _spmmcpy_par(self.cte.data, self.cte.indices, self.cte.indptr, mat, 1., out, self.shape0, nrow, ncol, self.nthr) cdef class CQobjEvoTdOmp(CQobjEvoTd): cdef int nthr def set_threads(self, nthr): self.nthr = nthr @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _mul_vec(self, double t, complex* vec, complex* out): cdef int[2] shape shape[0] = self.shape1 shape[1] = 1 self._factor_dyn(t, vec, shape) cdef int i spmvpy_openmp(self.cte.data, self.cte.indices, self.cte.indptr, vec, 1., out, self.shape0, self.nthr) for i in range(self.num_ops): spmvpy_openmp(self.ops[i].data, self.ops[i].indices, self.ops[i].indptr, vec, self.coeff_ptr[i], out, self.shape0, 
self.nthr) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _mul_matf(self, double t, complex* mat, complex* out, int nrow, int ncol): cdef int[2] shape shape[0] = nrow shape[1] = ncol self._factor_dyn(t, mat, shape) cdef int i _spmmfpy_omp(self.cte.data, self.cte.indices, self.cte.indptr, mat, 1., out, self.shape0, nrow, ncol, self.nthr) for i in range(self.num_ops): _spmmfpy_omp(self.ops[i].data, self.ops[i].indices, self.ops[i].indptr, mat, self.coeff_ptr[i], out, self.shape0, nrow, ncol, self.nthr) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _mul_matc(self, double t, complex* mat, complex* out, int nrow, int ncol): cdef int[2] shape shape[0] = nrow shape[1] = ncol self._factor_dyn(t, mat, shape) cdef int i _spmmcpy_par(self.cte.data, self.cte.indices, self.cte.indptr, mat, 1., out, self.shape0, nrow, ncol, self.nthr) for i in range(self.num_ops): _spmmcpy_par(self.ops[i].data, self.ops[i].indices, self.ops[i].indptr, mat, self.coeff_ptr[i], out, self.shape0, nrow, ncol, self.nthr) cdef class CQobjEvoTdMatchedOmp(CQobjEvoTdMatched): cdef int nthr def set_threads(self, nthr): self.nthr = nthr @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _mul_vec(self, double t, complex* vec, complex* out): cdef int[2] shape shape[0] = self.shape1 shape[1] = 1 self._factor_dyn(t, vec, shape) self._call_core(self.data_t, self.coeff_ptr) spmvpy_openmp(self.data_ptr, &self.indices[0], &self.indptr[0], vec, 1., out, self.shape0, self.nthr) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _mul_matf(self, double t, complex* mat, complex* out, int nrow, int ncol): cdef int[2] shape shape[0] = nrow shape[1] = ncol self._factor_dyn(t, mat, shape) self._call_core(self.data_t, self.coeff_ptr) _spmmfpy_omp(self.data_ptr, &self.indices[0], &self.indptr[0], mat, 1., out, self.shape0, nrow, ncol, self.nthr) @cython.boundscheck(False) 
@cython.wraparound(False) @cython.cdivision(True) cdef void _mul_matc(self, double t, complex* mat, complex* out, int nrow, int ncol): cdef int[2] shape shape[0] = nrow shape[1] = ncol self._factor_dyn(t, mat, shape) self._call_core(self.data_t, self.coeff_ptr) _spmmcpy_par(self.data_ptr, &self.indices[0], &self.indptr[0], mat, 1., out, self.shape0, nrow, ncol, self.nthr) qutip-4.4.1/qutip/cy/openmp/omp_sparse_utils.pyx000066400000000000000000000054661352460343600221100ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### import numpy as np cimport numpy as cnp cimport cython from libcpp cimport bool from libc.math cimport fabs from cython.parallel cimport parallel, prange cdef extern from "" namespace "std" nogil: double real(double complex x) double imag(double complex x) @cython.boundscheck(False) @cython.wraparound(False) cpdef bool omp_tidyup(complex[::1] data, double atol, int nnz, int nthr): cdef int kk, cdef double re, im cdef bool re_flag, im_flag, out_flag = 0 with nogil, parallel(num_threads = nthr): for kk in prange(nnz, schedule='static'): re_flag = 0 im_flag = 0 re = real(data[kk]) im = imag(data[kk]) if fabs(re) < atol: re = 0 re_flag = 1 if fabs(im) < atol: im = 0 im_flag = 1 if re_flag or im_flag: data[kk] = re +1j*im if re_flag and im_flag: out_flag = 1 return out_flag qutip-4.4.1/qutip/cy/openmp/parfuncs.pxd000066400000000000000000000044011352460343600203000ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. 
Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### cimport numpy as cnp cimport cython cpdef cnp.ndarray[complex, ndim=1, mode="c"] spmv_csr_openmp(complex[::1] data, int[::1] ind, int[::1] ptr, complex[::1] vec, unsigned int nthr) cdef void spmvpy_openmp(complex * data, int * ind, int * ptr, complex * vec, complex a, complex * out, unsigned int nrows, unsigned int nthr) qutip-4.4.1/qutip/cy/openmp/parfuncs.pyx000066400000000000000000000131631352460343600203320ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### import numpy as np cimport numpy as cnp cimport cython cdef extern from "src/zspmv_openmp.hpp" nogil: void zspmvpy_openmp(double complex *data, int *ind, int *ptr, double complex *vec, double complex a, double complex *out, int nrows, int nthr) @cython.boundscheck(False) @cython.wraparound(False) cpdef cnp.ndarray[complex, ndim=1, mode="c"] spmv_openmp( object super_op, complex[::1] vec, unsigned int nthr): """ Sparse matrix, dense vector multiplication. Here the vector is assumed to have one-dimension. Matrix must be in CSR format and have complex entries. Parameters ---------- super_op : csr matrix vec : array Dense vector for multiplication. Must be one-dimensional. Returns ------- out : array Returns dense array. """ return spmv_csr_openmp(super_op.data, super_op.indices, super_op.indptr, vec, nthr) @cython.boundscheck(False) @cython.wraparound(False) cpdef cnp.ndarray[complex, ndim=1, mode="c"] spmv_csr_openmp(complex[::1] data, int[::1] ind, int[::1] ptr, complex[::1] vec, unsigned int nthr): """ Sparse matrix, dense vector multiplication. Here the vector is assumed to have one-dimension. Matrix must be in CSR format and have complex entries. Parameters ---------- data : array Data for sparse matrix. idx : array Indices for sparse matrix data. ptr : array Pointers for sparse matrix data. vec : array Dense vector for multiplication. Must be one-dimensional. Returns ------- out : array Returns dense array. 
""" cdef unsigned int num_rows = vec.shape[0] cdef cnp.ndarray[complex, ndim=1, mode="c"] out = np.zeros(num_rows, dtype=complex) zspmvpy_openmp(&data[0], &ind[0], &ptr[0], &vec[0], 1.0, &out[0], num_rows, nthr) return out @cython.boundscheck(False) @cython.wraparound(False) cdef inline void spmvpy_openmp(complex * data, int * ind, int * ptr, complex * vec, complex a, complex * out, unsigned int nrows, unsigned int nthr): zspmvpy_openmp(data, ind, ptr, vec, a, out, nrows, nthr) @cython.boundscheck(False) @cython.wraparound(False) cpdef cnp.ndarray[complex, ndim=1, mode="c"] cy_ode_rhs_openmp( double t, complex[::1] rho, complex[::1] data, int[::1] ind, int[::1] ptr, unsigned int nthr): cdef unsigned int nrows = rho.shape[0] cdef cnp.ndarray[complex, ndim=1, mode="c"] out = \ np.zeros((nrows), dtype=complex) zspmvpy_openmp(&data[0], &ind[0], &ptr[0], &rho[0], 1.0, &out[0], nrows, nthr) return out @cython.boundscheck(False) @cython.wraparound(False) cpdef cnp.ndarray[complex, ndim=1, mode="c"] cy_ode_psi_func_td_openmp( double t, cnp.ndarray[complex, ndim=1, mode="c"] psi, object H_func, object args, unsigned int nthr): H = H_func(t, args).data return -1j * spmv_csr_openmp(H.data, H.indices, H.indptr, psi, nthr) @cython.boundscheck(False) @cython.wraparound(False) cpdef cnp.ndarray[complex, ndim=1, mode="c"] cy_ode_psi_func_td_with_state_openmp( double t, cnp.ndarray[complex, ndim=1, mode="c"] psi, object H_func, object args, unsigned int nthr): H = H_func(t, psi, args) return -1j * spmv_csr_openmp(H.data, H.indices, H.indptr, psi, nthr) @cython.boundscheck(False) @cython.wraparound(False) cpdef cnp.ndarray[complex, ndim=1, mode="c"] cy_ode_rho_func_td_openmp( double t, cnp.ndarray[complex, ndim=1, mode="c"] rho, object L0, object L_func, object args, unsigned int nthr): cdef object L L = L0 + L_func(t, args).data return spmv_csr_openmp(L.data, L.indices, L.indptr, rho, nthr) 
qutip-4.4.1/qutip/cy/openmp/src/000077500000000000000000000000001352460343600165325ustar00rootroot00000000000000qutip-4.4.1/qutip/cy/openmp/src/zspmv_openmp.cpp000066400000000000000000000202411352460343600217720ustar00rootroot00000000000000// This file is part of QuTiP: Quantum Toolbox in Python. // // Copyright (c) 2011 and later, QuSTaR. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names // of its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A // PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
//############################################################################# #include #include #if defined(__GNUC__) && defined(__SSE3__) // Using GCC or CLANG and SSE3 #include void zspmvpy_openmp(const std::complex * __restrict__ data, const int * __restrict__ ind, const int * __restrict__ ptr, const std::complex * __restrict__ vec, const std::complex a, std::complex * __restrict__ out, const unsigned int nrows, const unsigned int nthr) { size_t row, jj; unsigned int row_start, row_end; __m128d num1, num2, num3, num4; #pragma omp parallel for \ private(row,num1,num2,num3,num4,row_start,row_end,jj) \ shared(data,ind,ptr,out,vec) schedule(static) \ num_threads(nthr) for (row=0; row < nrows; row++) { num4 = _mm_setzero_pd(); row_start = ptr[row]; row_end = ptr[row+1]; for (jj=row_start; jj (data[jj])[0]); num2 = _mm_set_pd(std::imag(vec[ind[jj]]),std::real(vec[ind[jj]])); num3 = _mm_mul_pd(num2, num1); num1 = _mm_loaddup_pd(&reinterpret_cast(data[jj])[1]); num2 = _mm_shuffle_pd(num2, num2, 1); num2 = _mm_mul_pd(num2, num1); num3 = _mm_addsub_pd(num3, num2); num4 = _mm_add_pd(num3, num4); } num1 = _mm_loaddup_pd(&reinterpret_cast(a)[0]); num3 = _mm_mul_pd(num4, num1); num1 = _mm_loaddup_pd(&reinterpret_cast(a)[1]); num4 = _mm_shuffle_pd(num4, num4, 1); num4 = _mm_mul_pd(num4, num1); num3 = _mm_addsub_pd(num3, num4); num2 = _mm_loadu_pd((double *)&out[row]); num3 = _mm_add_pd(num2, num3); _mm_storeu_pd((double *)&out[row], num3); } } #elif defined(__GNUC__) // Using GCC or CLANG but no SSE3 void zspmvpy_openmp(const std::complex * __restrict__ data, const int * __restrict__ ind, const int * __restrict__ ptr, const std::complex * __restrict__ vec, const std::complex a, std::complex * __restrict__ out, const unsigned int nrows, const unsigned int nthr) { size_t row, jj; unsigned int row_start, row_end; std::complex dot; #pragma omp parallel for \ private(row,dot,row_start,row_end,jj) \ shared(data,ind,ptr,out,vec) schedule(static) \ num_threads(nthr) for (row=0; row 
< nrows; row++) { dot = 0; row_start = ptr[row]; row_end = ptr[row+1]; for (jj=row_start; jj void zspmvpy_openmp(const std::complex * __restrict data, const int * __restrict ind, const int * __restrict ptr, const std::complex * __restrict vec, const std::complex a, std::complex * __restrict out, const int nrows, const unsigned int nthr) { int row, jj; int row_start, row_end; __m128d num1, num2, num3, num4; #pragma omp parallel for \ private(row,num1,num2,num3,num4,row_start,row_end,jj) \ shared(data,ind,ptr,out,vec) schedule(static) \ num_threads(nthr) for (row=0; row < nrows; row++) { num4 = _mm_setzero_pd(); row_start = ptr[row]; row_end = ptr[row+1]; for (jj=row_start; jj (data[jj])[0]); num2 = _mm_set_pd(std::imag(vec[ind[jj]]),std::real(vec[ind[jj]])); num3 = _mm_mul_pd(num2, num1); num1 = _mm_loaddup_pd(&reinterpret_cast(data[jj])[1]); num2 = _mm_shuffle_pd(num2, num2, 1); num2 = _mm_mul_pd(num2, num1); num3 = _mm_addsub_pd(num3, num2); num4 = _mm_add_pd(num3, num4); } num1 = _mm_loaddup_pd(&reinterpret_cast(a)[0]); num3 = _mm_mul_pd(num4, num1); num1 = _mm_loaddup_pd(&reinterpret_cast(a)[1]); num4 = _mm_shuffle_pd(num4, num4, 1); num4 = _mm_mul_pd(num4, num1); num3 = _mm_addsub_pd(num3, num4); num2 = _mm_loadu_pd((double *)&out[row]); num3 = _mm_add_pd(num2, num3); _mm_storeu_pd((double *)&out[row], num3); } } #elif defined(_MSC_VER) // Visual Studio no AVX void zspmvpy_openmp(const std::complex * __restrict data, const int * __restrict ind, const int * __restrict ptr, const std::complex * __restrict vec, const std::complex a, std::complex * __restrict out, const int nrows, const unsigned int nthr) { int row, jj; int row_start, row_end; std::complex dot; #pragma omp parallel for \ private(row,dot,row_start,row_end,jj) \ shared(data,ind,ptr,out,vec) schedule(static) \ num_threads(nthr) for (row=0; row < nrows; row++) { dot = 0; row_start = ptr[row]; row_end = ptr[row+1]; for (jj=row_start; jj * data, const int * ind, const int * ptr, const std::complex * vec, 
const std::complex a, std::complex * out, const unsigned int nrows, const unsigned int nthr) { size_t row, jj; unsigned int row_start, row_end; std::complex dot; #pragma omp parallel for \ private(row,dot,row_start,row_end,jj) \ shared(data,ind,ptr,out,vec) schedule(static) \ num_threads(nthr) for (row=0; row < nrows; row++) { dot = 0; row_start = ptr[row]; row_end = ptr[row+1]; for (jj=row_start; jj #ifdef __GNUC__ void zspmvpy_openmp(const std::complex * __restrict__ data, const int * __restrict__ ind, const int *__restrict__ ptr, const std::complex * __restrict__ vec, const std::complex a, std::complex * __restrict__ out, const unsigned int nrows, const unsigned int nthr); #elif defined(_MSC_VER) void zspmvpy_openmp(const std::complex * __restrict data, const int * __restrict ind, const int *__restrict ptr, const std::complex * __restrict vec, const std::complex a, std::complex * __restrict out, const int nrows, const unsigned int nthr); #else void zspmvpy_openmp(const std::complex * data, const int * ind, const int * ptr, const std::complex * vec, const std::complex a, std::complex * out, const unsigned int nrows, const unsigned int nthr); #endifqutip-4.4.1/qutip/cy/openmp/utilities.py000066400000000000000000000056121352460343600203340ustar00rootroot00000000000000# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, QuSTaR, # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. 
Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### import os import numpy as np import qutip.settings as qset def check_use_openmp(options): """ Check to see if OPENMP should be used in dynamic solvers. """ force_omp = False if qset.has_openmp and options.use_openmp is None: options.use_openmp = True force_omp = False elif qset.has_openmp and options.use_openmp == True: force_omp = True elif qset.has_openmp and options.use_openmp == False: force_omp = False elif qset.has_openmp == False and options.use_openmp == True: raise Exception('OPENMP not available.') else: options.use_openmp = False force_omp = False #Disable OPENMP in parallel mode unless explicitly set. 
if not force_omp and os.environ['QUTIP_IN_PARALLEL'] == 'TRUE': options.use_openmp = False def use_openmp(): """ Check for using openmp in general cases outside of dynamics """ if qset.has_openmp and os.environ['QUTIP_IN_PARALLEL'] != 'TRUE': return True else: return False def openmp_components(ptr_list): return np.array([ptr[-1] >= qset.openmp_thresh for ptr in ptr_list], dtype=bool) qutip-4.4.1/qutip/cy/parameters.pxi000066400000000000000000000003621352460343600173330ustar00rootroot00000000000000import numpy as np cimport numpy as cnp DTYPE = np.float64 ctypedef cnp.float64_t DTYPE_t ITYPE = np.int32 ctypedef cnp.int32_t ITYPE_t CTYPE = np.complex128 ctypedef cnp.complex128_t CTYPE_t CTYPE = np.int64 ctypedef cnp.int64_t LTYPE_t qutip-4.4.1/qutip/cy/piqs.pyx000066400000000000000000000452251352460343600161730ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, The QuTiP Project. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### """ Cythonized code for permutationally invariant Lindbladian generation """ import numpy as np from scipy.sparse import csr_matrix, dok_matrix from qutip import Qobj cimport numpy as cnp cimport cython def _num_dicke_states(N): """ Calculate the number of Dicke states. Parameters ---------- N: int The number of two-level systems. Returns ------- nds: int The number of Dicke states. """ if (not float(N).is_integer()): raise ValueError("Number of TLS should be an integer") if (N < 1): raise ValueError("Number of TLS should be non-negative") nds = (N/2 + 1)**2 - (N % 2)/4 return int(nds) def _num_dicke_ladders(N): """ Calculate the total number of Dicke ladders in the Dicke space. Parameters ---------- N: int The number of two-level systems. Returns ------- Nj: int The number of Dicke ladders. """ Nj = (N+1) * 0.5 + (1-np.mod(N, 2)) * 0.5 return int(Nj) @cython.boundscheck(False) @cython.wraparound(False) cpdef list get_blocks(int N): """ Calculate the number of cumulative elements at each block boundary. Parameters ---------- N: int The number of two-level systems. Returns ------- blocks: np.ndarray An array with the number of cumulative elements at the boundary of each block. 
""" cdef int num_blocks = _num_dicke_ladders(N) cdef list blocks blocks = [i * (N+2-i) for i in range(1, num_blocks+1)] return blocks @cython.boundscheck(False) @cython.wraparound(False) cpdef float j_min(N): """ Calculate the minimum value of j for given N. Parameters ---------- N: int Number of two-level systems. Returns ------- jmin: float The minimum value of j for odd or even number of two level systems. """ if N % 2 == 0: return 0 else: return 0.5 def j_vals(N): """ Get the valid values of j for given N. Parameters ---------- N: int The number of two-level systems. Returns ------- jvals: np.ndarray The j values for given N as a 1D array. """ j = np.arange(j_min(N), N/2 + 1, 1) return j def m_vals(j): """ Get all the possible values of m or m1 for given j. Parameters ---------- N: int The number of two-level systems. Returns ------- mvals: np.ndarray The m values for given j as a 1D array. """ return np.arange(-j, j+1, 1) def get_index(N, j, m, m1, blocks): """ Get the index in the density matrix for this j, m, m1 value. Parameters ---------- N: int The number of two-level systems. j, m, m1: float The j, m, m1 values. blocks: np.ndarray An 1D array with the number of cumulative elements at the boundary of each block. Returns ------- mvals: array The m values for given j. """ _k = int(j-m1) _k_prime = int(j-m) block_number = int(N/2 - j) offset = 0 if block_number > 0: offset = blocks[block_number-1] i = _k_prime + offset k = _k + offset return (i, k) @cython.boundscheck(False) @cython.wraparound(False) cpdef list jmm1_dictionary(int N): """ Get the index in the density matrix for this j, m, m1 value. The (j, m, m1) values are mapped to the (i, k) index of a block diagonal matrix which has the structure to capture the permutationally symmetric part of the density matrix. For each (j, m, m1) value, first we get the block by using the "j" value and then the addition in the row/column due to the m and m1 is determined. 
Four dictionaries are returned giving a map from the (j, m, m1) values to (i, k), the inverse map, a flattened map and the inverse of the flattened map. """ cdef long i cdef long k cdef dict jmm1_dict = {} cdef dict jmm1_inv = {} cdef dict jmm1_flat = {} cdef dict jmm1_flat_inv = {} cdef int l cdef int nds = _num_dicke_states(N) cdef list blocks = get_blocks(N) jvalues = j_vals(N) for j in jvalues: mvalues = m_vals(j) for m in mvalues: for m1 in mvalues: i, k = get_index(N, j, m, m1, blocks) jmm1_dict[(i, k)] = (j, m, m1) jmm1_inv[(j, m, m1)] = (i, k) l = nds * i+k jmm1_flat[l] = (j, m, m1) jmm1_flat_inv[(j, m, m1)] = l return [jmm1_dict, jmm1_inv, jmm1_flat, jmm1_flat_inv] @cython.boundscheck(False) @cython.wraparound(False) cdef class Dicke(object): """ A faster Cythonized Dicke state class to build the Lindbladian. Parameters ---------- N: int The number of two-level systems. emission: float Incoherent emission coefficient (also nonradiative emission). default: 0.0 dephasing: float Local dephasing coefficient. default: 0.0 pumping: float Incoherent pumping coefficient. default: 0.0 collective_emission: float Collective (superradiant) emmission coefficient. default: 0.0 collective_pumping: float Collective pumping coefficient. default: 0.0 collective_dephasing: float Collective dephasing coefficient. default: 0.0 Attributes ---------- N: int The number of two-level systems. emission: float Incoherent emission coefficient (also nonradiative emission). default: 0.0 dephasing: float Local dephasing coefficient. default: 0.0 pumping: float Incoherent pumping coefficient. default: 0.0 collective_emission: float Collective (superradiant) emmission coefficient. default: 0.0 collective_pumping: float Collective pumping coefficient. default: 0.0 collective_dephasing: float Collective dephasing coefficient. 
default: 0.0 """ cdef int N cdef float emission, dephasing, pumping cdef float collective_emission, collective_dephasing, collective_pumping def __init__(self, int N, float emission=0., float dephasing=0., float pumping=0., float collective_emission=0., collective_dephasing=0., collective_pumping=0.): self.N = N self.emission = emission self.dephasing = dephasing self.pumping = pumping self.collective_emission = collective_emission self.collective_dephasing = collective_dephasing self.collective_pumping = collective_pumping @cython.boundscheck(False) @cython.wraparound(False) cpdef object lindbladian(self): """ Build the Lindbladian superoperator of the dissipative dynamics as a sparse matrix. Returns ---------- lindblad_qobj: :class: qutip.Qobj The matrix size is (nds**2, nds**2) where nds is the number of Dicke states. """ N = self.N cdef int nds = _num_dicke_states(N) cdef int num_ladders = _num_dicke_ladders(N) cdef list lindblad_row = [] cdef list lindblad_col = [] cdef list lindblad_data = [] cdef tuple jmm1_1 cdef tuple jmm1_2 cdef tuple jmm1_3 cdef tuple jmm1_4 cdef tuple jmm1_5 cdef tuple jmm1_6 cdef tuple jmm1_7 cdef tuple jmm1_8 cdef tuple jmm1_9 _1, _2, jmm1_row, jmm1_inv = jmm1_dictionary(N) # perform loop in each row of matrix for r in jmm1_row: j, m, m1 = jmm1_row[r] jmm1_1 = (j, m, m1) jmm1_2 = (j, m+1, m1+1) jmm1_3 = (j+1, m+1, m1+1) jmm1_4 = (j-1, m+1, m1+1) jmm1_5 = (j+1, m, m1) jmm1_6 = (j-1, m, m1) jmm1_7 = (j+1, m-1, m1-1) jmm1_8 = (j, m-1, m1-1) jmm1_9 = (j-1, m-1, m1-1) g1 = self.gamma1(jmm1_1) c1 = jmm1_inv[jmm1_1] lindblad_row.append(int(r)) lindblad_col.append(int(c1)) lindblad_data.append(g1) # generate gammas in the given row # check if the gammas exist # load gammas in the lindbladian in the correct position if jmm1_2 in jmm1_inv: g2 = self.gamma2(jmm1_2) c2 = jmm1_inv[jmm1_2] lindblad_row.append(int(r)) lindblad_col.append(int(c2)) lindblad_data.append(g2) if jmm1_3 in jmm1_inv: g3 = self.gamma3(jmm1_3) c3 = jmm1_inv[jmm1_3] 
lindblad_row.append(int(r)) lindblad_col.append(int(c3)) lindblad_data.append(g3) if jmm1_4 in jmm1_inv: g4 = self.gamma4(jmm1_4) c4 = jmm1_inv[jmm1_4] lindblad_row.append(int(r)) lindblad_col.append(int(c4)) lindblad_data.append(g4) if jmm1_5 in jmm1_inv: g5 = self.gamma5(jmm1_5) c5 = jmm1_inv[jmm1_5] lindblad_row.append(int(r)) lindblad_col.append(int(c5)) lindblad_data.append(g5) if jmm1_6 in jmm1_inv: g6 = self.gamma6(jmm1_6) c6 = jmm1_inv[jmm1_6] lindblad_row.append(int(r)) lindblad_col.append(int(c6)) lindblad_data.append(g6) if jmm1_7 in jmm1_inv: g7 = self.gamma7(jmm1_7) c7 = jmm1_inv[jmm1_7] lindblad_row.append(int(r)) lindblad_col.append(int(c7)) lindblad_data.append(g7) if jmm1_8 in jmm1_inv: g8 = self.gamma8(jmm1_8) c8 = jmm1_inv[jmm1_8] lindblad_row.append(int(r)) lindblad_col.append(int(c8)) lindblad_data.append(g8) if jmm1_9 in jmm1_inv: g9 = self.gamma9(jmm1_9) c9 = jmm1_inv[jmm1_9] lindblad_row.append(int(r)) lindblad_col.append(int(c9)) lindblad_data.append(g9) cdef lindblad_matrix = csr_matrix((lindblad_data, (lindblad_row, lindblad_col)), shape=(nds**2, nds**2)) # make matrix a Qobj superoperator with expected dims llind_dims = [[[nds], [nds]], [[nds], [nds]]] cdef object lindblad_qobj = Qobj(lindblad_matrix, dims=llind_dims) return lindblad_qobj @cython.boundscheck(False) @cython.wraparound(False) cpdef complex gamma1(self, tuple jmm1): """ Calculate gamma1 for value of j, m, m'. 
""" cdef float j, m, m1 cdef float yCE, yE, yD, yP, yCP, yCD cdef float N cdef float spontaneous, losses, pump, collective_pump cdef float dephase, collective_dephase, g1 j, m, m1 = jmm1 N = float(self.N) yE = self.emission yD = self.dephasing yP = self.pumping yCE = self.collective_emission yCP = self.collective_pumping yCD = self.collective_dephasing spontaneous = yCE/2 * (2*j*(j+1) - m * (m-1) - m1 * (m1 - 1)) losses = (yE/2) * (N+m+m1) pump = yP/2 * (N-m-m1) collective_pump = yCP/2 * \ (2*j * (j+1) - m*(m+1) - m1*(m1+1)) collective_dephase = yCD/2 * (m-m1)**2 if j <= 0: dephase = yD*N/4 else: dephase = yD/2*(N/2 - m*m1 * (N/2 + 1)/j/(j+1)) g1 = spontaneous + losses + pump + dephase + \ collective_pump + collective_dephase return -g1 @cython.boundscheck(False) @cython.wraparound(False) cpdef complex gamma2(self, tuple jmm1): """ Calculate gamma2 for given j, m, m'. """ cdef float j, m, m1 cdef float yCE, yE, yD, yP, yCP, yCD, g2 cdef float N cdef float spontaneous, losses, pump, collective_pump cdef float dephase, collective_dephase j, m, m1 = jmm1 N = float(self.N) yCE = self.collective_emission yE = self.emission if yCE == 0: spontaneous = 0.0 else: spontaneous = yCE * np.sqrt((j+m) * (j-m+1) * (j+m1) * (j-m1+1)) if (yE == 0) or (j <= 0): losses = 0.0 else: losses = yE/2 * np.sqrt((j+m) * (j-m+1) * (j+m1) * (j-m1+1)) * \ (N/2 + 1)/(j*(j+1)) g2 = spontaneous + losses return g2 @cython.boundscheck(False) @cython.wraparound(False) cpdef complex gamma3(self, tuple jmm1): """ Calculate gamma3 for given j, m, m'. 
""" cdef float j, m, m1 cdef float yE cdef float N cdef float spontaneous, losses, pump, collective_pump cdef float dephase, collective_dephase cdef complex g3 j, m, m1 = jmm1 N = float(self.N) yE = self.emission if (yE == 0) or (j <= 0): g3 = 0.0 else: g3 = yE/2 * np.sqrt((j+m) * (j+m-1) * (j+m1) * (j+m1-1)) * \ (N/2 + j+1)/(j*(2*j + 1)) return g3 @cython.boundscheck(False) @cython.wraparound(False) cpdef complex gamma4(self, tuple jmm1): """ Calculate gamma4 for given j, m, m'. """ cdef float j, m, m1 cdef complex g4 cdef float yE cdef float N N = float(self.N) j, m, m1 = jmm1 yE = self.emission if (yE == 0) or ((j+1) <= 0): g4 = 0.0 else: g4 = yE/2 * np.sqrt((j-m+1) * (j-m+2) * (j-m1+1) * (j-m1+2)) * (N/2 - j)/((j+1) * (2*j + 1)) return g4 @cython.boundscheck(False) @cython.wraparound(False) cpdef complex gamma5(self, tuple jmm1): """ Calculate gamma5 for given j, m, m'. """ cdef float j, m, m1 cdef complex g5 j, m, m1 = jmm1 cdef float yD cdef float N N = float(self.N) yD = self.dephasing if (yD == 0) or (j <= 0): g5 = 0.0 else: g5 = yD/2 * np.sqrt((j**2 - m**2)*(j**2 - m1**2)) * \ (N/2 + j + 1)/(j*(2*j + 1)) return g5 @cython.boundscheck(False) @cython.wraparound(False) cpdef complex gamma6(self, tuple jmm1): """ Calculate gamma6 for given j, m, m'. """ cdef float j, m, m1 cdef float yD cdef float N cdef complex g6 j, m, m1 = jmm1 N = float(self.N) yD = self.dephasing if yD == 0: g6 = 0.0 else: g6 = yD/2 * np.sqrt(((j+1)**2 - m**2)*((j+1) ** 2-m1**2)) * \ (N/2 - j)/((j+1) * (2*j+1)) return g6 @cython.boundscheck(False) @cython.wraparound(False) cpdef complex gamma7(self, tuple jmm1): """ Calculate gamma7 for given j, m, m'. 
""" cdef float j, m, m1 cdef float yP cdef float N cdef complex g7 j, m, m1 = jmm1 N = float(self.N) yP = self.pumping if (yP == 0) or (j <= 0): g7 = 0.0 else: g7 = yP/2 * np.sqrt((j-m-1)*(j-m)*(j-m1-1) * (j-m1)) * (N/2 + j + 1)/(j * (2*j+1)) return g7 @cython.boundscheck(False) @cython.wraparound(False) cpdef complex gamma8(self, tuple jmm1): """ Calculate gamma8 for given j, m, m'. """ cdef float j, m, m1 cdef float yP, yCP cdef float N cdef complex g8 j, m, m1 = jmm1 N = float(self.N) yP = self.pumping yCP = self.collective_pumping if (yP == 0) or (j <= 0): pump = 0.0 else: pump = yP/2 * np.sqrt((j+m+1) * (j-m) * (j+m1+1) * (j-m1)) * (N/2 + 1)/(j*(j+1)) if yCP == 0: collective_pump = 0.0 else: collective_pump = yCP * \ np.sqrt((j-m) * (j+m+1) * (j+m1+1) * (j-m1)) g8 = pump + collective_pump return g8 @cython.boundscheck(False) @cython.wraparound(False) cpdef complex gamma9(self, tuple jmm1): """ Calculate gamma9 for given j, m, m'. """ cdef float j, m, m1 cdef float yP cdef float N cdef complex g9 j, m, m1 = jmm1 N = float(self.N) yP = self.pumping if (yP == 0): g9 = 0.0 else: g9 = yP/2 * np.sqrt((j+m+1) * (j+m+2) * (j+m1+1) * (j+m1+2)) * (N/2 - j)/((j+1) * (2*j+1)) return g9 qutip-4.4.1/qutip/cy/ptrace.pyx000066400000000000000000000234121352460343600164670ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, The QuTiP Project. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. 
Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### import numpy as np from qutip.cy.spconvert import zcsr_reshape from qutip.cy.spmath import zcsr_mult from qutip.fastsparse import fast_csr_matrix, csr2fast cimport numpy as cnp cimport cython from libc.math cimport floor, trunc import scipy.sparse as sp @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) def _ptrace_legacy(object rho, _sel): """ Private function calculating the partial trace. 
""" if np.prod(rho.dims[1]) == 1: rho = rho * rho.dag() cdef size_t mm, ii cdef int _tmp cdef cnp.ndarray[int, ndim=1, mode='c'] drho = np.asarray(rho.dims[0], dtype=np.int32).ravel() if isinstance(_sel, int): _sel = np.array([_sel], dtype=np.int32) else: _sel = np.asarray(_sel, dtype = np.int32) cdef int[::1] sel = _sel for mm in range(sel.shape[0]): if (sel[mm] < 0) or (sel[mm] >= drho.shape[0]): raise TypeError("Invalid selection index in ptrace.") cdef int[::1] rest = np.delete(np.arange(drho.shape[0],dtype=np.int32),sel) cdef int N = np.prod(drho) cdef int M = np.prod(drho.take(sel)) cdef int R = np.prod(drho.take(rest)) cdef int[:,::1] ilistsel = _select(sel, drho, M) cdef int[::1] indsel = _list2ind(ilistsel, drho) cdef int[:,::1] ilistrest = _select(rest, drho, R) cdef int[::1] indrest = _list2ind(ilistrest, drho) for mm in range(indrest.shape[0]): _tmp = indrest[mm] * N + indrest[mm]-1 indrest[mm] = _tmp cdef cnp.ndarray[int, ndim=1, mode='c'] ind = np.zeros(M**2*indrest.shape[0],dtype=np.int32) for mm in range(M**2): for ii in range(indrest.shape[0]): ind[mm*indrest.shape[0]+ii] = indrest[ii] + \ N*indsel[floor(mm / M)] + \ indsel[(mm % M)]+1 data = np.ones_like(ind,dtype=complex) ptr = np.arange(0,(M**2+1)*indrest.shape[0],indrest.shape[0], dtype=np.int32) perm = fast_csr_matrix((data,ind,ptr),shape=(M * M, N * N)) # No need to sort here, will be sorted in reshape rhdata = zcsr_mult(perm, zcsr_reshape(rho.data, np.prod(rho.shape), 1), sorted=0) rho1_data = zcsr_reshape(rhdata, M, M) dims_kept0 = np.asarray(rho.dims[0], dtype=np.int32).take(sel) rho1_dims = [dims_kept0.tolist(), dims_kept0.tolist()] rho1_shape = [np.prod(dims_kept0), np.prod(dims_kept0)] return rho1_data, rho1_dims, rho1_shape @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cpdef cnp.ndarray[int, ndim=1, mode='c'] _list2ind(int[:,::1] ilist, int[::1] dims): """! 
Private function returning indicies """ cdef size_t kk, ll cdef int[::1] fact = np.ones(dims.shape[0],dtype=np.int32) for kk in range(dims.shape[0]): for ll in range(kk+1,dims.shape[0]): fact[kk] *= dims[ll] # If we make ilist a csr_matrix, then this is just spmv then sort return np.sort(np.dot(ilist, fact), 0) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cpdef cnp.ndarray[int, ndim=2, mode='c'] _select(int[::1] sel, int[::1] dims, int M): """ Private function finding selected components """ cdef size_t ii, jj, kk cdef int _sel, _prd cdef cnp.ndarray[int, ndim=2, mode='c'] ilist = np.zeros((M, dims.shape[0]), dtype=np.int32) for jj in range(sel.shape[0]): _sel = sel[jj] _prd = 1 for kk in range(jj+1,sel.shape[0]): _prd *= dims[sel[kk]] for ii in range(M): ilist[ii, _sel] = (trunc(ii / _prd) % dims[_sel]) return ilist @cython.boundscheck(False) @cython.wraparound(False) cdef int _in(int val, int[::1] vec): # val in vec in pure cython cdef int ii for ii in range(vec.shape[0]): if val == vec[ii]: return 1 return 0 @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void _i2_k_t(int N, int[:, ::1] tensor_table, int[::1] out): # indices determining function for ptrace cdef int ii, t1, t2 out[0] = 0 out[1] = 0 for ii in range(tensor_table.shape[1]): t1 = tensor_table[0, ii] t2 = N / t1 N = N % t1 out[0] += tensor_table[1, ii] * t2 out[1] += tensor_table[2, ii] * t2 @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) def _ptrace(object rho, sel): # work for N<= 26 on 16G Ram cdef int[::1] _sel cdef object _oper cdef size_t ii cdef size_t factor_keep = 1, factor_trace = 1, factor_tensor = 1 cdef cnp.ndarray[int, ndim=1, mode='c'] drho = np.asarray(rho.dims[0], dtype=np.int32).ravel() cdef int num_dims = drho.shape[0] cdef int[:, ::1] tensor_table = np.zeros((3, num_dims), dtype=np.int32) if isinstance(sel, int): _sel = np.array([sel], dtype=np.int32) else: _sel = np.asarray(sel, 
dtype=np.int32) for ii in range(_sel.shape[0]): if _sel[ii] < 0 or _sel[ii] >= num_dims: raise TypeError("Invalid selection index in ptrace.") if np.prod(rho.shape[1]) == 1: _oper = (rho * rho.dag()).data else: _oper = rho.data for ii in range(num_dims-1,-1,-1): tensor_table[0, ii] = factor_tensor factor_tensor *= drho[ii] if _in(ii, _sel): tensor_table[1, ii] = factor_keep factor_keep *= drho[ii] else: tensor_table[2, ii] = factor_trace factor_trace *= drho[ii] dims_kept0 = drho.take(_sel).tolist() rho1_dims = [dims_kept0, dims_kept0] rho1_shape = [np.prod(dims_kept0), np.prod(dims_kept0)] # Try to evaluate how sparse the result will be. if factor_keep*factor_keep > _oper.nnz: return csr2fast(_ptrace_core_sp(_oper, tensor_table, factor_keep)), rho1_dims, rho1_shape else: return csr2fast(_ptrace_core_dense(_oper, tensor_table, factor_keep)), rho1_dims, rho1_shape @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef object _ptrace_core_sp(rho, int[:, ::1] tensor_table, int num_sel_dims): cdef int p = 0, nnz = rho.nnz, ii, jj, nrow = rho.shape[0] cdef int[::1] pos_c = np.empty(2, dtype=np.int32) cdef int[::1] pos_r = np.empty(2, dtype=np.int32) cdef cnp.ndarray[complex, ndim=1, mode='c'] new_data = np.zeros(nnz, dtype=complex) cdef cnp.ndarray[int, ndim=1, mode='c'] new_col = np.zeros(nnz, dtype=np.int32) cdef cnp.ndarray[int, ndim=1, mode='c'] new_row = np.zeros(nnz, dtype=np.int32) cdef cnp.ndarray[complex, ndim=1, mode='c'] data = rho.data cdef cnp.ndarray[int, ndim=1, mode='c'] ptr = rho.indptr cdef cnp.ndarray[int, ndim=1, mode='c'] ind = rho.indices for ii in range(nrow): for jj in range(ptr[ii], ptr[ii+1]): _i2_k_t(ind[jj], tensor_table, pos_c) _i2_k_t(ii, tensor_table, pos_r) if pos_c[1] == pos_r[1]: new_data[p] = data[jj] new_row[p] = (pos_r[0]) new_col[p] = (pos_c[0]) p += 1 return sp.coo_matrix((new_data, [new_row, new_col]), shape=(num_sel_dims,num_sel_dims)).tocsr() @cython.boundscheck(False) @cython.wraparound(False) 
@cython.cdivision(True) cdef object _ptrace_core_dense(rho, int[:, ::1] tensor_table, int num_sel_dims): cdef int nnz = rho.nnz, ii, jj, nrow = rho.shape[0] cdef int[::1] pos_c = np.empty(2, dtype=np.int32) cdef int[::1] pos_r = np.empty(2, dtype=np.int32) cdef cnp.ndarray[complex, ndim=1, mode='c'] data = rho.data cdef cnp.ndarray[int, ndim=1, mode='c'] ptr = rho.indptr cdef cnp.ndarray[int, ndim=1, mode='c'] ind = rho.indices cdef complex[:, ::1] data_mat = np.zeros((num_sel_dims, num_sel_dims), dtype=complex) for ii in range(nrow): for jj in range(ptr[ii], ptr[ii+1]): _i2_k_t(ind[jj], tensor_table, pos_c) _i2_k_t(ii, tensor_table, pos_r) if pos_c[1] == pos_r[1]: data_mat[pos_r[0], pos_c[0]] += data[jj] return sp.coo_matrix(data_mat).tocsr() qutip-4.4.1/qutip/cy/pyxbuilder.py000066400000000000000000000053441352460343600172140ustar00rootroot00000000000000# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, The QuTiP Project # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### import sys, os import pyximport from pyximport import install old_get_distutils_extension = pyximport.pyximport.get_distutils_extension def new_get_distutils_extension(modname, pyxfilename, language_level=None): extension_mod, setup_args = old_get_distutils_extension(modname, pyxfilename, language_level) extension_mod.language='c++' # If on Win and Python version >= 3.5 and not in MSYS2 (i.e. Visual studio compile) if sys.platform == 'win32' and int(str(sys.version_info[0])+str(sys.version_info[1])) >= 35 and os.environ.get('MSYSTEM') is None: extension_mod.extra_compile_args = ['/w', '/O1'] else: extension_mod.extra_compile_args = ['-w', '-O1'] if sys.platform == 'darwin': extension_mod.extra_compile_args.append('-mmacosx-version-min=10.9') extension_mod.extra_link_args = ['-mmacosx-version-min=10.9'] return extension_mod,setup_args pyximport.pyximport.get_distutils_extension = new_get_distutils_extension qutip-4.4.1/qutip/cy/sparse_routines.pxi000066400000000000000000000471631352460343600204270ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, The QuTiP Project. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. 
Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### import numpy as np from scipy.sparse import coo_matrix from qutip.fastsparse import fast_csr_matrix cimport numpy as np cimport cython from libcpp.algorithm cimport sort from libcpp.vector cimport vector from qutip.cy.sparse_structs cimport CSR_Matrix, COO_Matrix np.import_array() cdef extern from "numpy/arrayobject.h" nogil: void PyArray_ENABLEFLAGS(np.ndarray arr, int flags) void PyDataMem_FREE(void * ptr) void PyDataMem_RENEW(void * ptr, size_t size) void PyDataMem_NEW_ZEROED(size_t size, size_t elsize) void PyDataMem_NEW(size_t size) #Struct used for CSR indices sorting cdef struct _data_ind_pair: double complex data int ind ctypedef _data_ind_pair data_ind_pair ctypedef int (*cfptr)(data_ind_pair, data_ind_pair) cdef void raise_error_CSR(int E, CSR_Matrix * C = NULL): if not C.numpy_lock and C != NULL: free_CSR(C) if E == -1: raise MemoryError('Could not allocate memory.') elif E == -2: raise Exception('Error manipulating CSR_Matrix structure.') elif E == -3: raise Exception('CSR_Matrix is not initialized.') elif E == -4: raise Exception('NumPy already has lock on data.') elif E == -5: raise Exception('Cannot expand data structures past max_length.') elif E == -6: raise Exception('CSR_Matrix cannot be expanded.') elif E == -7: raise Exception('Data length cannot be larger than max_length') else: raise Exception('Error in Cython code.') cdef void raise_error_COO(int E, COO_Matrix * C = NULL): if not C.numpy_lock and C != NULL: free_COO(C) if E == -1: raise MemoryError('Could not allocate memory.') elif E == -2: raise Exception('Error manipulating COO_Matrix structure.') elif E == -3: raise Exception('COO_Matrix is not initialized.') elif E == -4: raise Exception('NumPy already has lock on data.') elif E == -5: raise Exception('Cannot expand data structures past max_length.') elif E == -6: raise Exception('COO_Matrix cannot be expanded.') elif E == -7: raise Exception('Data length 
cannot be larger than max_length') else: raise Exception('Error in Cython code.') cdef inline int int_min(int a, int b) nogil: return b if b < a else a cdef inline int int_max(int a, int b) nogil: return a if a > b else b @cython.boundscheck(False) @cython.wraparound(False) cdef void init_CSR(CSR_Matrix * mat, int nnz, int nrows, int ncols = 0, int max_length = 0, int init_zeros = 1): """ Initialize CSR_Matrix struct. Matrix is assumed to be square with shape nrows x nrows. Manually set mat.ncols otherwise Parameters ---------- mat : CSR_Matrix * Pointer to struct. nnz : int Length of data and indices arrays. Also number of nonzero elements nrows : int Number of rows in matrix. Also gives length of indptr array (nrows+1). ncols : int (default = 0) Number of cols in matrix. Default is ncols = nrows. max_length : int (default = 0) Maximum length of data and indices arrays. Used for resizing. Default value of zero indicates no resizing. """ if max_length == 0: max_length = nnz if nnz > max_length: raise_error_CSR(-7, mat) if init_zeros: mat.data = PyDataMem_NEW_ZEROED(nnz, sizeof(double complex)) else: mat.data = PyDataMem_NEW(nnz * sizeof(double complex)) if mat.data == NULL: raise_error_CSR(-1, mat) if init_zeros: mat.indices = PyDataMem_NEW_ZEROED(nnz, sizeof(int)) mat.indptr = PyDataMem_NEW_ZEROED((nrows+1), sizeof(int)) else: mat.indices = PyDataMem_NEW(nnz * sizeof(int)) mat.indptr = PyDataMem_NEW((nrows+1) * sizeof(int)) mat.nnz = nnz mat.nrows = nrows if ncols == 0: mat.ncols = nrows else: mat.ncols = ncols mat.is_set = 1 mat.max_length = max_length mat.numpy_lock = 0 @cython.boundscheck(False) @cython.wraparound(False) cdef void copy_CSR(CSR_Matrix * out, CSR_Matrix * mat): """ Copy a CSR_Matrix. 
""" cdef size_t kk if not mat.is_set: raise_error_CSR(-3) elif out.is_set: raise_error_CSR(-2) init_CSR(out, mat.nnz, mat.nrows, mat.nrows, mat.max_length) # We cannot use memcpy here since there are issues with # doing so on Win with the GCC compiler for kk in range(mat.nnz): out.data[kk] = mat.data[kk] out.indices[kk] = mat.indices[kk] for kk in range(mat.nrows+1): out.indptr[kk] = mat.indptr[kk] @cython.boundscheck(False) @cython.wraparound(False) cdef void init_COO(COO_Matrix * mat, int nnz, int nrows, int ncols = 0, int max_length = 0, int init_zeros = 1): """ Initialize COO_Matrix struct. Matrix is assumed to be square with shape nrows x nrows. Manually set mat.ncols otherwise Parameters ---------- mat : COO_Matrix * Pointer to struct. nnz : int Number of nonzero elements. nrows : int Number of rows in matrix. nrows : int (default = 0) Number of cols in matrix. Default is ncols = nrows. max_length : int (default = 0) Maximum length of arrays. Used for resizing. Default value of zero indicates no resizing. """ if max_length == 0: max_length = nnz if nnz > max_length: raise_error_COO(-7, mat) if init_zeros: mat.data = PyDataMem_NEW_ZEROED(nnz, sizeof(double complex)) else: mat.data = PyDataMem_NEW(nnz * sizeof(double complex)) if mat.data == NULL: raise_error_COO(-1, mat) if init_zeros: mat.rows = PyDataMem_NEW_ZEROED(nnz, sizeof(int)) mat.cols = PyDataMem_NEW_ZEROED(nnz, sizeof(int)) else: mat.rows = PyDataMem_NEW(nnz * sizeof(int)) mat.cols = PyDataMem_NEW(nnz * sizeof(int)) mat.nnz = nnz mat.nrows = nrows if ncols == 0: mat.ncols = nrows else: mat.ncols = ncols mat.is_set = 1 mat.max_length = max_length mat.numpy_lock = 0 @cython.boundscheck(False) @cython.wraparound(False) cdef void free_CSR(CSR_Matrix * mat): """ Manually free CSR_Matrix data structures if data is not locked by NumPy. 
""" if not mat.numpy_lock and mat.is_set: if mat.data != NULL: PyDataMem_FREE(mat.data) if mat.indices != NULL: PyDataMem_FREE(mat.indices) if mat.indptr != NULL: PyDataMem_FREE(mat.indptr) mat.is_set = 0 else: raise_error_CSR(-2) @cython.boundscheck(False) @cython.wraparound(False) cdef void free_COO(COO_Matrix * mat): """ Manually free COO_Matrix data structures if data is not locked by NumPy. """ if not mat.numpy_lock and mat.is_set: if mat.data != NULL: PyDataMem_FREE(mat.data) if mat.rows != NULL: PyDataMem_FREE(mat.rows) if mat.cols != NULL: PyDataMem_FREE(mat.cols) mat.is_set = 0 else: raise_error_COO(-2) @cython.boundscheck(False) @cython.wraparound(False) cdef void shorten_CSR(CSR_Matrix * mat, int N): """ Shortends the length of CSR data and indices arrays. """ if (not mat.numpy_lock) and mat.is_set: mat.data = PyDataMem_RENEW(mat.data, N * sizeof(double complex)) mat.indices = PyDataMem_RENEW(mat.indices, N * sizeof(int)) mat.nnz = N else: if mat.numpy_lock: raise_error_CSR(-4, mat) elif not mat.is_set: raise_error_CSR(-3, mat) @cython.boundscheck(False) @cython.wraparound(False) cdef void expand_CSR(CSR_Matrix * mat, int init_zeros=0): """ Expands the length of CSR data and indices arrays to accomodate more nnz. THIS IS CURRENTLY NOT USED """ cdef size_t ii cdef int new_size if mat.nnz == mat.max_length: raise_error_CSR(-5, mat) #Cannot expand data past max_length. 
elif (not mat.numpy_lock) and mat.is_set: new_size = int_min(2*mat.nnz, mat.max_length) new_data = PyDataMem_RENEW(mat.data, new_size * sizeof(double complex)) if new_data == NULL: raise_error_CSR(-1, mat) else: mat.data = new_data if init_zeros == 1: for ii in range(mat.nnz, new_size): mat.data[ii] = 0 new_ind = PyDataMem_RENEW(mat.indices, new_size * sizeof(int)) mat.indices = new_ind if init_zeros == 1: for ii in range(mat.nnz, new_size): mat.indices[ii] = 0 mat.nnz = new_size else: if mat.numpy_lock: raise_error_CSR(-4, mat) elif not mat.is_set: raise_error_CSR(-3, mat) @cython.boundscheck(False) @cython.wraparound(False) cdef object CSR_to_scipy(CSR_Matrix * mat): """ Converts a CSR_Matrix struct to a SciPy csr_matrix class object. The NumPy arrays are generated from the pointers, and the lifetime of the pointer memory is tied to that of the NumPy array (i.e. automatic garbage cleanup.) Parameters ---------- mat : CSR_Matrix * Pointer to CSR_Matrix. """ cdef np.npy_intp dat_len, ptr_len cdef np.ndarray[complex, ndim=1] _data cdef np.ndarray[int, ndim=1] _ind, _ptr if (not mat.numpy_lock) and mat.is_set: dat_len = mat.nnz ptr_len = mat.nrows+1 _data = np.PyArray_SimpleNewFromData(1, &dat_len, np.NPY_COMPLEX128, mat.data) PyArray_ENABLEFLAGS(_data, np.NPY_OWNDATA) _ind = np.PyArray_SimpleNewFromData(1, &dat_len, np.NPY_INT32, mat.indices) PyArray_ENABLEFLAGS(_ind, np.NPY_OWNDATA) _ptr = np.PyArray_SimpleNewFromData(1, &ptr_len, np.NPY_INT32, mat.indptr) PyArray_ENABLEFLAGS(_ptr, np.NPY_OWNDATA) mat.numpy_lock = 1 return fast_csr_matrix((_data, _ind, _ptr), shape=(mat.nrows,mat.ncols)) else: if mat.numpy_lock: raise_error_CSR(-4) elif not mat.is_set: raise_error_CSR(-3) @cython.boundscheck(False) @cython.wraparound(False) cdef object COO_to_scipy(COO_Matrix * mat): """ Converts a COO_Matrix struct to a SciPy coo_matrix class object. The NumPy arrays are generated from the pointers, and the lifetime of the pointer memory is tied to that of the NumPy array (i.e. 
automatic garbage cleanup.) Parameters ---------- mat : COO_Matrix * Pointer to COO_Matrix. """ cdef np.npy_intp dat_len cdef np.ndarray[complex, ndim=1] _data cdef np.ndarray[int, ndim=1] _row, _col if (not mat.numpy_lock) and mat.is_set: dat_len = mat.nnz _data = np.PyArray_SimpleNewFromData(1, &dat_len, np.NPY_COMPLEX128, mat.data) PyArray_ENABLEFLAGS(_data, np.NPY_OWNDATA) _row = np.PyArray_SimpleNewFromData(1, &dat_len, np.NPY_INT32, mat.rows) PyArray_ENABLEFLAGS(_row, np.NPY_OWNDATA) _col = np.PyArray_SimpleNewFromData(1, &dat_len, np.NPY_INT32, mat.cols) PyArray_ENABLEFLAGS(_col, np.NPY_OWNDATA) mat.numpy_lock = 1 return coo_matrix((_data, (_row, _col)), shape=(mat.nrows,mat.ncols)) else: if mat.numpy_lock: raise_error_COO(-4) elif not mat.is_set: raise_error_COO(-3) @cython.boundscheck(False) @cython.wraparound(False) cdef void COO_to_CSR(CSR_Matrix * out, COO_Matrix * mat): """ Conversion from COO to CSR. Not in place, but result is sorted correctly. """ cdef int i, j, iad, j0 cdef double complex val cdef size_t kk init_CSR(out, mat.nnz, mat.nrows, mat.ncols, max_length=0, init_zeros=1) # Determine row lengths for kk in range(mat.nnz): out.indptr[mat.rows[kk]] = out.indptr[mat.rows[kk]] + 1 # Starting position of rows j = 0 for kk in range(mat.nrows): j0 = out.indptr[kk] out.indptr[kk] = j j += j0 #Do the data for kk in range(mat.nnz): i = mat.rows[kk] j = mat.cols[kk] val = mat.data[kk] iad = out.indptr[i] out.data[iad] = val out.indices[iad] = j out.indptr[i] = iad+1 # Shift back for kk in range(mat.nrows,0,-1): out.indptr[kk] = out.indptr[kk-1] out.indptr[0] = 0 @cython.boundscheck(False) @cython.wraparound(False) cdef void CSR_to_COO(COO_Matrix * out, CSR_Matrix * mat): """ Converts a CSR_Matrix to a COO_Matrix. 
""" cdef int k1, k2 cdef size_t jj, kk init_COO(out, mat.nnz, mat.nrows, mat.ncols) for kk in range(mat.nnz): out.data[kk] = mat.data[kk] out.cols[kk] = mat.indices[kk] for kk in range(mat.nrows-1,0,-1): k1 = mat.indptr[kk+1] k2 = mat.indptr[kk] for jj in range(k2, k1): out.rows[jj] = kk @cython.boundscheck(False) @cython.wraparound(False) cdef void COO_to_CSR_inplace(CSR_Matrix * out, COO_Matrix * mat): """ In place conversion from COO to CSR. In place, but not sorted. The length of the COO (data,rows,cols) must be equal to the NNZ in the final matrix (i.e. no padded zeros on ends of arrays). """ cdef size_t kk cdef int i, j, init, inext, jnext, ipos cdef int * _tmp_rows cdef complex val, val_next cdef int * work = PyDataMem_NEW_ZEROED(mat.nrows+1, sizeof(int)) # Determine output indptr array for kk in range(mat.nnz): i = mat.rows[kk] work[i+1] += 1 work[0] = 0 for kk in range(mat.nrows): work[kk+1] += work[kk] if mat.nnz < (mat.nrows+1): _tmp_rows = PyDataMem_RENEW(mat.rows, (mat.nrows+1) * sizeof(int)) mat.rows = _tmp_rows init = 0 while init < mat.nnz: while (mat.rows[init] < 0): init += 1 val = mat.data[init] i = mat.rows[init] j = mat.cols[init] mat.rows[init] = -1 while 1: ipos = work[i] val_next = mat.data[ipos] inext = mat.rows[ipos] jnext = mat.cols[ipos] mat.data[ipos] = val mat.cols[ipos] = j mat.rows[ipos] = -1 work[i] += 1 if inext < 0: break val = val_next i = inext j = jnext init += 1 for kk in range(mat.nrows): mat.rows[kk+1] = work[kk] mat.rows[0] = 0 if mat.nnz > (mat.nrows+1): _tmp_rows = PyDataMem_RENEW(mat.rows, (mat.nrows+1) * sizeof(int)) mat.rows = _tmp_rows #Free working array PyDataMem_FREE(work) #Set CSR pointers to original COO data. 
out.data = mat.data out.indices = mat.cols out.indptr = mat.rows out.nrows = mat.nrows out.ncols = mat.ncols out.nnz = mat.nnz out.max_length = mat.nnz out.is_set = 1 out.numpy_lock = 0 @cython.boundscheck(False) @cython.wraparound(False) cdef int ind_sort(data_ind_pair x, data_ind_pair y): return x.ind < y.ind @cython.boundscheck(False) @cython.wraparound(False) cdef void sort_indices(CSR_Matrix * mat): """ Sorts the indices of a CSR_Matrix inplace. """ cdef size_t ii, jj cdef vector[data_ind_pair] pairs cdef cfptr cfptr_ = &ind_sort cdef int row_start, row_end, length for ii in range(mat.nrows): row_start = mat.indptr[ii] row_end = mat.indptr[ii+1] length = row_end - row_start pairs.resize(length) for jj in range(length): pairs[jj].data = mat.data[row_start+jj] pairs[jj].ind = mat.indices[row_start+jj] sort(pairs.begin(),pairs.end(),cfptr_) for jj in range(length): mat.data[row_start+jj] = pairs[jj].data mat.indices[row_start+jj] = pairs[jj].ind @cython.boundscheck(False) @cython.wraparound(False) cdef CSR_Matrix CSR_from_scipy(object A): """ Converts a SciPy CSR sparse matrix to a CSR_Matrix struct. """ cdef complex[::1] data = A.data cdef int[::1] ind = A.indices cdef int[::1] ptr = A.indptr cdef int nrows = A.shape[0] cdef int ncols = A.shape[1] cdef int nnz = ptr[nrows] cdef CSR_Matrix mat mat.data = &data[0] mat.indices = &ind[0] mat.indptr = &ptr[0] mat.nrows = nrows mat.ncols = ncols mat.nnz = nnz mat.max_length = nnz mat.is_set = 1 mat.numpy_lock = 1 return mat @cython.boundscheck(False) @cython.wraparound(False) cdef void CSR_from_scipy_inplace(object A, CSR_Matrix* mat): """ Converts a SciPy CSR sparse matrix to a CSR_Matrix struct. 
""" cdef complex[::1] data = A.data cdef int[::1] ind = A.indices cdef int[::1] ptr = A.indptr cdef int nrows = A.shape[0] cdef int ncols = A.shape[1] cdef int nnz = ptr[nrows] mat.data = &data[0] mat.indices = &ind[0] mat.indptr = &ptr[0] mat.nrows = nrows mat.ncols = ncols mat.nnz = nnz mat.max_length = nnz mat.is_set = 1 mat.numpy_lock = 1 @cython.boundscheck(False) @cython.wraparound(False) cdef COO_Matrix COO_from_scipy(object A): """ Converts a SciPy COO sparse matrix to a COO_Matrix struct. """ cdef complex[::1] data = A.data cdef int[::1] rows = A.row cdef int[::1] cols = A.col cdef int nrows = A.shape[0] cdef int ncols = A.shape[1] cdef int nnz = data.shape[0] cdef COO_Matrix mat mat.data = &data[0] mat.rows = &rows[0] mat.cols = &cols[0] mat.nrows = nrows mat.ncols = ncols mat.nnz = nnz mat.max_length = nnz mat.is_set = 1 mat.numpy_lock = 1 return mat @cython.boundscheck(False) @cython.wraparound(False) cdef void identity_CSR(CSR_Matrix * mat, unsigned int nrows): cdef size_t kk init_CSR(mat, nrows, nrows, nrows, 0, 0) for kk in range(nrows): mat.data[kk] = 1 mat.indices[kk] = kk mat.indptr[kk] = kk mat.indptr[nrows] = nrows qutip-4.4.1/qutip/cy/sparse_structs.pxd000066400000000000000000000042751352460343600202560ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, The QuTiP Project. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. 
Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### cdef struct _csr_mat: double complex * data int * indices int * indptr int nnz int nrows int ncols int is_set int max_length int numpy_lock cdef struct _coo_mat: double complex * data int * rows int * cols int nnz int nrows int ncols int is_set int max_length int numpy_lock ctypedef _csr_mat CSR_Matrix ctypedef _coo_mat COO_Matrix qutip-4.4.1/qutip/cy/sparse_utils.pyx000066400000000000000000000275041352460343600177340ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. 
Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### import numpy as np from qutip.fastsparse import fast_csr_matrix cimport numpy as cnp from libc.math cimport abs, fabs, sqrt from libcpp cimport bool cimport cython cnp.import_array() cdef extern from "numpy/arrayobject.h" nogil: void PyArray_ENABLEFLAGS(cnp.ndarray arr, int flags) void PyDataMem_FREE(void * ptr) void PyDataMem_RENEW(void * ptr, size_t size) void PyDataMem_NEW_ZEROED(size_t size, size_t elsize) void PyDataMem_NEW(size_t size) cdef extern from "" namespace "std" nogil: double abs(double complex x) double real(double complex x) double imag(double complex x) cdef extern from "" namespace "std" nogil: double cabs "abs" (double complex x) cdef inline int int_max(int x, int y): return x ^ ((x ^ y) & -(x < y)) include "parameters.pxi" @cython.boundscheck(False) @cython.wraparound(False) def _sparse_bandwidth( int[::1] idx, int[::1] ptr, int nrows): """ Calculates the max (mb), lower(lb), and upper(ub) bandwidths of a csr_matrix. """ cdef int ldist cdef int lb = -nrows cdef int ub = -nrows cdef int mb = 0 cdef size_t ii, jj for ii in range(nrows): for jj in range(ptr[ii], ptr[ii + 1]): ldist = ii - idx[jj] lb = int_max(lb, ldist) ub = int_max(ub, -ldist) mb = int_max(mb, ub + lb + 1) return mb, lb, ub @cython.boundscheck(False) @cython.wraparound(False) def _sparse_profile(int[::1] idx, int[::1] ptr, int nrows): cdef int ii, jj, temp, ldist=0 cdef LTYPE_t pro = 0 for ii in range(nrows): temp = 0 for jj in range(ptr[ii], ptr[ii + 1]): ldist = idx[jj] - ii temp = int_max(temp, ldist) pro += temp return pro @cython.boundscheck(False) @cython.wraparound(False) def _sparse_permute( cnp.ndarray[cython.numeric, ndim=1] data, int[::1] idx, int[::1] ptr, int nrows, int ncols, cnp.ndarray[ITYPE_t, ndim=1] rperm, cnp.ndarray[ITYPE_t, ndim=1] cperm, int flag): """ Permutes the rows and columns of a sparse CSR or CSC matrix according to the permutation arrays rperm and cperm, respectively. 
Here, the permutation arrays specify the new order of the rows and columns. i.e. [0,1,2,3,4] -> [3,0,4,1,2]. """ cdef int ii, jj, kk, k0, nnz cdef cnp.ndarray[cython.numeric] new_data = np.zeros_like(data) cdef cnp.ndarray[ITYPE_t] new_idx = np.zeros_like(idx) cdef cnp.ndarray[ITYPE_t] new_ptr = np.zeros_like(ptr) cdef cnp.ndarray[ITYPE_t] perm_r cdef cnp.ndarray[ITYPE_t] perm_c cdef cnp.ndarray[ITYPE_t] inds if flag == 0: # CSR matrix if rperm.shape[0] != 0: inds = np.argsort(rperm).astype(ITYPE) perm_r = np.arange(rperm.shape[0], dtype=ITYPE)[inds] for jj in range(nrows): ii = perm_r[jj] new_ptr[ii + 1] = ptr[jj + 1] - ptr[jj] for jj in range(nrows): new_ptr[jj + 1] = new_ptr[jj+1] + new_ptr[jj] for jj in range(nrows): k0 = new_ptr[perm_r[jj]] for kk in range(ptr[jj], ptr[jj + 1]): new_idx[k0] = idx[kk] new_data[k0] = data[kk] k0 = k0 + 1 if cperm.shape[0] != 0: inds = np.argsort(cperm).astype(ITYPE) perm_c = np.arange(cperm.shape[0], dtype=ITYPE)[inds] nnz = new_ptr[new_ptr.shape[0] - 1] for jj in range(nnz): new_idx[jj] = perm_c[new_idx[jj]] elif flag == 1: # CSC matrix if cperm.shape[0] != 0: inds = np.argsort(cperm).astype(ITYPE) perm_c = np.arange(cperm.shape[0], dtype=ITYPE)[inds] for jj in range(ncols): ii = perm_c[jj] new_ptr[ii + 1] = ptr[jj + 1] - ptr[jj] for jj in range(ncols): new_ptr[jj + 1] = new_ptr[jj + 1] + new_ptr[jj] for jj in range(ncols): k0 = new_ptr[perm_c[jj]] for kk in range(ptr[jj], ptr[jj + 1]): new_idx[k0] = idx[kk] new_data[k0] = data[kk] k0 = k0 + 1 if rperm.shape[0] != 0: inds = np.argsort(rperm).astype(ITYPE) perm_r = np.arange(rperm.shape[0], dtype=ITYPE)[inds] nnz = new_ptr[new_ptr.shape[0] - 1] for jj in range(nnz): new_idx[jj] = perm_r[new_idx[jj]] return new_data, new_idx, new_ptr @cython.boundscheck(False) @cython.wraparound(False) def _sparse_reverse_permute( cnp.ndarray[cython.numeric, ndim=1] data, int[::1] idx, int[::1] ptr, int nrows, int ncols, cnp.ndarray[ITYPE_t, ndim=1] rperm, cnp.ndarray[ITYPE_t, ndim=1] cperm, int 
flag): """ Reverse permutes the rows and columns of a sparse CSR or CSC matrix according to the original permutation arrays rperm and cperm, respectively. """ cdef int ii, jj, kk, k0, nnz cdef cnp.ndarray[cython.numeric, ndim=1] new_data = np.zeros_like(data) cdef cnp.ndarray[ITYPE_t, ndim=1] new_idx = np.zeros_like(idx) cdef cnp.ndarray[ITYPE_t, ndim=1] new_ptr = np.zeros_like(ptr) if flag == 0: # CSR matrix if rperm.shape[0] != 0: for jj in range(nrows): ii = rperm[jj] new_ptr[ii + 1] = ptr[jj + 1] - ptr[jj] for jj in range(nrows): new_ptr[jj + 1] = new_ptr[jj + 1] + new_ptr[jj] for jj in range(nrows): k0 = new_ptr[rperm[jj]] for kk in range(ptr[jj], ptr[jj + 1]): new_idx[k0] = idx[kk] new_data[k0] = data[kk] k0 = k0 + 1 if cperm.shape[0] > 0: nnz = new_ptr[new_ptr.shape[0] - 1] for jj in range(nnz): new_idx[jj] = cperm[new_idx[jj]] if flag == 1: # CSC matrix if cperm.shape[0] != 0: for jj in range(ncols): ii = cperm[jj] new_ptr[ii + 1] = ptr[jj + 1] - ptr[jj] for jj in range(ncols): new_ptr[jj + 1] = new_ptr[jj + 1] + new_ptr[jj] for jj in range(ncols): k0 = new_ptr[cperm[jj]] for kk in range(ptr[jj], ptr[jj + 1]): new_idx[k0] = idx[kk] new_data[k0] = data[kk] k0 = k0 + 1 if cperm.shape[0] != 0: nnz = new_ptr[new_ptr.shape[0] - 1] for jj in range(nnz): new_idx[jj] = rperm[new_idx[jj]] return new_data, new_idx, new_ptr @cython.boundscheck(False) @cython.wraparound(False) def _isdiag(int[::1] idx, int[::1] ptr, int nrows): cdef int row, num_elems for row in range(nrows): num_elems = ptr[row+1] - ptr[row] if num_elems > 1: return 0 elif num_elems == 1: if idx[ptr[row]] != row: return 0 return 1 @cython.boundscheck(False) @cython.wraparound(False) cpdef cnp.ndarray[complex, ndim=1, mode='c'] _csr_get_diag(complex[::1] data, int[::1] idx, int[::1] ptr, int k=0): cdef size_t row, jj cdef int num_rows = ptr.shape[0]-1 cdef int abs_k = abs(k) cdef int start, stop cdef cnp.ndarray[complex, ndim=1, mode='c'] out = np.zeros(num_rows-abs_k, dtype=complex) if k >= 0: start = 
0 stop = num_rows-abs_k else: #k < 0 start = abs_k stop = num_rows for row in range(start, stop): for jj in range(ptr[row], ptr[row+1]): if idx[jj]-k == row: out[row-start] = data[jj] break return out @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) def unit_row_norm(complex[::1] data, int[::1] ptr, int nrows): cdef size_t row, ii cdef double total for row in range(nrows): total = 0 for ii in range(ptr[row], ptr[row+1]): total += real(data[ii]) * real(data[ii]) + imag(data[ii]) * imag(data[ii]) total = sqrt(total) for ii in range(ptr[row], ptr[row+1]): data[ii] /= total @cython.boundscheck(False) @cython.wraparound(False) cpdef double zcsr_one_norm(complex[::1] data, int[::1] ind, int[::1] ptr, int nrows, int ncols): cdef int k cdef size_t ii, jj cdef double * col_sum = PyDataMem_NEW_ZEROED(ncols, sizeof(double)) cdef double max_col = 0 for ii in range(nrows): for jj in range(ptr[ii], ptr[ii+1]): k = ind[jj] col_sum[k] += cabs(data[jj]) for ii in range(ncols): if col_sum[ii] > max_col: max_col = col_sum[ii] PyDataMem_FREE(col_sum) return max_col @cython.boundscheck(False) @cython.wraparound(False) cpdef double zcsr_inf_norm(complex[::1] data, int[::1] ind, int[::1] ptr, int nrows, int ncols): cdef int k cdef size_t ii, jj cdef double * row_sum = PyDataMem_NEW_ZEROED(nrows, sizeof(double)) cdef double max_row = 0 for ii in range(nrows): for jj in range(ptr[ii], ptr[ii+1]): row_sum[ii] += cabs(data[jj]) for ii in range(nrows): if row_sum[ii] > max_row: max_row = row_sum[ii] PyDataMem_FREE(row_sum) return max_row @cython.boundscheck(False) @cython.wraparound(False) cpdef bool cy_tidyup(complex[::1] data, double atol, unsigned int nnz): """ Performs an in-place tidyup of CSR matrix data """ cdef size_t kk cdef double re, im cdef bool re_flag, im_flag, out_flag = 0 for kk in range(nnz): re_flag = 0 im_flag = 0 re = real(data[kk]) im = imag(data[kk]) if fabs(re) < atol: re = 0 re_flag = 1 if fabs(im) < atol: im = 0 im_flag = 1 if re_flag or 
im_flag: data[kk] = re + 1j*im if re_flag and im_flag: out_flag = 1 return out_flag qutip-4.4.1/qutip/cy/spconvert.pxd000066400000000000000000000037401352460343600172110ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, The QuTiP Project. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### from qutip.cy.sparse_structs cimport CSR_Matrix cdef void fdense2D_to_CSR(complex[::1, :] mat, CSR_Matrix * out, unsigned int nrows, unsigned int ncols) qutip-4.4.1/qutip/cy/spconvert.pyx000066400000000000000000000216311352460343600172350ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, The QuTiP Project. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### import numpy as np from qutip.fastsparse import fast_csr_matrix cimport numpy as cnp cimport cython from libc.stdlib cimport div, malloc, free cdef extern from "stdlib.h": ctypedef struct div_t: int quot int rem include "sparse_routines.pxi" @cython.boundscheck(False) @cython.wraparound(False) def arr_coo2fast(complex[::1] data, int[::1] rows, int[::1] cols, int nrows, int ncols): """ Converts a set of ndarrays (data, rows, cols) that specify a COO sparse matrix to CSR format. """ cdef int nnz = data.shape[0] cdef COO_Matrix mat mat.data = &data[0] mat.rows = &rows[0] mat.cols = &cols[0] mat.nrows = nrows mat.ncols = ncols mat.nnz = nnz mat.is_set = 1 mat.max_length = nnz cdef CSR_Matrix out COO_to_CSR(&out, &mat) return CSR_to_scipy(&out) @cython.boundscheck(False) @cython.wraparound(False) def dense2D_to_fastcsr_cmode(complex[:, ::1] mat, int nrows, int ncols): """ Converts a dense c-mode complex ndarray to a sparse CSR matrix. Parameters ---------- mat : ndarray Input complex ndarray nrows : int Number of rows in matrix. ncols : int Number of cols in matrix. Returns ------- out : fast_csr_matrix Output matrix in CSR format. 
""" cdef int nnz = 0 cdef size_t ii, jj cdef np.ndarray[complex, ndim=1, mode='c'] data = np.zeros(nrows*ncols, dtype=complex) cdef np.ndarray[int, ndim=1, mode='c'] ind = np.zeros(nrows*ncols, dtype=np.int32) cdef np.ndarray[int, ndim=1, mode='c'] ptr = np.zeros(nrows+1, dtype=np.int32) for ii in range(nrows): for jj in range(ncols): if mat[ii,jj] != 0: ind[nnz] = jj data[nnz] = mat[ii,jj] nnz += 1 ptr[ii+1] = nnz if nnz < (nrows*ncols): return fast_csr_matrix((data[:nnz], ind[:nnz], ptr), shape=(nrows,ncols)) else: return fast_csr_matrix((data, ind, ptr), shape=(nrows,ncols)) @cython.boundscheck(False) @cython.wraparound(False) def dense1D_to_fastcsr_ket(complex[::1] vec): """ Converts a dense c-mode complex ndarray to a sparse CSR matrix. Parameters ---------- mat : ndarray Input complex ndarray Returns ------- out : fast_csr_matrix Output matrix in CSR format. """ cdef int nnz = 0 cdef size_t ii, nrows = vec.shape[0] cdef np.ndarray[complex, ndim=1, mode='c'] data = np.zeros(nrows, dtype=complex) cdef np.ndarray[int, ndim=1, mode='c'] ind = np.zeros(nrows, dtype=np.int32) cdef np.ndarray[int, ndim=1, mode='c'] ptr = np.zeros(nrows+1, dtype=np.int32) for ii in range(nrows): if vec[ii] != 0: data[nnz] = vec[ii] nnz += 1 ptr[ii+1] = nnz if nnz < (nrows): return fast_csr_matrix((data[:nnz], ind[:nnz], ptr), shape=(nrows,1)) else: return fast_csr_matrix((data, ind, ptr), shape=(nrows,1)) @cython.boundscheck(False) @cython.wraparound(False) cdef void fdense2D_to_CSR(complex[::1, :] mat, CSR_Matrix * out, unsigned int nrows, unsigned int ncols): """ Converts a dense complex ndarray to a CSR matrix struct. Parameters ---------- mat : ndarray Input complex ndarray nrows : int Number of rows in matrix. ncols : int Number of cols in matrix. Returns ------- out : CSR_Matrix Output matrix as CSR struct. 
""" cdef int nnz = 0 cdef size_t ii, jj init_CSR(out, nrows*ncols, nrows, ncols, nrows*ncols) for ii in range(nrows): for jj in range(ncols): if mat[ii,jj] != 0: out.indices[nnz] = jj out.data[nnz] = mat[ii,jj] nnz += 1 out.indptr[ii+1] = nnz if nnz < (nrows*ncols): shorten_CSR(out, nnz) @cython.boundscheck(False) @cython.wraparound(False) def dense2D_to_fastcsr_fmode(complex[::1, :] mat, int nrows, int ncols): """ Converts a dense fortran-mode complex ndarray to a sparse CSR matrix. Parameters ---------- mat : ndarray Input complex ndarray nrows : int Number of rows in matrix. ncols : int Number of cols in matrix. Returns ------- out : fast_csr_matrix Output matrix in CSR format. """ cdef int nnz = 0 cdef size_t ii, jj cdef np.ndarray[complex, ndim=1, mode='c'] data = np.zeros(nrows*ncols, dtype=complex) cdef np.ndarray[int, ndim=1, mode='c'] ind = np.zeros(nrows*ncols, dtype=np.int32) cdef np.ndarray[int, ndim=1, mode='c'] ptr = np.zeros(nrows+1, dtype=np.int32) for ii in range(nrows): for jj in range(ncols): if mat[ii,jj] != 0: ind[nnz] = jj data[nnz] = mat[ii,jj] nnz += 1 ptr[ii+1] = nnz if nnz < (nrows*ncols): return fast_csr_matrix((data[:nnz], ind[:nnz], ptr), shape=(nrows,ncols)) else: return fast_csr_matrix((data, ind, ptr), shape=(nrows,ncols)) @cython.boundscheck(False) @cython.wraparound(False) def zcsr_reshape(object A not None, int new_rows, int new_cols): """ Reshapes a complex CSR matrix. Parameters ---------- A : fast_csr_matrix Input CSR matrix. new_rows : int Number of rows in reshaped matrix. new_cols : int Number of cols in reshaped matrix. Returns ------- out : fast_csr_matrix Reshaped CSR matrix. Notes ----- This routine does not need to make a temp. copy of the matrix. 
""" cdef CSR_Matrix inmat = CSR_from_scipy(A) cdef COO_Matrix mat CSR_to_COO(&mat, &inmat) cdef CSR_Matrix out cdef div_t new_inds cdef size_t kk if (mat.nrows * mat.ncols) != (new_rows * new_cols): raise Exception('Total size of array must be unchanged.') for kk in range(mat.nnz): new_inds = div(mat.ncols*mat.rows[kk]+mat.cols[kk], new_cols) mat.rows[kk] = new_inds.quot mat.cols[kk] = new_inds.rem mat.nrows = new_rows mat.ncols = new_cols COO_to_CSR_inplace(&out, &mat) sort_indices(&out) return CSR_to_scipy(&out) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) def cy_index_permute(int [::1] idx_arr, int [::1] dims, int [::1] order): cdef int ndims = dims.shape[0] cdef int ii, n, dim, idx, orderr #the fastest way to allocate memory for a temporary array cdef int * multi_idx = malloc(sizeof(int) * ndims) try: for ii from 0 <= ii < idx_arr.shape[0]: idx = idx_arr[ii] #First, decompose long index into multi-index for n from ndims > n >= 0: dim = dims[n] multi_idx[n] = idx % dim idx = idx // dim #Finally, assemble new long index from reordered multi-index dim = 1 idx = 0 for n from ndims > n >= 0: orderr = order[n] idx += multi_idx[orderr] * dim dim *= dims[orderr] idx_arr[ii] = idx finally: free(multi_idx) qutip-4.4.1/qutip/cy/spmatfuncs.pxd000066400000000000000000000103541352460343600173500ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. 
Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### cimport numpy as cnp cimport cython from libcpp cimport bool include "parameters.pxi" cpdef cnp.ndarray[CTYPE_t, ndim=1, mode="c"] spmv_csr(complex[::1] data, int[::1] ind, int[::1] ptr, complex[::1] vec) cdef void spmvpy(complex * data, int * ind, int * ptr, complex * vec, complex a, complex * out, unsigned int nrows) cpdef cy_expect_rho_vec_csr(complex[::1] data, int[::1] idx, int[::1] ptr, complex[::1] rho_vec, int herm) cpdef cy_expect_psi(object A, complex[::1] vec, bool isherm) cpdef cy_expect_psi_csr(complex[::1] data, int[::1] ind, int[::1] ptr, complex[::1] vec, bool isherm) cdef void _spmm_c_py(complex * data, int * ind, int * ptr, complex * mat, complex a, complex * out, unsigned int sp_rows, unsigned int nrows, unsigned int ncols) cpdef void spmmpy_c(complex[::1] data, int[::1] ind, int[::1] ptr, complex[:,::1] M, complex a, complex[:,::1] out) cpdef cnp.ndarray[complex, ndim=1, mode="c"] spmmc(object sparse, complex[:,::1] mat) cdef void _spmm_f_py(complex * data, int * ind, int * ptr, complex * mat, complex a, complex * out, unsigned int sp_rows, unsigned int nrows, unsigned int ncols) cpdef void spmmpy_f(complex[::1] data, int[::1] ind, int[::1] ptr, complex[::1,:] mat, complex a, complex[::1,:] out) cpdef cnp.ndarray[complex, ndim=1, mode="c"] spmmf(object sparse, complex[::1,:] mat) cpdef cnp.ndarray[complex, ndim=1, mode="c"] spmm(object sparse, cnp.ndarray[complex, ndim=2] mat) qutip-4.4.1/qutip/cy/spmatfuncs.pyx000066400000000000000000000414471352460343600174040ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. 
Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### import numpy as np cimport numpy as cnp cimport cython cimport libc.math from libcpp cimport bool cdef extern from "src/zspmv.hpp" nogil: void zspmvpy(double complex *data, int *ind, int *ptr, double complex *vec, double complex a, double complex *out, int nrows) include "complex_math.pxi" @cython.boundscheck(False) @cython.wraparound(False) cpdef cnp.ndarray[complex, ndim=1, mode="c"] spmv( object super_op, complex[::1] vec): """ Sparse matrix, dense vector multiplication. Here the vector is assumed to have one-dimension. Matrix must be in CSR format and have complex entries. 
Parameters ---------- super_op : csr matrix vec : array Dense vector for multiplication. Must be one-dimensional. Returns ------- out : array Returns dense array. """ return spmv_csr(super_op.data, super_op.indices, super_op.indptr, vec) @cython.boundscheck(False) @cython.wraparound(False) cpdef cnp.ndarray[complex, ndim=1, mode="c"] spmv_csr(complex[::1] data, int[::1] ind, int[::1] ptr, complex[::1] vec): """ Sparse matrix, dense vector multiplication. Here the vector is assumed to have one-dimension. Matrix must be in CSR format and have complex entries. Parameters ---------- data : array Data for sparse matrix. idx : array Indices for sparse matrix data. ptr : array Pointers for sparse matrix data. vec : array Dense vector for multiplication. Must be one-dimensional. Returns ------- out : array Returns dense array. """ cdef unsigned int num_rows = ptr.shape[0] - 1 cdef cnp.ndarray[complex, ndim=1, mode="c"] out = np.zeros((num_rows), dtype=np.complex) zspmvpy(&data[0], &ind[0], &ptr[0], &vec[0], 1.0, &out[0], num_rows) return out @cython.boundscheck(False) @cython.wraparound(False) def spmvpy_csr(complex[::1] data, int[::1] ind, int[::1] ptr, complex[::1] vec, complex alpha, complex[::1] out): """ Sparse matrix, dense vector multiplication. Here the vector is assumed to have one-dimension. Matrix must be in CSR format and have complex entries. Parameters ---------- data : array Data for sparse matrix. idx : array Indices for sparse matrix data. ptr : array Pointers for sparse matrix data. vec : array Dense vector for multiplication. Must be one-dimensional. alpha : complex Numerical coefficient for sparse matrix. 
out: array Output array """ cdef unsigned int num_rows = vec.shape[0] zspmvpy(&data[0], &ind[0], &ptr[0], &vec[0], alpha, &out[0], num_rows) @cython.boundscheck(False) @cython.wraparound(False) cdef inline void spmvpy(complex* data, int* ind, int* ptr, complex* vec, complex a, complex* out, unsigned int nrows): zspmvpy(data, ind, ptr, vec, a, out, nrows) @cython.boundscheck(False) @cython.wraparound(False) cdef void _spmm_c_py(complex* data, int* ind, int* ptr, complex* mat, complex a, complex* out, unsigned int sp_rows, unsigned int nrows, unsigned int ncols): """ sparse*dense "C" ordered. """ cdef int row, col, ii, jj, row_start, row_end for row from 0 <= row < sp_rows : row_start = ptr[row] row_end = ptr[row+1] for jj from row_start <= jj < row_end: for col in range(ncols): out[row * ncols + col] += a*data[jj]*mat[ind[jj] * ncols + col] cpdef void spmmpy_c(complex[::1] data, int[::1] ind, int[::1] ptr, complex[:,::1] M, complex a, complex[:,::1] out): """ Sparse matrix, c ordered dense matrix multiplication. The sparse matrix must be in CSR format and have complex entries. Parameters ---------- data : array Data for sparse matrix. idx : array Indices for sparse matrix data. ptr : array Pointers for sparse matrix data. mat : array 2d Dense matrix for multiplication. Must be in c mode. alpha : complex Numerical coefficient for sparse matrix. out: array Output array. Must be in c mode. """ cdef unsigned int sp_rows = ptr.shape[0]-1 cdef unsigned int nrows = M.shape[0] cdef unsigned int ncols = M.shape[1] _spmm_c_py(&data[0], &ind[0], &ptr[0], &M[0,0], 1., &out[0,0], sp_rows, nrows, ncols) cpdef cnp.ndarray[complex, ndim=1, mode="c"] spmmc(object sparse, complex[:,::1] mat): """ Sparse matrix, c ordered dense matrix multiplication. The sparse matrix must be in CSR format and have complex entries. Parameters ---------- sparse : csr matrix mat : array 2d Dense matrix for multiplication. Must be in c mode. 
Returns ------- out : array Keep input ordering """ cdef unsigned int sp_rows = sparse.indptr.shape[0]-1 cdef unsigned int ncols = mat.shape[1] cdef cnp.ndarray[complex, ndim=2, mode="c"] out = \ np.zeros((sp_rows, ncols), dtype=complex) spmmpy_c(sparse.data, sparse.indices, sparse.indptr, mat, 1., out) return out @cython.boundscheck(False) @cython.wraparound(False) cdef void _spmm_f_py(complex* data, int* ind, int* ptr, complex* mat, complex a, complex* out, unsigned int sp_rows, unsigned int nrows, unsigned int ncols): """ sparse*dense "F" ordered. """ cdef int col for col in range(ncols): spmvpy(data, ind, ptr, mat+nrows*col, a, out+sp_rows*col, sp_rows) cpdef void spmmpy_f(complex[::1] data, int[::1] ind, int[::1] ptr, complex[::1,:] mat, complex a, complex[::1,:] out): """ Sparse matrix, fortran ordered dense matrix multiplication. The sparse matrix must be in CSR format and have complex entries. Parameters ---------- data : array Data for sparse matrix. idx : array Indices for sparse matrix data. ptr : array Pointers for sparse matrix data. mat : array 2d Dense matrix for multiplication. Must be in fortran mode. alpha : complex Numerical coefficient for sparse matrix. out: array Output array. Must be in fortran mode. """ cdef unsigned int sp_rows = ptr.shape[0]-1 cdef unsigned int nrows = mat.shape[0] cdef unsigned int ncols = mat.shape[1] _spmm_f_py(&data[0], &ind[0], &ptr[0], &mat[0,0], 1., &out[0,0], sp_rows, nrows, ncols) cpdef cnp.ndarray[complex, ndim=1, mode="c"] spmmf(object sparse, complex[::1,:] mat): """ Sparse matrix, fortran ordered dense matrix multiplication. The sparse matrix must be in CSR format and have complex entries. Parameters ---------- sparse : csr matrix mat : array 2d Dense matrix for multiplication. Must be in fortran mode. 
Returns ------- out : array Keep input ordering """ cdef unsigned int sp_rows = sparse.indptr.shape[0]-1 cdef unsigned int ncols = mat.shape[1] cdef cnp.ndarray[complex, ndim=2, mode="fortran"] out = \ np.zeros((sp_rows, ncols), dtype=complex, order="F") spmmpy_f(sparse.data, sparse.indices, sparse.indptr, mat, 1., out) return out cpdef cnp.ndarray[complex, ndim=1, mode="c"] spmm(object sparse, cnp.ndarray[complex, ndim=2] mat): """ Sparse matrix, dense matrix multiplication. The sparse matrix must be in CSR format and have complex entries. Parameters ---------- sparse : csr matrix mat : array 2d Dense matrix for multiplication. Can be in c or fortran mode. Returns ------- out : array Keep input ordering """ if mat.flags["F_CONTIGUOUS"]: return spmmf(sparse, mat) else: return spmmc(sparse, mat) @cython.boundscheck(False) @cython.wraparound(False) cpdef cnp.ndarray[complex, ndim=1, mode="c"] cy_ode_rhs( double t, complex[::1] rho, complex[::1] data, int[::1] ind, int[::1] ptr): cdef unsigned int nrows = rho.shape[0] cdef cnp.ndarray[complex, ndim=1, mode="c"] out = \ np.zeros(nrows, dtype=complex) zspmvpy(&data[0], &ind[0], &ptr[0], &rho[0], 1.0, &out[0], nrows) return out @cython.boundscheck(False) @cython.wraparound(False) cpdef cnp.ndarray[complex, ndim=1, mode="c"] cy_ode_psi_func_td( double t, cnp.ndarray[complex, ndim=1, mode="c"] psi, object H_func, object args): H = H_func(t, args).data return -1j * spmv_csr(H.data, H.indices, H.indptr, psi) @cython.boundscheck(False) @cython.wraparound(False) cpdef cnp.ndarray[complex, ndim=1, mode="c"] cy_ode_psi_func_td_with_state( double t, cnp.ndarray[complex, ndim=1, mode="c"] psi, object H_func, object args): H = H_func(t, psi, args) return -1j * spmv_csr(H.data, H.indices, H.indptr, psi) @cython.boundscheck(False) @cython.wraparound(False) cpdef cnp.ndarray[complex, ndim=1, mode="c"] cy_ode_rho_func_td( double t, cnp.ndarray[complex, ndim=1, mode="c"] rho, object L0, object L_func, object args): cdef object L L = L0 
+ L_func(t, args).data return spmv_csr(L.data, L.indices, L.indptr, rho) @cython.boundscheck(False) @cython.wraparound(False) cpdef cy_expect_psi(object A, complex[::1] vec, bool isherm): cdef complex[::1] data = A.data cdef int[::1] ind = A.indices cdef int[::1] ptr = A.indptr cdef size_t row, jj cdef int nrows = vec.shape[0] cdef complex expt = 0, temp, cval for row in range(nrows): cval = conj(vec[row]) temp = 0 for jj in range(ptr[row], ptr[row+1]): temp += data[jj]*vec[ind[jj]] expt += cval*temp if isherm : return real(expt) else: return expt @cython.boundscheck(False) @cython.wraparound(False) cpdef cy_expect_psi_csr(complex[::1] data, int[::1] ind, int[::1] ptr, complex[::1] vec, bool isherm): cdef size_t row, jj cdef int nrows = vec.shape[0] cdef complex expt = 0, temp, cval for row in range(nrows): cval = conj(vec[row]) temp = 0 for jj in range(ptr[row], ptr[row+1]): temp += data[jj]*vec[ind[jj]] expt += cval*temp if isherm : return real(expt) else: return expt @cython.boundscheck(False) @cython.wraparound(False) cpdef cy_expect_rho_vec(object super_op, complex[::1] rho_vec, int herm): return cy_expect_rho_vec_csr(super_op.data, super_op.indices, super_op.indptr, rho_vec, herm) @cython.boundscheck(False) @cython.wraparound(False) cpdef cy_expect_rho_vec_csr(complex[::1] data, int[::1] idx, int[::1] ptr, complex[::1] rho_vec, int herm): cdef size_t row cdef int jj,row_start,row_end cdef int num_rows = rho_vec.shape[0] cdef int n = libc.math.sqrt(num_rows) cdef complex dot = 0.0 for row from 0 <= row < num_rows by n+1: row_start = ptr[row] row_end = ptr[row+1] for jj from row_start <= jj < row_end: dot += data[jj]*rho_vec[idx[jj]] if herm == 0: return dot else: return real(dot) @cython.boundscheck(False) @cython.wraparound(False) cpdef cy_spmm_tr(object op1, object op2, int herm): cdef size_t row cdef complex tr = 0.0 cdef int col1, row1_idx_start, row1_idx_end cdef complex[::1] data1 = op1.data cdef int[::1] idx1 = op1.indices cdef int[::1] ptr1 = 
op1.indptr cdef int col2, row2_idx_start, row2_idx_end cdef complex[::1] data2 = op2.data cdef int[::1] idx2 = op2.indices cdef int[::1] ptr2 = op2.indptr cdef int num_rows = ptr1.shape[0]-1 for row in range(num_rows): row1_idx_start = ptr1[row] row1_idx_end = ptr1[row + 1] for row1_idx from row1_idx_start <= row1_idx < row1_idx_end: col1 = idx1[row1_idx] row2_idx_start = ptr2[col1] row2_idx_end = ptr2[col1 + 1] for row2_idx from row2_idx_start <= row2_idx < row2_idx_end: col2 = idx2[row2_idx] if col2 == row: tr += data1[row1_idx] * data2[row2_idx] break if herm == 0: return tr else: return real(tr) @cython.boundscheck(False) @cython.wraparound(False) def expect_csr_ket(object A, object B, int isherm): cdef complex[::1] Adata = A.data cdef int[::1] Aind = A.indices cdef int[::1] Aptr = A.indptr cdef complex[::1] Bdata = B.data cdef int[::1] Bptr = B.indptr cdef int nrows = A.shape[0] cdef int j cdef size_t ii, jj cdef double complex cval=0, row_sum, expt = 0 for ii in range(nrows): if (Bptr[ii+1] - Bptr[ii]) != 0: cval = conj(Bdata[Bptr[ii]]) row_sum = 0 for jj in range(Aptr[ii], Aptr[ii+1]): j = Aind[jj] if (Bptr[j+1] - Bptr[j]) != 0: row_sum += Adata[jj]*Bdata[Bptr[j]] expt += cval*row_sum if isherm: return real(expt) else: return expt @cython.boundscheck(False) @cython.wraparound(False) cpdef double complex zcsr_mat_elem(object A, object left, object right, bool bra_ket=1): """ Computes the matrix element for an operator A and left and right vectors. right must be a ket, but left can be a ket or bra vector. If left is bra then bra_ket = 1, else set bra_ket = 0. 
""" cdef complex[::1] Adata = A.data cdef int[::1] Aind = A.indices cdef int[::1] Aptr = A.indptr cdef int nrows = A.shape[0] cdef complex[::1] Ldata = left.data cdef int[::1] Lind = left.indices cdef int[::1] Lptr = left.indptr cdef int Lnnz = Lind.shape[0] cdef complex[::1] Rdata = right.data cdef int[::1] Rind = right.indices cdef int[::1] Rptr = right.indptr cdef int j, go, head=0 cdef size_t ii, jj, kk cdef double complex cval=0, row_sum, mat_elem=0 for ii in range(nrows): row_sum = 0 go = 0 if bra_ket: for kk in range(head, Lnnz): if Lind[kk] == ii: cval = Ldata[kk] head = kk go = 1 else: if (Lptr[ii] - Lptr[ii+1]) != 0: cval = conj(Ldata[Lptr[ii]]) go = 1 if go: for jj in range(Aptr[ii], Aptr[ii+1]): j = Aind[jj] if (Rptr[j] - Rptr[j+1]) != 0: row_sum += Adata[jj]*Rdata[Rptr[j]] mat_elem += cval*row_sum return mat_elem qutip-4.4.1/qutip/cy/spmath.pxd000066400000000000000000000053531352460343600164640ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, The QuTiP Project. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. 
# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### from qutip.cy.sparse_structs cimport CSR_Matrix cdef void _zcsr_add(CSR_Matrix * A, CSR_Matrix * B, CSR_Matrix * C, double complex alpha) cdef int _zcsr_add_core(double complex * Adata, int * Aind, int * Aptr, double complex * Bdata, int * Bind, int * Bptr, double complex alpha, CSR_Matrix * C, int nrows, int ncols) nogil cdef void _zcsr_mult(CSR_Matrix * A, CSR_Matrix * B, CSR_Matrix * C) cdef void _zcsr_kron(CSR_Matrix * A, CSR_Matrix * B, CSR_Matrix * C) cdef void _zcsr_kron_core(double complex * dataA, int * indsA, int * indptrA, double complex * dataB, int * indsB, int * indptrB, CSR_Matrix * out, int rowsA, int rowsB, int colsB) nogil cdef void _zcsr_transpose(CSR_Matrix * A, CSR_Matrix * B) cdef void _zcsr_adjoint(CSR_Matrix * A, CSR_Matrix * B) qutip-4.4.1/qutip/cy/spmath.pyx000066400000000000000000000537731352460343600165220ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, The QuTiP Project. # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### import numpy as np import qutip.settings as qset cimport numpy as cnp cimport cython from libcpp cimport bool cdef extern from "" namespace "std" nogil: double complex conj(double complex x) double real(double complex) double imag(double complex) double abs(double complex) include "sparse_routines.pxi" @cython.boundscheck(False) @cython.wraparound(False) def zcsr_add(complex[::1] dataA, int[::1] indsA, int[::1] indptrA, complex[::1] dataB, int[::1] indsB, int[::1] indptrB, int nrows, int ncols, int Annz, int Bnnz, double complex alpha = 1): """ Adds two sparse CSR matries. Like SciPy, we assume the worse case for the fill A.nnz + B.nnz. """ cdef int worse_fill = Annz + Bnnz cdef int nnz #Both matrices are zero mats if Annz == 0 and Bnnz == 0: return fast_csr_matrix(([], [], []), shape=(nrows,ncols)) #A is the zero matrix elif Annz == 0: return fast_csr_matrix((alpha*np.asarray(dataB), indsB, indptrB), shape=(nrows,ncols)) #B is the zero matrix elif Bnnz == 0: return fast_csr_matrix((dataA, indsA, indptrA), shape=(nrows,ncols)) # Out CSR_Matrix cdef CSR_Matrix out init_CSR(&out, worse_fill, nrows, ncols, worse_fill) nnz = _zcsr_add_core(&dataA[0], &indsA[0], &indptrA[0], &dataB[0], &indsB[0], &indptrB[0], alpha, &out, nrows, ncols) #Shorten data and indices if needed if out.nnz > nnz: shorten_CSR(&out, nnz) return CSR_to_scipy(&out) @cython.boundscheck(False) @cython.wraparound(False) cdef void _zcsr_add(CSR_Matrix * A, CSR_Matrix * B, CSR_Matrix * C, double complex alpha): """ Adds two sparse CSR matries. Like SciPy, we assume the worse case for the fill A.nnz + B.nnz. 
""" cdef int worse_fill = A.nnz + B.nnz cdef int nrows = A.nrows cdef int ncols = A.ncols cdef int nnz init_CSR(C, worse_fill, nrows, ncols, worse_fill) nnz = _zcsr_add_core(A.data, A.indices, A.indptr, B.data, B.indices, B.indptr, alpha, C, nrows, ncols) #Shorten data and indices if needed if C.nnz > nnz: shorten_CSR(C, nnz) @cython.boundscheck(False) @cython.wraparound(False) cdef int _zcsr_add_core(double complex * Adata, int * Aind, int * Aptr, double complex * Bdata, int * Bind, int * Bptr, double complex alpha, CSR_Matrix * C, int nrows, int ncols) nogil: cdef int j1, j2, kc = 0 cdef int ka, kb, ka_max, kb_max cdef size_t ii cdef double complex tmp C.indptr[0] = 0 if alpha != 1: for ii in range(nrows): ka = Aptr[ii] kb = Bptr[ii] ka_max = Aptr[ii+1]-1 kb_max = Bptr[ii+1]-1 while (ka <= ka_max) or (kb <= kb_max): if ka <= ka_max: j1 = Aind[ka] else: j1 = ncols+1 if kb <= kb_max: j2 = Bind[kb] else: j2 = ncols+1 if j1 == j2: tmp = Adata[ka] + alpha*Bdata[kb] if tmp != 0: C.data[kc] = tmp C.indices[kc] = j1 kc += 1 ka += 1 kb += 1 elif j1 < j2: C.data[kc] = Adata[ka] C.indices[kc] = j1 ka += 1 kc += 1 elif j1 > j2: C.data[kc] = alpha*Bdata[kb] C.indices[kc] = j2 kb += 1 kc += 1 C.indptr[ii+1] = kc else: for ii in range(nrows): ka = Aptr[ii] kb = Bptr[ii] ka_max = Aptr[ii+1]-1 kb_max = Bptr[ii+1]-1 while (ka <= ka_max) or (kb <= kb_max): if ka <= ka_max: j1 = Aind[ka] else: j1 = ncols+1 if kb <= kb_max: j2 = Bind[kb] else: j2 = ncols+1 if j1 == j2: tmp = Adata[ka] + Bdata[kb] if tmp != 0: C.data[kc] = tmp C.indices[kc] = j1 kc += 1 ka += 1 kb += 1 elif j1 < j2: C.data[kc] = Adata[ka] C.indices[kc] = j1 ka += 1 kc += 1 elif j1 > j2: C.data[kc] = Bdata[kb] C.indices[kc] = j2 kb += 1 kc += 1 C.indptr[ii+1] = kc return kc @cython.boundscheck(False) @cython.wraparound(False) def zcsr_mult(object A, object B, int sorted = 1): cdef complex [::1] dataA = A.data cdef int[::1] indsA = A.indices cdef int[::1] indptrA = A.indptr cdef int Annz = A.nnz cdef complex [::1] dataB 
= B.data cdef int[::1] indsB = B.indices cdef int[::1] indptrB = B.indptr cdef int Bnnz = B.nnz cdef int nrows = A.shape[0] cdef int ncols = B.shape[1] #Both matrices are zero mats if Annz == 0 or Bnnz == 0: return fast_csr_matrix(shape=(nrows,ncols)) cdef int nnz cdef CSR_Matrix out nnz = _zcsr_mult_pass1(&dataA[0], &indsA[0], &indptrA[0], &dataB[0], &indsB[0], &indptrB[0], nrows, ncols) if nnz == 0: return fast_csr_matrix(shape=(nrows,ncols)) init_CSR(&out, nnz, nrows, ncols) _zcsr_mult_pass2(&dataA[0], &indsA[0], &indptrA[0], &dataB[0], &indsB[0], &indptrB[0], &out, nrows, ncols) #Shorten data and indices if needed if out.nnz > out.indptr[out.nrows]: shorten_CSR(&out, out.indptr[out.nrows]) if sorted: sort_indices(&out) return CSR_to_scipy(&out) @cython.boundscheck(False) @cython.wraparound(False) cdef void _zcsr_mult(CSR_Matrix * A, CSR_Matrix * B, CSR_Matrix * C): nnz = _zcsr_mult_pass1(A.data, A.indices, A.indptr, B.data, B.indices, B.indptr, A.nrows, B.ncols) init_CSR(C, nnz, A.nrows, B.ncols) _zcsr_mult_pass2(A.data, A.indices, A.indptr, B.data, B.indices, B.indptr, C, A.nrows, B.ncols) #Shorten data and indices if needed if C.nnz > C.indptr[C.nrows]: shorten_CSR(C, C.indptr[C.nrows]) sort_indices(C) @cython.boundscheck(False) @cython.wraparound(False) cdef int _zcsr_mult_pass1(double complex * Adata, int * Aind, int * Aptr, double complex * Bdata, int * Bind, int * Bptr, int nrows, int ncols) nogil: cdef int j, k, nnz = 0 cdef size_t ii,jj,kk #Setup mask array cdef int * mask = PyDataMem_NEW(ncols*sizeof(int)) for ii in range(ncols): mask[ii] = -1 #Pass 1 for ii in range(nrows): for jj in range(Aptr[ii], Aptr[ii+1]): j = Aind[jj] for kk in range(Bptr[j], Bptr[j+1]): k = Bind[kk] if mask[k] != ii: mask[k] = ii nnz += 1 PyDataMem_FREE(mask) return nnz @cython.boundscheck(False) @cython.wraparound(False) cdef void _zcsr_mult_pass2(double complex * Adata, int * Aind, int * Aptr, double complex * Bdata, int * Bind, int * Bptr, CSR_Matrix * C, int nrows, int 
ncols) nogil: cdef int head, length, temp, j, k, nnz = 0 cdef size_t ii,jj,kk cdef double complex val cdef double complex * sums = PyDataMem_NEW_ZEROED(ncols, sizeof(double complex)) cdef int * nxt = PyDataMem_NEW(ncols*sizeof(int)) for ii in range(ncols): nxt[ii] = -1 C.indptr[0] = 0 for ii in range(nrows): head = -2 length = 0 for jj in range(Aptr[ii], Aptr[ii+1]): j = Aind[jj] val = Adata[jj] for kk in range(Bptr[j], Bptr[j+1]): k = Bind[kk] sums[k] += val*Bdata[kk] if nxt[k] == -1: nxt[k] = head head = k length += 1 for jj in range(length): if sums[head] != 0: C.indices[nnz] = head C.data[nnz] = sums[head] nnz += 1 temp = head head = nxt[head] nxt[temp] = -1 sums[temp] = 0 C.indptr[ii+1] = nnz #Free temp arrays PyDataMem_FREE(sums) PyDataMem_FREE(nxt) @cython.boundscheck(False) @cython.wraparound(False) def zcsr_kron(object A, object B): """ Computes the kronecker product between two complex sparse matrices in CSR format. """ cdef complex[::1] dataA = A.data cdef int[::1] indsA = A.indices cdef int[::1] indptrA = A.indptr cdef int rowsA = A.shape[0] cdef int colsA = A.shape[1] cdef complex[::1] dataB = B.data cdef int[::1] indsB = B.indices cdef int[::1] indptrB = B.indptr cdef int rowsB = B.shape[0] cdef int colsB = B.shape[1] cdef int out_nnz = _safe_multiply(dataA.shape[0], dataB.shape[0]) cdef int rows_out = rowsA * rowsB cdef int cols_out = colsA * colsB cdef CSR_Matrix out init_CSR(&out, out_nnz, rows_out, cols_out) _zcsr_kron_core(&dataA[0], &indsA[0], &indptrA[0], &dataB[0], &indsB[0], &indptrB[0], &out, rowsA, rowsB, colsB) return CSR_to_scipy(&out) @cython.boundscheck(False) @cython.wraparound(False) cdef void _zcsr_kron(CSR_Matrix * A, CSR_Matrix * B, CSR_Matrix * C): """ Computes the kronecker product between two complex sparse matrices in CSR format. 
""" cdef int out_nnz = _safe_multiply(A.nnz, B.nnz) cdef int rows_out = A.nrows * B.nrows cdef int cols_out = A.ncols * B.ncols init_CSR(C, out_nnz, rows_out, cols_out) _zcsr_kron_core(A.data, A.indices, A.indptr, B.data, B.indices, B.indptr, C, A.nrows, B.nrows, B.ncols) @cython.boundscheck(False) @cython.wraparound(False) cdef void _zcsr_kron_core(double complex * dataA, int * indsA, int * indptrA, double complex * dataB, int * indsB, int * indptrB, CSR_Matrix * out, int rowsA, int rowsB, int colsB) nogil: cdef size_t ii, jj, ptrA, ptr cdef int row = 0 cdef int ptr_start, ptr_end cdef int row_startA, row_endA, row_startB, row_endB, distA, distB, ptrB for ii in range(rowsA): row_startA = indptrA[ii] row_endA = indptrA[ii+1] distA = row_endA - row_startA for jj in range(rowsB): row_startB = indptrB[jj] row_endB = indptrB[jj+1] distB = row_endB - row_startB ptr_start = out.indptr[row] ptr_end = ptr_start + distB out.indptr[row+1] = out.indptr[row] + distA * distB row += 1 for ptrA in range(row_startA, row_endA): ptrB = row_startB for ptr in range(ptr_start, ptr_end): out.indices[ptr] = indsA[ptrA] * colsB + indsB[ptrB] out.data[ptr] = dataA[ptrA] * dataB[ptrB] ptrB += 1 ptr_start += distB ptr_end += distB @cython.boundscheck(False) @cython.wraparound(False) def zcsr_transpose(object A): """ Transpose of a sparse matrix in CSR format. """ cdef complex[::1] data = A.data cdef int[::1] ind = A.indices cdef int[::1] ptr = A.indptr cdef int nrows = A.shape[0] cdef int ncols = A.shape[1] cdef CSR_Matrix out init_CSR(&out, data.shape[0], ncols, nrows) _zcsr_trans_core(&data[0], &ind[0], &ptr[0], &out, nrows, ncols) return CSR_to_scipy(&out) @cython.boundscheck(False) @cython.wraparound(False) cdef void _zcsr_transpose(CSR_Matrix * A, CSR_Matrix * B): """ Transpose of a sparse matrix in CSR format. 
""" init_CSR(B, A.nnz, A.ncols, A.nrows) _zcsr_trans_core(A.data, A.indices, A.indptr, B, A.nrows, A.ncols) @cython.boundscheck(False) @cython.wraparound(False) cdef void _zcsr_trans_core(double complex * data, int * ind, int * ptr, CSR_Matrix * out, int nrows, int ncols) nogil: cdef int k, nxt cdef size_t ii, jj for ii in range(nrows): for jj in range(ptr[ii], ptr[ii+1]): k = ind[jj] + 1 out.indptr[k] += 1 for ii in range(ncols): out.indptr[ii+1] += out.indptr[ii] for ii in range(nrows): for jj in range(ptr[ii], ptr[ii+1]): k = ind[jj] nxt = out.indptr[k] out.data[nxt] = data[jj] out.indices[nxt] = ii out.indptr[k] = nxt + 1 for ii in range(ncols,0,-1): out.indptr[ii] = out.indptr[ii-1] out.indptr[0] = 0 @cython.boundscheck(False) @cython.wraparound(False) def zcsr_adjoint(object A): """ Adjoint of a sparse matrix in CSR format. """ cdef complex[::1] data = A.data cdef int[::1] ind = A.indices cdef int[::1] ptr = A.indptr cdef int nrows = A.shape[0] cdef int ncols = A.shape[1] cdef CSR_Matrix out init_CSR(&out, data.shape[0], ncols, nrows) _zcsr_adjoint_core(&data[0], &ind[0], &ptr[0], &out, nrows, ncols) return CSR_to_scipy(&out) @cython.boundscheck(False) @cython.wraparound(False) cdef void _zcsr_adjoint(CSR_Matrix * A, CSR_Matrix * B): """ Adjoint of a sparse matrix in CSR format. 
""" init_CSR(B, A.nnz, A.ncols, A.nrows) _zcsr_adjoint_core(A.data, A.indices, A.indptr, B, A.nrows, A.ncols) @cython.boundscheck(False) @cython.wraparound(False) cdef void _zcsr_adjoint_core(double complex * data, int * ind, int * ptr, CSR_Matrix * out, int nrows, int ncols) nogil: cdef int k, nxt cdef size_t ii, jj for ii in range(nrows): for jj in range(ptr[ii], ptr[ii+1]): k = ind[jj] + 1 out.indptr[k] += 1 for ii in range(ncols): out.indptr[ii+1] += out.indptr[ii] for ii in range(nrows): for jj in range(ptr[ii], ptr[ii+1]): k = ind[jj] nxt = out.indptr[k] out.data[nxt] = conj(data[jj]) out.indices[nxt] = ii out.indptr[k] = nxt + 1 for ii in range(ncols,0,-1): out.indptr[ii] = out.indptr[ii-1] out.indptr[0] = 0 @cython.boundscheck(False) @cython.wraparound(False) def zcsr_isherm(object A not None, double tol = qset.atol): """ Determines if a given input sparse CSR matrix is Hermitian to within a specified floating-point tolerance. Parameters ---------- A : csr_matrix Input sparse matrix. tol : float (default is atol from settings) Desired tolerance value. Returns ------- isherm : int One if matrix is Hermitian, zero otherwise. Notes ----- This implimentation is esentially an adjoint calulation where the data and indices are not stored, but checked elementwise to see if they match those of the input matrix. Thus we do not need to build the actual adjoint. Here we only need a temp array of output indptr. 
""" cdef complex[::1] data = A.data cdef int[::1] ind = A.indices cdef int[::1] ptr = A.indptr cdef int nrows = A.shape[0] cdef int ncols = A.shape[1] cdef int k, nxt, isherm = 1 cdef size_t ii, jj cdef complex tmp, tmp2 if nrows != ncols: return 0 cdef int * out_ptr = PyDataMem_NEW_ZEROED(ncols+1, sizeof(int)) for ii in range(nrows): for jj in range(ptr[ii], ptr[ii+1]): k = ind[jj] + 1 out_ptr[k] += 1 for ii in range(nrows): out_ptr[ii+1] += out_ptr[ii] for ii in range(nrows): for jj in range(ptr[ii], ptr[ii+1]): k = ind[jj] nxt = out_ptr[k] out_ptr[k] += 1 #structure test if ind[nxt] != ii: isherm = 0 break tmp = conj(data[jj]) tmp2 = data[nxt] #data test if abs(tmp-tmp2) > tol: isherm = 0 break else: continue break PyDataMem_FREE(out_ptr) return isherm @cython.overflowcheck(True) cdef _safe_multiply(int A, int B): """ Computes A*B and checks for overflow. """ cdef int C = A*B return C @cython.boundscheck(False) @cython.wraparound(False) def zcsr_trace(object A, bool isherm): cdef complex[::1] data = A.data cdef int[::1] ind = A.indices cdef int[::1] ptr = A.indptr cdef int nrows = ptr.shape[0]-1 cdef size_t ii, jj cdef complex tr = 0 for ii in range(nrows): for jj in range(ptr[ii], ptr[ii+1]): if ind[jj] == ii: tr += data[jj] break if imag(tr) == 0 or isherm: return real(tr) else: return tr @cython.boundscheck(False) @cython.wraparound(False) def zcsr_proj(object A, bool is_ket=1): """ Computes the projection operator from a given ket or bra vector in CSR format. The flag 'is_ket' is True if passed a ket. This is ~3x faster than doing the conjugate transpose and sparse multiplication directly. Also, does not need a temp matrix. 
""" cdef complex[::1] data = A.data cdef int[::1] ind = A.indices cdef int[::1] ptr = A.indptr cdef int nrows cdef int nnz cdef int offset = 0, new_idx, count, change_idx cdef size_t jj, kk if is_ket: nrows = A.shape[0] nnz = ptr[nrows] else: nrows = A.shape[1] nnz = ptr[1] cdef CSR_Matrix out init_CSR(&out, nnz**2, nrows) if is_ket: #Compute new ptrs and inds for jj in range(nrows): out.indptr[jj] = ptr[jj]*nnz if ptr[jj+1] != ptr[jj]: new_idx = jj for kk in range(nnz): out.indices[offset+kk*nnz] = new_idx offset += 1 #set nnz in new ptr out.indptr[nrows] = nnz**2 #Compute the data for jj in range(nnz): for kk in range(nnz): out.data[jj*nnz+kk] = data[jj]*conj(data[kk]) else: count = nnz**2 new_idx = nrows for kk in range(nnz-1,-1,-1): for jj in range(nnz-1,-1,-1): out.indices[offset+jj] = ind[jj] out.data[kk*nnz+jj] = conj(data[kk])*data[jj] offset += nnz change_idx = ind[kk] while new_idx > change_idx: out.indptr[new_idx] = count new_idx -= 1 count -= nnz return CSR_to_scipy(&out) @cython.boundscheck(False) @cython.wraparound(False) def zcsr_inner(object A, object B, bool bra_ket): """ Computes the inner-product between ket-ket, or bra-ket vectors in sparse CSR format. 
""" cdef complex[::1] a_data = A.data cdef int[::1] a_ind = A.indices cdef int[::1] a_ptr = A.indptr cdef complex[::1] b_data = B.data cdef int[::1] b_ind = B.indices cdef int[::1] b_ptr = B.indptr cdef int nrows = B.shape[0] cdef double complex inner = 0 cdef size_t jj, kk cdef int a_idx, b_idx if bra_ket: for kk in range(a_ind.shape[0]): a_idx = a_ind[kk] for jj in range(nrows): if (b_ptr[jj+1]-b_ptr[jj]) != 0: if jj == a_idx: inner += a_data[kk]*b_data[b_ptr[jj]] break else: for kk in range(nrows): a_idx = a_ptr[kk] b_idx = b_ptr[kk] if (a_ptr[kk+1]-a_idx) != 0: if (b_ptr[kk+1]-b_idx) != 0: inner += conj(a_data[a_idx])*b_data[b_idx] return inner qutip-4.4.1/qutip/cy/src/000077500000000000000000000000001352460343600152345ustar00rootroot00000000000000qutip-4.4.1/qutip/cy/src/zspmv.cpp000066400000000000000000000161771352460343600171330ustar00rootroot00000000000000// This file is part of QuTiP: Quantum Toolbox in Python. // // Copyright (c) 2011 and later, QuSTaR. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names // of its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A // PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT // HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. //############################################################################# #include #if defined(__GNUC__) && defined(__SSE3__) // Using GCC or CLANG and SSE3 #include void zspmvpy(const std::complex * __restrict__ data, const int * __restrict__ ind, const int * __restrict__ ptr, const std::complex * __restrict__ vec, const std::complex a, std::complex * __restrict__ out, const unsigned int nrows) { size_t row, jj; unsigned int row_start, row_end; __m128d num1, num2, num3, num4; for (row=0; row < nrows; row++) { num4 = _mm_setzero_pd(); row_start = ptr[row]; row_end = ptr[row+1]; for (jj=row_start; jj (data[jj])[0]); num2 = _mm_set_pd(std::imag(vec[ind[jj]]),std::real(vec[ind[jj]])); num3 = _mm_mul_pd(num2, num1); num1 = _mm_loaddup_pd(&reinterpret_cast(data[jj])[1]); num2 = _mm_shuffle_pd(num2, num2, 1); num2 = _mm_mul_pd(num2, num1); num3 = _mm_addsub_pd(num3, num2); num4 = _mm_add_pd(num3, num4); } num1 = _mm_loaddup_pd(&reinterpret_cast(a)[0]); num3 = _mm_mul_pd(num4, num1); num1 = _mm_loaddup_pd(&reinterpret_cast(a)[1]); num4 = _mm_shuffle_pd(num4, num4, 1); num4 = _mm_mul_pd(num4, num1); num3 = _mm_addsub_pd(num3, num4); num2 = _mm_loadu_pd((double *)&out[row]); num3 = _mm_add_pd(num2, num3); _mm_storeu_pd((double *)&out[row], num3); } } #elif defined(__GNUC__) // Using GCC or CLANG but no SSE3 void zspmvpy(const std::complex * __restrict__ data, const int * __restrict__ ind, const int * __restrict__ ptr, const std::complex * 
__restrict__ vec, const std::complex a, std::complex * __restrict__ out, const unsigned int nrows) { size_t row, jj; unsigned int row_start, row_end; std::complex dot; for (row=0; row < nrows; row++) { dot = 0; row_start = ptr[row]; row_end = ptr[row+1]; for (jj=row_start; jj void zspmvpy(const std::complex * __restrict data, const int * __restrict ind, const int * __restrict ptr, const std::complex * __restrict vec, const std::complex a, std::complex * __restrict out, const unsigned int nrows) { size_t row, jj; unsigned int row_start, row_end; __m128d num1, num2, num3, num4; for (row=0; row < nrows; row++) { num4 = _mm_setzero_pd(); row_start = ptr[row]; row_end = ptr[row+1]; for (jj=row_start; jj (data[jj])[0]); num2 = _mm_set_pd(std::imag(vec[ind[jj]]),std::real(vec[ind[jj]])); num3 = _mm_mul_pd(num2, num1); num1 = _mm_loaddup_pd(&reinterpret_cast(data[jj])[1]); num2 = _mm_shuffle_pd(num2, num2, 1); num2 = _mm_mul_pd(num2, num1); num3 = _mm_addsub_pd(num3, num2); num4 = _mm_add_pd(num3, num4); } num1 = _mm_loaddup_pd(&reinterpret_cast(a)[0]); num3 = _mm_mul_pd(num4, num1); num1 = _mm_loaddup_pd(&reinterpret_cast(a)[1]); num4 = _mm_shuffle_pd(num4, num4, 1); num4 = _mm_mul_pd(num4, num1); num3 = _mm_addsub_pd(num3, num4); num2 = _mm_loadu_pd((double *)&out[row]); num3 = _mm_add_pd(num2, num3); _mm_storeu_pd((double *)&out[row], num3); } } #elif defined(_MSC_VER) // Visual Studio no AVX void zspmvpy(const std::complex * __restrict data, const int * __restrict ind, const int * __restrict ptr, const std::complex * __restrict vec, const std::complex a, std::complex * __restrict out, const unsigned int nrows) { size_t row, jj; unsigned int row_start, row_end; std::complex dot; for (row=0; row < nrows; row++) { dot = 0; row_start = ptr[row]; row_end = ptr[row+1]; for (jj=row_start; jj * data, const int * ind, const int * ptr, const std::complex * vec, const std::complex a, std::complex * out, const unsigned int nrows) { size_t row, jj; unsigned int row_start, row_end; 
std::complex dot; for (row=0; row < nrows; row++) { dot = 0; row_start = ptr[row]; row_end = ptr[row+1]; for (jj=row_start; jj #ifdef __GNUC__ void zspmvpy(const std::complex * __restrict__ data, const int * __restrict__ ind, const int *__restrict__ ptr, const std::complex * __restrict__ vec, const std::complex a, std::complex * __restrict__ out, const unsigned int nrows); #elif defined(_MSC_VER) void zspmvpy(const std::complex * __restrict data, const int * __restrict ind, const int *__restrict ptr, const std::complex * __restrict vec, const std::complex a, std::complex * __restrict out, const unsigned int nrows); #else void zspmvpy(const std::complex * data, const int * ind, const int * ptr, const std::complex * vec, const std::complex a, std::complex * out, const unsigned int nrows); #endifqutip-4.4.1/qutip/cy/stochastic.pyx000066400000000000000000002512171352460343600173630ustar00rootroot00000000000000#!python #cython: language_level=3 # This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. 
# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### import numpy as np cimport numpy as np cimport cython from libc.math cimport fabs from qutip.cy.cqobjevo cimport CQobjEvo from qutip.cy.brtools cimport ZHEEVR from qutip.qobj import Qobj from qutip.superoperator import vec2mat include "parameters.pxi" include "complex_math.pxi" import scipy.sparse as sp from scipy.sparse.linalg import LinearOperator from scipy.linalg.cython_blas cimport zaxpy, zdotu, zdotc, zcopy, zdscal, zscal from scipy.linalg.cython_blas cimport dznrm2 as raw_dznrm2 cdef int ZERO=0 cdef double DZERO=0 cdef complex ZZERO=0j cdef int ONE=1 """Some of blas wrapper""" @cython.boundscheck(False) cdef void _axpy(complex a, complex[::1] x, complex[::1] y): """ y += a*x""" cdef int l = x.shape[0] zaxpy(&l, &a, &x[0], &ONE, &y[0], &ONE) @cython.boundscheck(False) cdef void copy(complex[::1] x, complex[::1] y): """ y = x """ cdef int l = x.shape[0] zcopy(&l, &x[0], &ONE, &y[0], &ONE) @cython.boundscheck(False) cdef complex _dot(complex[::1] x, complex[::1] y): """ = x_i * y_i """ cdef int l = x.shape[0] return zdotu(&l, &x[0], &ONE, &y[0], &ONE) @cython.boundscheck(False) cdef complex _dotc(complex[::1] x, 
complex[::1] y): """ = conj(x_i) * y_i """ cdef int l = x.shape[0] return zdotc(&l, &x[0], &ONE, &y[0], &ONE) @cython.boundscheck(False) cdef double _dznrm2(complex[::1] vec): """ = sqrt( x_i**2 ) """ cdef int l = vec.shape[0] return raw_dznrm2(&l, &vec[0], &ONE) @cython.boundscheck(False) cdef void _scale(double a, complex[::1] x): """ x *= a """ cdef int l = x.shape[0] zdscal(&l, &a, &x[0], &ONE) @cython.boundscheck(False) cdef void _zscale(complex a, complex[::1] x): """ x *= a """ cdef int l = x.shape[0] zscal(&l, &a, &x[0], &ONE) @cython.boundscheck(False) cdef void _zero(complex[::1] x): """ x *= 0 """ cdef int l = x.shape[0] zdscal(&l, &DZERO, &x[0], &ONE) @cython.boundscheck(False) cdef void _zero_2d(complex[:,::1] x): """ x *= 0 """ cdef int l = x.shape[0]*x.shape[1] zdscal(&l, &DZERO, &x[0,0], &ONE) @cython.boundscheck(False) cdef void _zero_3d(complex[:,:,::1] x): """ x *= 0 """ cdef int l = x.shape[0]*x.shape[1]*x.shape[2] zdscal(&l, &DZERO, &x[0,0,0], &ONE) @cython.boundscheck(False) cdef void _zero_4d(complex[:,:,:,::1] x): """ x *= 0 """ cdef int l = x.shape[0]*x.shape[1]*x.shape[2]*x.shape[3] zdscal(&l, &DZERO, &x[0,0,0,0], &ONE) # %%%%%%%%%%%%%%%%%%%%%%%%% # functions for ensuring that the states stay physical @cython.cdivision(True) @cython.boundscheck(False) cdef void _normalize_inplace(complex[::1] vec): """ make norm of vec equal to 1""" cdef int l = vec.shape[0] cdef double norm = 1.0/_dznrm2(vec) zdscal(&l, &norm, &vec[0], &ONE) # to move eventually, 10x faster than scipy's norm. @cython.cdivision(True) @cython.boundscheck(False) def normalize_inplace(complex[::1] vec): """ make norm of vec equal to 1""" cdef int l = vec.shape[0] cdef double norm = 1.0/_dznrm2(vec) zdscal(&l, &norm, &vec[0], &ONE) return fabs(norm-1) @cython.cdivision(True) @cython.boundscheck(False) cdef void _normalize_rho(complex[::1] rho): """ Ensure that the density matrix trace is one and that the composing states are normalized. 
""" cdef int l = rho.shape[0] cdef int N = np.sqrt(l) cdef complex[::1,:] mat = np.reshape(rho, (N,N), order="F") cdef complex[::1,:] eivec = np.zeros((N,N), dtype=complex, order="F") cdef double[::1] eival = np.zeros(N) ZHEEVR(mat, &eival[0], eivec, N) _zero(rho) cdef int i, j, k cdef double sum sum = 0. for i in range(N): _normalize_inplace(eivec[:,i]) if eival[i] < 0: eival[i] = 0. sum += eival[i] if sum != 1.: for i in range(N): eival[i] /= sum for i in range(N): for j in range(N): for k in range(N): rho[j+N*k] += conj(eivec[k,i])*eivec[j,i]*eival[i] # Available solvers: cpdef enum Solvers: # order 0.5 EULER_SOLVER = 50 # order 0.5 strong, 1.0 weak? PC_SOLVER = 101 PC_2_SOLVER = 104 # order 1.0 PLATEN_SOLVER = 100 MILSTEIN_SOLVER = 102 MILSTEIN_IMP_SOLVER = 103 # order 1.5 EXPLICIT1_5_SOLVER = 150 TAYLOR1_5_SOLVER = 152 TAYLOR1_5_IMP_SOLVER = 153 # order 2.0 TAYLOR2_0_SOLVER = 202 # Special solvers PHOTOCURRENT_SOLVER = 60 PHOTOCURRENT_PC_SOLVER = 110 ROUCHON_SOLVER = 120 # For initialisation SOLVER_NOT_SET = 0 cdef class TaylorNoise: """ Object to build the Stratonovich integral for order 2.0 strong taylor. Complex enough that I fell it should be kept separated from the main solver. """ cdef: int p double rho, alpha double aFactor, bFactor double BFactor, CFactor double dt, dt_sqrt @cython.cdivision(True) def __init__(self, int p, double dt): self.p = p self.dt = dt self.dt_sqrt = dt**.5 cdef double pi = np.pi cdef int i cdef double rho = 0. for i in range(1,p+1): rho += (i+0.)**-2 rho = 1./3.-2*rho/(pi**2) self.rho = (rho)**.5 self.aFactor = -(2)**.5/pi cdef double alpha = 0. for i in range(1,p+1): alpha += (i+0.)**-4 alpha = pi/180-alpha/(2*pi**2)/pi self.alpha = (alpha)**.5 self.bFactor = (0.5)**.5/pi**2 self.BFactor = 1/(4*pi**2) self.CFactor = -1/(2*pi**2) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cpdef void order2(self, double[::1] noise, double[::1] dws): cdef int p = self.p cdef int r, l cdef double s = 1/6. 
cdef double a = 0 cdef double b = 0 cdef double AA = 0 cdef double BB = 0 cdef double CC = 0 for r in range(p): a += noise[3+r]/(r+1.) b += noise[3+r+p]/(r+1.)/(r+1.) BB += (1/(r+1.)/(r+1.)) *\ (noise[3+r]*noise[3+r]+noise[3+r+p]*noise[3+r+p]) for l in range(p): if r != l: CC += (r+1.)/((r+1.)*(r+1.)-(l+1.)*(l+1.)) *\ (1/(l+1.)*noise[3+r]*noise[3+l] - \ (l+1.)/(r+1.)*noise[3+r+p]*noise[3+l+p]) a = self.aFactor * a + self.rho * noise[1] b = self.bFactor * b + self.alpha * noise[2] AA = 0.25*a*a BB *= self.BFactor CC *= self.CFactor dws[0] = noise[0] # dw dws[1] = 0.5*(noise[0]+a) # dz dws[2] = noise[0]*(noise[0]*s -0.25*a -0.5*b) +BB +CC # j011 dws[3] = noise[0]*(noise[0]*s + b) -AA -BB # j101 dws[4] = noise[0]*(noise[0]*s +0.25*a -0.5*b) +AA -CC # j110 cdef class StochasticSolver: """ stochastic solver class base Does most of the initialisation, drive the simulation and contain the stochastic integration algorythm that do not depend on the physics. This class is not to be used as is, the function computing the evolution's derivative are specified in it's child class which define the deterministic and stochastic contributions. PYTHON METHODS: set_solver: Receive the data for the integration. Prepare buffers cy_sesolve_single_trajectory: Run one trajectory. INTERNAL METHODS make_noise: create the stochastic noise run: evolution between timestep (substep) solver's method: stochastic integration algorithm euler milstein taylor ... 
CHILD: SSESolver: stochastic schrodinger evolution SMESolver: stochastic master evolution PcSSESolver: photocurrent stochastic schrodinger evolution PcSMESolver: photocurrent stochastic master evolution PmSMESolver: positive map stochastic master evolution GenericSSolver: general (user defined) stochastic evolution CHILD METHODS: set_data: Read data about the system d1: deterministic part d2: non-deterministic part derivatives: d1, d2 and their derivatives up to dt**1.5 multiple sc_ops derivativesO2: d1, d2 and there derivatives up to dt**2.0 one sc_ops """ cdef int l_vec, num_ops cdef Solvers solver cdef int num_step, num_substeps, num_dw cdef int normalize cdef double dt cdef int noise_type cdef object custom_noise cdef double[::1] dW_factor cdef unsigned int[::1] seed cdef object sso # buffer to not redo the initialisation at each substep cdef complex[:, ::1] buffer_1d cdef complex[:, :, ::1] buffer_2d cdef complex[:, :, :, ::1] buffer_3d cdef complex[:, :, :, ::1] buffer_4d cdef complex[:, ::1] expect_buffer_1d cdef complex[:, ::1] expect_buffer_2d cdef complex[:, :, ::1] expect_buffer_3d cdef complex[:, ::1] func_buffer_1d cdef complex[:, ::1] func_buffer_2d cdef complex[:, :, ::1] func_buffer_3d cdef TaylorNoise order2noise def __init__(self): self.l_vec = 0 self.num_ops = 0 self.solver = SOLVER_NOT_SET def set_solver(self, sso): """ Prepare the solver from the info in StochasticSolverOptions Parameters ---------- sso : StochasticSolverOptions Data of the stochastic system """ self.set_data(sso) self.sso = sso self.solver = sso.solver_code self.dt = sso.dt self.num_substeps = sso.nsubsteps self.normalize = sso.normalize self.num_step = len(sso.times) self.num_dw = len(sso.sops) if self.solver in [EXPLICIT1_5_SOLVER, TAYLOR1_5_SOLVER, TAYLOR1_5_IMP_SOLVER]: self.num_dw *= 2 if self.solver in [TAYLOR2_0_SOLVER]: self.num_dw *= 3 + 2*sso.p self.order2noise = TaylorNoise(sso.p, self.dt) # prepare buffers for the solvers nb_solver = [0,0,0,0] nb_func = [0,0,0] 
nb_expect = [0,0,0] # %%%%%%%%%%%%%%%%%%%%%%%%% # Depending on the solver, determine the numbers of buffers of each # shape to prepare. (~30% slower when not preallocating buffer) # nb_solver : buffer to contain the states used by solver # nb_func : buffer for states used used by d1, d2 and derivative functions # nb_expect : buffer to store expectation values. if self.solver is EULER_SOLVER: nb_solver = [0,1,0,0] elif self.solver is PHOTOCURRENT_SOLVER: nb_solver = [1,0,0,0] nb_func = [1,0,0] elif self.solver is PLATEN_SOLVER: nb_solver = [2,5,0,0] elif self.solver is PC_SOLVER: nb_solver = [4,1,1,0] elif self.solver is MILSTEIN_SOLVER: nb_solver = [0,1,1,0] elif self.solver is MILSTEIN_IMP_SOLVER: nb_solver = [1,1,1,0] elif self.solver is PC_2_SOLVER: nb_solver = [5,1,1,0] elif self.solver is PHOTOCURRENT_PC_SOLVER: nb_solver = [1,1,0,0] nb_func = [1,0,0] elif self.solver is ROUCHON_SOLVER: nb_solver = [2,0,0,0] elif self.solver is EXPLICIT1_5_SOLVER: nb_solver = [5,8,3,0] elif self.solver is TAYLOR1_5_SOLVER: nb_solver = [2,3,1,1] elif self.solver is TAYLOR1_5_IMP_SOLVER: nb_solver = [2,3,1,1] elif self.solver is TAYLOR2_0_SOLVER: nb_solver = [11,0,0,0] if self.solver in [PC_SOLVER, MILSTEIN_SOLVER, MILSTEIN_IMP_SOLVER, PC_2_SOLVER, TAYLOR1_5_SOLVER, TAYLOR1_5_IMP_SOLVER]: if sso.me: nb_func = [1,0,0] nb_expect = [1,1,0] else: nb_func = [2,1,1] nb_expect = [2,1,1] elif self.solver in [PHOTOCURRENT_SOLVER, PHOTOCURRENT_PC_SOLVER]: nb_expect = [1,0,0] elif self.solver is TAYLOR2_0_SOLVER: if sso.me: nb_func = [2,0,0] nb_expect = [2,0,0] else: nb_func = [14,0,0] nb_expect = [0,0,0] elif self.solver is ROUCHON_SOLVER: nb_expect = [1,0,0] else: if not sso.me: nb_func = [1,0,0] self.buffer_1d = np.zeros((nb_solver[0], self.l_vec), dtype=complex) self.buffer_2d = np.zeros((nb_solver[1], self.num_ops, self.l_vec), dtype=complex) self.buffer_3d = np.zeros((nb_solver[2], self.num_ops, self.num_ops, self.l_vec), dtype=complex) if nb_solver[3]: self.buffer_4d = 
np.zeros((self.num_ops, self.num_ops, self.num_ops, self.l_vec), dtype=complex) self.expect_buffer_1d = np.zeros((nb_expect[0], self.num_ops), dtype=complex) if nb_expect[1]: self.expect_buffer_2d = np.zeros((self.num_ops, self.num_ops), dtype=complex) if nb_expect[2]: self.expect_buffer_3d = np.zeros((self.num_ops, self.num_ops, self.num_ops), dtype=complex) self.func_buffer_1d = np.zeros((nb_func[0], self.l_vec), dtype=complex) if nb_func[1]: self.func_buffer_2d = np.zeros((self.num_ops, self.l_vec), dtype=complex) if nb_func[2]: self.func_buffer_3d = np.zeros((self.num_ops, self.num_ops, self.l_vec), dtype=complex) self.noise_type = sso.noise_type self.dW_factor = np.array(sso.dW_factors, dtype=np.float64) if self.noise_type == 1: self.custom_noise = sso.noise elif self.noise_type == 0: self.seed = sso.noise def set_data(self, sso): """Set solver specific operator""" pass cdef np.ndarray[double, ndim=3] make_noise(self, int n): """Create the random numbers for the stochastic process""" if self.solver in [PHOTOCURRENT_SOLVER, PHOTOCURRENT_PC_SOLVER] and self.noise_type == 0: # photocurrent, just seed, np.random.seed(self.seed[n]) return np.zeros((self.num_step, self.num_substeps, self.num_dw)) if self.noise_type == 0: np.random.seed(self.seed[n]) return np.random.randn(self.num_step, self.num_substeps, self.num_dw) *\ np.sqrt(self.dt) elif self.noise_type == 1: return self.custom_noise[n,:,:,:] @cython.boundscheck(False) @cython.wraparound(False) def cy_sesolve_single_trajectory(self, int n): """ Run the one of the trajectories of the stochastic system. 
Parameters ---------- n : int Number of the iterations sso : StochasticSolverOptions Data of the stochastic system Returns ------- states_list : list of qobj State of the system at each time noise : array noise at each step of the solver measurements : array measurements value at each timestep for each m_ops expect : array expectation value at each timestep for each e_ops """ sso = self.sso cdef double[::1] times = sso.times cdef complex[::1] rho_t cdef double t cdef int m_idx, t_idx, e_idx cdef np.ndarray[double, ndim=3] noise = self.make_noise(n) cdef int tlast = times.shape[0] rho_t = sso.rho0.copy() dims = sso.state0.dims expect = np.zeros((len(sso.ce_ops), len(sso.times)), dtype=complex) measurements = np.zeros((len(times), len(sso.cm_ops)), dtype=complex) states_list = [] for t_idx, t in enumerate(times): if sso.ce_ops: for e_idx, e in enumerate(sso.ce_ops): s = e.compiled_qobjevo.expect(t, rho_t) expect[e_idx, t_idx] = s if sso.store_states or not sso.ce_ops: if sso.me: states_list.append(Qobj(vec2mat(np.asarray(rho_t)), dims=dims)) else: states_list.append(Qobj(np.asarray(rho_t), dims=dims)) if t_idx != tlast-1: rho_t = self.run(t, self.dt, noise[t_idx, :, :], rho_t, self.num_substeps) if sso.store_measurement: for m_idx, m in enumerate(sso.cm_ops): m_expt = m.compiled_qobjevo.expect(t, rho_t) measurements[t_idx, m_idx] = m_expt + self.dW_factor[m_idx] * \ sum(noise[t_idx, :, m_idx]) / (self.dt * self.num_substeps) if sso.method == 'heterodyne': measurements = measurements.reshape(len(times), len(sso.cm_ops)//2, 2) return states_list, noise, measurements, expect @cython.boundscheck(False) cdef complex[::1] run(self, double t, double dt, double[:, ::1] noise, complex[::1] vec, int num_substeps): """ Do one time full step""" cdef complex[::1] out = np.zeros(self.l_vec, dtype=complex) cdef int i if self.solver is EULER_SOLVER: for i in range(num_substeps): self.euler(t + i*dt, dt, noise[i, :], vec, out) out, vec = vec, out elif self.solver is 
PHOTOCURRENT_SOLVER: for i in range(num_substeps): self.photocurrent(t + i*dt, dt, noise[i, :], vec, out) out, vec = vec, out elif self.solver is PLATEN_SOLVER: for i in range(num_substeps): self.platen(t + i*dt, dt, noise[i, :], vec, out) out, vec = vec, out elif self.solver is PC_SOLVER: for i in range(num_substeps): self.pred_corr(t + i*dt, dt, noise[i, :], vec, out) out, vec = vec, out elif self.solver is MILSTEIN_SOLVER: for i in range(num_substeps): self.milstein(t + i*dt, dt, noise[i, :], vec, out) out, vec = vec, out elif self.solver is MILSTEIN_IMP_SOLVER: for i in range(num_substeps): self.milstein_imp(t + i*dt, dt, noise[i, :], vec, out) out, vec = vec, out elif self.solver is PC_2_SOLVER: for i in range(num_substeps): self.pred_corr_a(t + i*dt, dt, noise[i, :], vec, out) out, vec = vec, out elif self.solver is PHOTOCURRENT_PC_SOLVER: for i in range(num_substeps): self.photocurrent_pc(t + i*dt, dt, noise[i, :], vec, out) out, vec = vec, out elif self.solver is ROUCHON_SOLVER: for i in range(num_substeps): self.rouchon(t + i*dt, dt, noise[i, :], vec, out) out, vec = vec, out elif self.solver is EXPLICIT1_5_SOLVER: for i in range(num_substeps): self.platen15(t + i*dt, dt, noise[i, :], vec, out) out, vec = vec, out elif self.solver is TAYLOR1_5_SOLVER: for i in range(num_substeps): self.taylor15(t + i*dt, dt, noise[i, :], vec, out) out, vec = vec, out elif self.solver is TAYLOR1_5_IMP_SOLVER: for i in range(num_substeps): self.taylor15_imp(t + i*dt, dt, noise[i, :], vec, out) out, vec = vec, out elif self.solver is TAYLOR2_0_SOLVER: for i in range(num_substeps): self.taylor20(t + i*dt, dt, noise[i, :], vec, out) out, vec = vec, out if self.normalize: self._normalize_inplace(vec) return vec cdef void _normalize_inplace(self, complex[::1] vec): _normalize_inplace(vec) # Dummy functions # Needed for compilation since ssesolve is not stand-alone cdef void d1(self, double t, complex[::1] v, complex[::1] out): """ deterministic part of the evolution depend on 
schrodinger vs master vs photocurrent """ pass cdef void d2(self, double t, complex[::1] v, complex[:, ::1] out): """ stochastic part of the evolution depend on schrodinger vs master vs photocurrent """ pass cdef void implicit(self, double t, np.ndarray[complex, ndim=1] dvec, complex[::1] out, np.ndarray[complex, ndim=1] guess): """ Do the step X(t+dt) = f(X(t+dt)) + g(X(t)) """ pass cdef void derivatives(self, double t, int deg, complex[::1] rho, complex[::1] a, complex[:, ::1] b, complex[:, :, ::1] Lb, complex[:,::1] La, complex[:, ::1] L0b, complex[:, :, :, ::1] LLb, complex[::1] L0a): """ Obtain the multiple terms for stochastic taylor expension Up to order 1.5 multiple sc_ops """ pass cdef void derivativesO2(self, double t, complex[::1] rho, complex[::1] a, complex[::1] b, complex[::1] Lb, complex[::1] La, complex[::1] L0b, complex[::1] LLb, complex[::1] L0a, complex[::1] LLa, complex[::1] LL0b, complex[::1] L0Lb, complex[::1] LLLb): """ Obtain the multiple terms for stochastic taylor expension Up to order 2.0 One sc_ops """ pass cdef void photocurrent(self, double t, double dt, double[:] noise, complex[::1] vec, complex[::1] out): """Special integration scheme: photocurrent collapse + euler evolution """ pass cdef void photocurrent_pc(self, double t, double dt, double[:] noise, complex[::1] vec, complex[::1] out): """Special integration scheme: photocurrent collapse + predictor-corrector evolution """ pass cdef void rouchon(self, double t, double dt, double[:] noise, complex[::1] vec, complex[::1] out): """Special integration scheme: Force valid density matrix using positive map Pierre Rouchon, Jason F. Ralph arXiv:1410.5345 [quant-ph] """ pass @cython.boundscheck(False) @cython.wraparound(False) cdef void euler(self, double t, double dt, double[:] noise, complex[::1] vec, complex[::1] out): """Integration scheme: Basic Euler order 0.5 dV = d1 dt + d2_i dW_i Numerical Solution of Stochastic Differential Equations By Peter E. 
Kloeden, Eckhard Platen """ cdef int i, j cdef complex[:, ::1] d2 = self.buffer_2d[0,:,:] _zero_2d(d2) copy(vec, out) self.d1(t, vec, out) self.d2(t, vec, d2) for i in range(self.num_ops): _axpy(noise[i], d2[i,:], out) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void platen(self, double t, double dt, double[:] noise, complex[::1] vec, complex[::1] out): """ Platen rhs function for both master eq and schrodinger eq. dV = -iH* (V+Vt)/2 * dt + (d1(V)+d1(Vt))/2 * dt + (2*d2_i(V)+d2_i(V+)+d2_i(V-))/4 * dW_i + (d2_i(V+)-d2_i(V-))/4 * (dW_i**2 -dt) * dt**(-.5) Vt = V -iH*V*dt + d1*dt + d2_i*dW_i V+/- = V -iH*V*dt + d1*dt +/- d2_i*dt**.5 The Theory of Open Quantum Systems Chapter 7 Eq. (7.47), H.-P Breuer, F. Petruccione """ cdef int i, j, k cdef double sqrt_dt = np.sqrt(dt) cdef double sqrt_dt_inv = 0.25/sqrt_dt cdef double dw, dw2 cdef complex[::1] d1 = self.buffer_1d[0,:] cdef complex[::1] Vt = self.buffer_1d[1,:] cdef complex[:, ::1] d2 = self.buffer_2d[0,:,:] cdef complex[:, ::1] Vm = self.buffer_2d[1,:,:] cdef complex[:, ::1] Vp = self.buffer_2d[2,:,:] cdef complex[:, ::1] d2p = self.buffer_2d[3,:,:] cdef complex[:, ::1] d2m = self.buffer_2d[4,:,:] _zero(d1) _zero_2d(d2) self.d1(t, vec, d1) self.d2(t, vec, d2) _axpy(1.0,vec,d1) copy(d1,Vt) copy(d1,out) _scale(0.5,out) for i in range(self.num_ops): copy(d1,Vp[i,:]) copy(d1,Vm[i,:]) _axpy( sqrt_dt,d2[i,:],Vp[i,:]) _axpy(-sqrt_dt,d2[i,:],Vm[i,:]) _axpy(noise[i],d2[i,:],Vt) _zero(d1) self.d1(t, Vt, d1) _axpy(0.5,d1,out) _axpy(0.5,vec,out) for i in range(self.num_ops): _zero_2d(d2p) _zero_2d(d2m) self.d2(t, Vp[i,:], d2p) self.d2(t, Vm[i,:], d2m) dw = noise[i] * 0.25 _axpy(dw,d2m[i,:],out) _axpy(2*dw,d2[i,:],out) _axpy(dw,d2p[i,:],out) for j in range(self.num_ops): if i == j: dw2 = sqrt_dt_inv * (noise[i]*noise[i] - dt) else: dw2 = sqrt_dt_inv * noise[i] * noise[j] _axpy(dw2,d2p[j,:],out) _axpy(-dw2,d2m[j,:],out) @cython.wraparound(False) @cython.boundscheck(False) 
@cython.cdivision(True) cdef void pred_corr(self, double t, double dt, double[:] noise, complex[::1] vec, complex[::1] out): """ Chapter 15.5 Eq. (5.4) Numerical Solution of Stochastic Differential Equations By Peter E. Kloeden, Eckhard Platen """ # a=0. b=0.5 cdef double dt_2 = dt*0.5 cdef complex[::1] euler = self.buffer_1d[0,:] cdef complex[::1] a_pred = self.buffer_1d[1,:] cdef complex[::1] b_pred = self.buffer_1d[2,:] cdef complex[:, ::1] d2 = self.buffer_2d[0,:,:] cdef complex[:, :, ::1] dd2 = self.buffer_3d[0,:,:,:] _zero(a_pred) _zero(b_pred) _zero_2d(d2) _zero_3d(dd2) self.derivatives(t, 1, vec, a_pred, d2, dd2, None, None, None, None) copy(vec, euler) copy(vec, out) _axpy(1.0, a_pred, euler) for i in range(self.num_ops): _axpy(noise[i], d2[i,:], b_pred) _axpy(-dt_2, dd2[i,i,:], a_pred) _axpy(1.0, a_pred, out) _axpy(1.0, b_pred, euler) _axpy(0.5, b_pred, out) _zero_2d(d2) self.d2(t + dt, euler, d2) for i in range(self.num_ops): _axpy(noise[i]*0.5, d2[i,:], out) @cython.wraparound(False) @cython.boundscheck(False) @cython.cdivision(True) cdef void pred_corr_a(self, double t, double dt, double[:] noise, complex[::1] vec, complex[::1] out): """ Chapter 15.5 Eq. (5.4) Numerical Solution of Stochastic Differential Equations By Peter E. 
Kloeden, Eckhard Platen """ # a=0.5, b=0.5 cdef int i, j, k cdef complex[::1] euler = self.buffer_1d[0,:] cdef complex[::1] a_pred = self.buffer_1d[1,:] _zero(a_pred) cdef complex[::1] a_corr = self.buffer_1d[2,:] _zero(a_corr) cdef complex[::1] b_pred = self.buffer_1d[3,:] _zero(b_pred) cdef complex[:, ::1] d2 = self.buffer_2d[0,:,:] _zero_2d(d2) cdef complex[:, :, ::1] dd2 = self.buffer_3d[0,:,:,:] _zero_3d(dd2) cdef double dt_2 = dt*0.5 self.derivatives(t, 1, vec, a_pred, d2, dd2, None, None, None, None) copy(vec, euler) _axpy(1.0, a_pred, euler) for i in range(self.num_ops): _axpy(noise[i], d2[i,:], b_pred) _axpy(-dt_2, dd2[i,i,:], a_pred) _axpy(1.0, b_pred, euler) copy(vec, out) _axpy(0.5, a_pred, out) _axpy(0.5, b_pred, out) _zero_2d(d2) _zero_3d(dd2) self.derivatives(t, 1, euler, a_corr, d2, dd2, None, None, None, None) for i in range(self.num_ops): _axpy(noise[i]*0.5, d2[i,:], out) _axpy(-dt_2, dd2[i,i,:], a_corr) _axpy(0.5, a_corr, out) @cython.wraparound(False) @cython.boundscheck(False) @cython.cdivision(True) cdef void milstein(self, double t, double dt, double[:] noise, complex[::1] vec, complex[::1] out): """ Chapter 10.3 Eq. (3.1) Numerical Solution of Stochastic Differential Equations By Peter E. 
Kloeden, Eckhard Platen dV = -iH*V*dt + d1*dt + d2_i*dW_i + 0.5*d2_i' d2_j*(dW_i*dw_j -dt*delta_ij) """ cdef int i, j, k cdef double dw cdef complex[:, ::1] d2 = self.buffer_2d[0,:,:] cdef complex[:, :, ::1] dd2 = self.buffer_3d[0,:,:,:] _zero_2d(d2) _zero_3d(dd2) copy(vec,out) self.derivatives(t, 1, vec, out, d2, dd2, None, None, None, None) for i in range(self.num_ops): _axpy(noise[i],d2[i,:],out) for i in range(self.num_ops): for j in range(i, self.num_ops): if (i == j): dw = (noise[i] * noise[i] - dt) * 0.5 else: dw = (noise[i] * noise[j]) _axpy(dw,dd2[i,j,:],out) @cython.wraparound(False) @cython.boundscheck(False) @cython.cdivision(True) cdef void milstein_imp(self, double t, double dt, double[:] noise, complex[::1] vec, complex[::1] out): """ Chapter 12.2 Eq. (2.9) Numerical Solution of Stochastic Differential Equations By Peter E. Kloeden, Eckhard Platen """ cdef int i, j, k cdef double dw cdef np.ndarray[complex, ndim=1] guess = np.zeros((self.l_vec, ), dtype=complex) cdef np.ndarray[complex, ndim=1] dvec = np.zeros((self.l_vec, ), dtype=complex) cdef complex[::1] a = self.buffer_1d[0,:] cdef complex[:, ::1] d2 = self.buffer_2d[0,:,:] cdef complex[:, :, ::1] dd2 = self.buffer_3d[0,:,:,:] _zero(a) _zero_2d(d2) _zero_3d(dd2) self.derivatives(t, 1, vec, a, d2, dd2, None, None, None, None) copy(vec, dvec) _axpy(0.5, a, dvec) for i in range(self.num_ops): _axpy(noise[i], d2[i,:], dvec) for i in range(self.num_ops): for j in range(i, self.num_ops): if (i == j): dw = (noise[i] * noise[i] - dt) * 0.5 else: dw = (noise[i] * noise[j]) _axpy(dw, dd2[i,j,:], dvec) copy(dvec, guess) _axpy(0.5, a, guess) self.implicit(t+dt, dvec, out, guess) @cython.wraparound(False) @cython.boundscheck(False) @cython.cdivision(True) cdef void taylor15(self, double t, double dt, double[:] noise, complex[::1] vec, complex[::1] out): """ Chapter 12.2 Eq. (2.18), Numerical Solution of Stochastic Differential Equations By Peter E. 
Kloeden, Eckhard Platen """ cdef complex[::1] a = self.buffer_1d[0, :] cdef complex[:, ::1] b = self.buffer_2d[0, :, :] cdef complex[:, :, ::1] Lb = self.buffer_3d[0, :, :, :] cdef complex[:, ::1] L0b = self.buffer_2d[1,:,:] cdef complex[:, ::1] La = self.buffer_2d[2,:,:] cdef complex[:, :, :, ::1] LLb = self.buffer_4d[:, :, :, :] cdef complex[::1] L0a = self.buffer_1d[1, :] _zero(a) _zero_2d(b) _zero_3d(Lb) _zero_2d(L0b) _zero_2d(La) _zero_4d(LLb) _zero(L0a) self.derivatives(t, 2, vec, a, b, Lb, La, L0b, LLb, L0a) cdef int i,j,k cdef double[::1] dz, dw dw = np.empty(self.num_ops) dz = np.empty(self.num_ops) # The dt of dz is included in the d1 part (Ldt) and the noise (dt**.5) for i in range(self.num_ops): dw[i] = noise[i] dz[i] = 0.5 *(noise[i] + 1./np.sqrt(3) * noise[i+self.num_ops]) copy(vec,out) _axpy(1.0, a, out) _axpy(0.5, L0a, out) for i in range(self.num_ops): _axpy(dw[i], b[i,:], out) _axpy(0.5*(dw[i]*dw[i]-dt), Lb[i,i,:], out) _axpy(dz[i], La[i,:], out) _axpy(dw[i]-dz[i], L0b[i,:], out) _axpy(0.5 * ((1/3.) * dw[i] * dw[i] - dt) * dw[i], LLb[i,i,i,:], out) for j in range(i+1,self.num_ops): _axpy((dw[i]*dw[j]), Lb[i,j,:], out) _axpy(0.5*(dw[j]*dw[j]-dt)*dw[i], LLb[i,j,j,:], out) _axpy(0.5*(dw[i]*dw[i]-dt)*dw[j], LLb[i,i,j,:], out) for k in range(j+1,self.num_ops): _axpy(dw[i]*dw[j]*dw[k], LLb[i,j,k,:], out) @cython.wraparound(False) @cython.boundscheck(False) @cython.cdivision(True) cdef void taylor15_imp(self, double t, double dt, double[:] noise, complex[::1] vec, complex[::1] out): """ Chapter 12.2 Eq. (2.18), Numerical Solution of Stochastic Differential Equations By Peter E. 
Kloeden, Eckhard Platen """ cdef complex[::1] a = self.buffer_1d[0, :] cdef complex[:, ::1] b = self.buffer_2d[0, :, :] cdef complex[:, :, ::1] Lb = self.buffer_3d[0, :, :, :] cdef complex[:, ::1] L0b = self.buffer_2d[1,:,:] cdef complex[:, ::1] La = self.buffer_2d[2,:,:] cdef complex[:, :, :, ::1] LLb = self.buffer_4d[:, :, :, :] cdef complex[::1] L0a = self.buffer_1d[1, :] _zero(a) _zero_2d(b) _zero_3d(Lb) _zero_2d(L0b) _zero_2d(La) _zero_4d(LLb) _zero(L0a) cdef np.ndarray[complex, ndim=1] guess = np.zeros((self.l_vec, ), dtype=complex) cdef np.ndarray[complex, ndim=1] vec_t = np.zeros((self.l_vec, ), dtype=complex) self.derivatives(t, 3, vec, a, b, Lb, La, L0b, LLb, L0a) cdef int i,j,k cdef double[::1] dz, dw dw = np.empty(self.num_ops) dz = np.empty(self.num_ops) # The dt of dz is included in the d1 part (Ldt) and the noise (dt**.5) for i in range(self.num_ops): dw[i] = noise[i] dz[i] = 0.5 *(noise[i] + 1./np.sqrt(3) * noise[i+self.num_ops]) copy(vec, vec_t) _axpy(0.5, a, vec_t) for i in range(self.num_ops): _axpy(dw[i], b[i,:], vec_t) _axpy(0.5*(dw[i]*dw[i]-dt), Lb[i,i,:], vec_t) _axpy(dz[i]-dw[i]*0.5, La[i,:], vec_t) _axpy(dw[i]-dz[i] , L0b[i,:], vec_t) _axpy(0.5 * ((1/3.) * dw[i] * dw[i] - dt) * dw[i], LLb[i,i,i,:], vec_t) for j in range(i+1,self.num_ops): _axpy((dw[i]*dw[j]), Lb[i,j,:], vec_t) _axpy(0.5*(dw[j]*dw[j]-dt)*dw[i], LLb[i,j,j,:], vec_t) _axpy(0.5*(dw[i]*dw[i]-dt)*dw[j], LLb[i,i,j,:], vec_t) for k in range(j+1,self.num_ops): _axpy(dw[i]*dw[j]*dw[k], LLb[i,j,k,:], vec_t) copy(vec_t, guess) _axpy(0.5, a, guess) self.implicit(t+dt, vec_t, out, guess) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void platen15(self, double t, double dt, double[:] noise, complex[::1] vec, complex[::1] out): """ Chapter 11.2 Eq. (2.13) Numerical Solution of Stochastic Differential Equations By Peter E. 
Kloeden, Eckhard Platen """ cdef int i, j, k cdef double sqrt_dt = np.sqrt(dt) cdef double sqrt_dt_inv = 1./sqrt_dt cdef double ddz, ddw, ddd cdef double[::1] dz, dw dw = np.empty(self.num_ops) dz = np.empty(self.num_ops) for i in range(self.num_ops): dw[i] = noise[i] dz[i] = 0.5 *(noise[i] + 1./np.sqrt(3) * noise[i+self.num_ops]) cdef complex[::1] d1 = self.buffer_1d[0,:] cdef complex[::1] d1p = self.buffer_1d[1,:] cdef complex[::1] d1m = self.buffer_1d[2,:] cdef complex[::1] V = self.buffer_1d[3,:] cdef complex[:, ::1] d2 = self.buffer_2d[0,:,:] cdef complex[:, ::1] dd2 = self.buffer_2d[1,:,:] cdef complex[:, ::1] d2p = self.buffer_2d[2,:,:] cdef complex[:, ::1] d2m = self.buffer_2d[3,:,:] cdef complex[:, ::1] d2pp = self.buffer_2d[4,:,:] cdef complex[:, ::1] d2mm = self.buffer_2d[5,:,:] cdef complex[:, ::1] v2p = self.buffer_2d[6,:,:] cdef complex[:, ::1] v2m = self.buffer_2d[7,:,:] cdef complex[:, :, ::1] p2p = self.buffer_3d[0,:,:,:] cdef complex[:, : ,::1] p2m = self.buffer_3d[1,:,:,:] _zero(d1) _zero_2d(d2) _zero_2d(dd2) self.d1(t, vec, d1) self.d2(t, vec, d2) self.d2(t + dt, vec, dd2) # Euler part copy(vec,out) _axpy(1., d1, out) for i in range(self.num_ops): _axpy(dw[i], d2[i,:], out) _zero(V) _axpy(1., vec, V) _axpy(1./self.num_ops, d1, V) _zero_2d(v2p) _zero_2d(v2m) for i in range(self.num_ops): _axpy(1., V, v2p[i,:]) _axpy(sqrt_dt, d2[i,:], v2p[i,:]) _axpy(1., V, v2m[i,:]) _axpy(-sqrt_dt, d2[i,:], v2m[i,:]) _zero_3d(p2p) _zero_3d(p2m) for i in range(self.num_ops): _zero_2d(d2p) _zero_2d(d2m) self.d2(t, v2p[i,:], d2p) self.d2(t, v2m[i,:], d2m) ddw = (dw[i]*dw[i]-dt)*0.25/sqrt_dt # 1.0 _axpy( ddw, d2p[i,:], out) _axpy(-ddw, d2m[i,:], out) for j in range(self.num_ops): _axpy( 1., v2p[i,:], p2p[i,j,:]) _axpy( sqrt_dt, d2p[j,:], p2p[i,j,:]) _axpy( 1., v2p[i,:], p2m[i,j,:]) _axpy(-sqrt_dt, d2p[j,:], p2m[i,j,:]) _axpy(-0.5*(self.num_ops), d1, out) for i in range(self.num_ops): ddz = dz[i]*0.5/sqrt_dt # 1.5 ddd = 0.25*(dw[i]*dw[i]/3-dt)*dw[i]/dt # 1.5 
_zero(d1p) _zero(d1m) _zero_2d(d2m) _zero_2d(d2p) _zero_2d(d2pp) _zero_2d(d2mm) self.d1(t + dt/self.num_ops, v2p[i,:], d1p) self.d1(t + dt/self.num_ops, v2m[i,:], d1m) self.d2(t, v2p[i,:], d2p) self.d2(t, v2m[i,:], d2m) self.d2(t, p2p[i,i,:], d2pp) self.d2(t, p2m[i,i,:], d2mm) _axpy( ddz+0.25, d1p, out) _axpy(-ddz+0.25, d1m, out) _axpy((dw[i]-dz[i]), dd2[i,:], out) _axpy((dz[i]-dw[i]), d2[i,:], out) _axpy( ddd, d2pp[i,:], out) _axpy(-ddd, d2mm[i,:], out) _axpy(-ddd, d2p[i,:], out) _axpy( ddd, d2m[i,:], out) for j in range(self.num_ops): ddw = 0.5*(dw[j]-dz[j]) # 1.5 _axpy(ddw, d2p[j,:], out) _axpy(-2*ddw, d2[j,:], out) _axpy(ddw, d2m[j,:], out) if j>i: ddw = 0.5*(dw[i]*dw[j])/sqrt_dt # 1.0 _axpy( ddw, d2p[j,:], out) _axpy(-ddw, d2m[j,:], out) ddw = 0.25*(dw[j]*dw[j]-dt)*dw[i]/dt # 1.5 _zero_2d(d2pp) _zero_2d(d2mm) self.d2(t, p2p[j,i,:], d2pp) self.d2(t, p2m[j,i,:], d2mm) _axpy( ddw, d2pp[j,:], out) _axpy(-ddw, d2mm[j,:], out) _axpy(-ddw, d2p[j,:], out) _axpy( ddw, d2m[j,:], out) for k in range(j+1,self.num_ops): ddw = 0.5*dw[i]*dw[j]*dw[k]/dt # 1.5 _axpy( ddw, d2pp[k,:], out) _axpy(-ddw, d2mm[k,:], out) _axpy(-ddw, d2p[k,:], out) _axpy( ddw, d2m[k,:], out) if j=0 d2 euler dwi a[:] a >=0 d1 euler dt Lb[i,i,:] bi'bi >=1 milstein (dwi^2-dt)/2 Lb[i,j,:] bj'bi >=1 milstein dwi*dwj L0b[i,:] ab' +db/dt +bbb"/2 >=2 taylor15 dwidt-dzi La[i,:] ba' >=2 taylor15 dzi LLb[i,i,i,:] bi(bibi"+bi'bi') >=2 taylor15 (dwi^2/3-dt)dwi/2 LLb[i,j,j,:] bi(bjbj"+bj'bj') >=2 taylor15 (dwj^2-dt)dwj/2 LLb[i,j,k,:] bi(bjbk"+bj'bk') >=2 taylor15 dwi*dwj*dwk L0a[:] aa' +da/dt +bba"/2 2 taylor15 dt^2/2 """ cdef int i, j, k, l cdef double dt = self.dt cdef CQobjEvo c_op cdef complex e, de_bb cdef complex[::1] e_real = self.expect_buffer_1d[0,:] cdef complex[:, ::1] de_b = self.expect_buffer_2d[:,:] cdef complex[::1] de_a = self.expect_buffer_1d[1,:] cdef complex[:, :, ::1] dde_bb = self.expect_buffer_3d[:,:,:] _zero_3d(dde_bb) cdef complex[:, ::1] Cvec = self.func_buffer_2d[:,:] cdef complex[:, :, 
::1] Cb = self.func_buffer_3d[:,:,:] cdef complex[::1] temp = self.func_buffer_1d[0,:] cdef complex[::1] temp2 = self.func_buffer_1d[1,:] _zero(temp) _zero(temp2) _zero_2d(Cvec) _zero_3d(Cb) # a b self.L._mul_vec(t, &vec[0], &a[0]) for i in range(self.num_ops): c_op = self.c_ops[i] c_op._mul_vec(t, &vec[0], &Cvec[i,0]) e = _dotc(vec,Cvec[i,:]) e_real[i] = real(e) _axpy(1., Cvec[i,:], b[i,:]) _axpy(-e_real[i], vec, b[i,:]) _axpy(-0.5 * e_real[i] * e_real[i] * dt, vec, a[:]) _axpy(e_real[i] * dt, Cvec[i,:], a[:]) #Lb bb' if deg >= 1: for i in range(self.num_ops): c_op = self.c_ops[i] for j in range(self.num_ops): c_op._mul_vec(t, &b[j,0], &Cb[i,j,0]) for k in range(self.l_vec): temp[k] = conj(b[j,k]) temp2[k] = 0. c_op._mul_vec(t, &temp[0], &temp2[0]) de_b[i,j] = (_dotc(vec, Cb[i,j,:]) + _dot(b[j,:], Cvec[i,:]) + \ conj(_dotc(b[j,:], Cvec[i,:]) + _dotc(vec, temp2))) * 0.5 _axpy(1., Cb[i,j,:], Lb[i,j,:]) _axpy(-e_real[i], b[j,:], Lb[i,j,:]) _axpy(-de_b[i,j], vec, Lb[i,j,:]) for k in range(self.num_ops): dde_bb[i,j,k] += (_dot(b[j,:], Cb[i,k,:]) + \ _dot(b[k,:], Cb[i,j,:]) + \ conj(_dotc(b[k,:], temp2)))*.5 dde_bb[i,k,j] += conj(_dotc(b[k,:], temp2))*.5 #L0b La LLb if deg >= 2: for i in range(self.num_ops): #ba' self.L._mul_vec(t, &b[i,0], &La[i,0]) for j in range(self.num_ops): _axpy(-0.5 * e_real[j] * e_real[j] * dt, b[i,:], La[i,:]) _axpy(-e_real[j] * de_b[i,j] * dt, vec, La[i,:]) _axpy(e_real[j] * dt, Cb[i,j,:], La[i,:]) _axpy(de_b[i,j] * dt, Cvec[i,:], La[i,:]) #ab' + db/dt + bbb"/2 c_op = self.c_ops[i] c_op._mul_vec(t, &a[0], &L0b[i,0]) for k in range(self.l_vec): temp[k] = conj(a[k]) temp2[k] = 0. 
c_op._mul_vec(t, &temp[0], &temp2[0]) de_a[i] = (_dotc(vec, L0b[i,:]) + _dot(a, Cvec[i,:]) + \ conj(_dotc(a, Cvec[i,:]) + _dotc(vec, temp2))) * 0.5 _axpy(-e_real[i], a, L0b[i,:]) _axpy(-de_a[i], vec, L0b[i,:]) temp = np.zeros(self.l_vec, dtype=complex) c_op._mul_vec(t + self.dt, &vec[0], &temp[0]) e = _dotc(vec,temp) _axpy(1., temp, L0b[i,:]) _axpy(-real(e), vec, L0b[i,:]) _axpy(-1., b[i,:], L0b[i,:]) for j in range(self.num_ops): _axpy(-de_b[i,j]*dt, b[j,:], L0b[i,:]) _axpy(-dde_bb[i,j,j]*dt, vec, L0b[i,:]) #b(bb"+b'b') for j in range(i,self.num_ops): for k in range(j, self.num_ops): c_op._mul_vec(t, &Lb[j,k,0], &LLb[i,j,k,0]) for l in range(self.l_vec): temp[l] = conj(Lb[j,k,l]) temp2[l] = 0. c_op._mul_vec(t, &temp[0], &temp2[0]) de_bb = (_dotc(vec, LLb[i,j,k,:]) + \ _dot(Lb[j,k,:], Cvec[i,:]) + \ conj(_dotc(Lb[j,k,:], Cvec[i,:]) +\ _dotc(vec, temp2)))*0.5 _axpy(-e_real[i], Lb[j,k,:], LLb[i,j,k,:]) _axpy(-de_bb, vec, LLb[i,j,k,:]) _axpy(-dde_bb[i,j,k], vec, LLb[i,j,k,:]) _axpy(-de_b[i,j], b[k,:], LLb[i,j,k,:]) _axpy(-de_b[i,k], b[j,:], LLb[i,j,k,:]) #da/dt + aa' + bba" if deg == 2: self.d1(t + dt, vec, L0a) _axpy(-1.0, a, L0a) self.L._mul_vec(t, &a[0], &L0a[0]) for j in range(self.num_ops): c_op = self.c_ops[j] temp = np.zeros(self.l_vec, dtype=complex) c_op._mul_vec(t, &a[0], &temp[0]) _axpy(-0.5 * e_real[j] * e_real[j] * dt, a[:], L0a[:]) _axpy(-e_real[j] * de_a[j] * dt, vec, L0a[:]) _axpy(e_real[j] * dt, temp, L0a[:]) _axpy(de_a[j] * dt, Cvec[j,:], L0a[:]) for i in range(self.num_ops): _axpy(-0.5*(e_real[i] * dde_bb[i,j,j] + de_b[i,j] * de_b[i,j]) * dt * dt, vec, L0a[:]) _axpy(-e_real[i] * de_b[i,j] * dt * dt, b[j,:], L0a[:]) _axpy(0.5*dde_bb[i,j,j] * dt * dt, Cvec[i,:], L0a[:]) _axpy(de_b[i,j] * dt * dt, Cb[i,j,:], L0a[:]) @cython.boundscheck(False) @cython.wraparound(False) cdef void _c_vec_conj(self, double t, CQobjEvo c_op, complex[::1] vec, complex[::1] out): cdef int k cdef complex[::1] temp = self.func_buffer_1d[13,:] for k in range(self.l_vec): temp[k] 
= conj(vec[k]) out[k] = 0. c_op._mul_vec(t, &temp[0], &out[0]) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void derivativesO2(self, double t, complex[::1] psi, complex[::1] a, complex[::1] b, complex[::1] Lb, complex[::1] La, complex[::1] L0b, complex[::1] LLb, complex[::1] L0a, complex[::1] LLa, complex[::1] LL0b, complex[::1] L0Lb, complex[::1] LLLb): """ Combinaisons of a and b derivative for m sc_ops up to order dt**2.0 Use Stratonovich-Taylor expansion. One one sc_ops dY ~ a dt + bi dwi b[:] b d2 euler dw a[:] a- Lb/2 d1 euler dt Lb[:] b'b milstein dw^2/2 L0b[:] ab'- b'b'b/2 taylor1.5 dwdt-dz La[:] ba'- (b'b'b+b"bb)/2 taylor1.5 dz LLb[:] (b"bb+b'b'b) taylor1.5 dw^3/6 L0a[:] a_a'_ + da/dt -Lb_a'_/2 taylor1.5 dt^2/2 LLa[:] ... taylor2.0 dwdt-dz LL0b[:] ... taylor2.0 dz L0Lb[:] ... taylor2.0 dw^3/6 LLLb[:] ... taylor2.0 dt^2/2 """ cdef double dt = self.dt cdef CQobjEvo c_op = self.c_ops[0] cdef complex e, de_b, de_Lb, de_LLb, dde_bb, dde_bLb cdef complex de_a, dde_ba, de_La, de_L0b cdef complex[::1] Cpsi = self.func_buffer_1d[0,:] cdef complex[::1] Cb = self.func_buffer_1d[1,:] cdef complex[::1] Cbc = self.func_buffer_1d[2,:] cdef complex[::1] CLb = self.func_buffer_1d[3,:] cdef complex[::1] CLbc = self.func_buffer_1d[4,:] cdef complex[::1] CLLb = self.func_buffer_1d[5,:] cdef complex[::1] CLLbc = self.func_buffer_1d[6,:] cdef complex[::1] Ca = self.func_buffer_1d[7,:] cdef complex[::1] Cac = self.func_buffer_1d[8,:] cdef complex[::1] CLa = self.func_buffer_1d[9,:] cdef complex[::1] CLac = self.func_buffer_1d[10,:] cdef complex[::1] CL0b = self.func_buffer_1d[11,:] cdef complex[::1] CL0bc = self.func_buffer_1d[12,:] _zero(Cpsi) _zero(Cb) _zero(CLb) _zero(CLLb) _zero(Ca) _zero(CLa) _zero(CL0b) # b c_op._mul_vec(t, &psi[0], &Cpsi[0]) e = real(_dotc(psi, Cpsi)) _axpy(1., Cpsi, b) _axpy(-e, psi, b) # Lb c_op._mul_vec(t, &b[0], &Cb[0]) self._c_vec_conj(t, c_op, b, Cbc) de_b = (_dotc(psi, Cb) + _dot(b, Cpsi) + \ conj(_dotc(b, Cpsi) 
+ _dotc(psi, Cbc))) * 0.5 _axpy(1., Cb, Lb) _axpy(-e, b, Lb) _axpy(-de_b, psi, Lb) # LLb = b'b'b + b"bb c_op._mul_vec(t, &Lb[0], &CLb[0]) self._c_vec_conj(t, c_op, Lb, CLbc) de_Lb = (_dotc(psi, CLb) + _dot(Lb, Cpsi) + \ conj(_dotc(Lb, Cpsi) + _dotc(psi, CLbc)))*0.5 _axpy(1, CLb, LLb) # b'b'b _axpy(-e, Lb, LLb) # b'b'b _axpy(-de_Lb, psi, LLb) # b'b'b dde_bb += (_dot(b, Cb) + conj(_dotc(b, Cbc))) _axpy(-dde_bb, psi, LLb) # b"bb _axpy(-de_b*2, b, LLb) # b"bb # LLLb = b"'bbb + 3* b"b'bb + b'(b"bb + b'b'b) c_op._mul_vec(t, &LLb[0], &CLLb[0]) self._c_vec_conj(t, c_op, LLb, CLLbc) de_LLb = (_dotc(psi, CLLb) + _dot(LLb, Cpsi) + \ conj(_dotc(LLb, Cpsi) + _dotc(psi, CLLbc)))*0.5 dde_bLb += (_dot(b, CLb) + _dot(Lb, Cb) + conj(_dotc(Lb, Cbc)) + \ conj(_dotc(b, CLbc)))*.5 _axpy(1, CLLb, LLLb) # b'(b"bb + b'b'b) _axpy(-e, LLb, LLLb) # b'(b"bb + b'b'b) _axpy(-de_LLb, psi, LLLb) # b'(b"bb + b'b'b) _axpy(-dde_bLb*3, psi, LLLb) # b"bLb _axpy(-de_Lb*3, b, LLLb) # b"bLb _axpy(-de_b*3, Lb, LLLb) # b"bLb _axpy(-dde_bb*3, b, LLLb) # b"'bbb # a self.L._mul_vec(t, &psi[0], &a[0]) _axpy(-0.5 * e * e * dt, psi, a) _axpy(e * dt, Cpsi, a) _axpy(-0.5 * dt, Lb, a) #La self.L._mul_vec(t, &b[0], &La[0]) _axpy(-0.5 * e * e * dt, b, La) _axpy(-e * de_b * dt, psi, La) _axpy(e * dt, Cb, La) _axpy(de_b * dt, Cpsi, La) _axpy(-0.5 * dt, LLb, La) #LLa _axpy(-2 * e * de_b * dt, b, LLa) _axpy(-de_b * de_b * dt, psi, LLa) _axpy(-e * dde_bb * dt, psi, LLa) _axpy( 2 * de_b * dt, Cb, LLa) _axpy( dde_bb * dt, Cpsi, LLa) self.L._mul_vec(t, &Lb[0], &LLa[0]) _axpy(-de_Lb * e * dt, psi, LLa) _axpy(-0.5 * e * e * dt, Lb, LLa) _axpy( de_Lb * dt, Cpsi, LLa) _axpy( e * dt, CLb, LLa) _axpy(-0.5 * dt, LLLb, LLa) # L0b = b'a c_op._mul_vec(t, &a[0], &Ca[0]) self._c_vec_conj(t, c_op, a, Cac) de_a = (_dotc(psi, Ca) + _dot(a, Cpsi) + \ conj(_dotc(a, Cpsi) + _dotc(psi, Cac))) * 0.5 _axpy(1.0, Ca, L0b) _axpy(-e, a, L0b) _axpy(-de_a, psi, L0b) # LL0b = b"ba + b'La dde_ba += (_dot(b, Ca) + _dot(a, Cb) + conj(_dotc(a, Cbc)) + \ 
conj(_dotc(b, Cac)))*.5 _axpy(-dde_ba, psi, LL0b) _axpy(-de_a, b, LL0b) _axpy(-de_b, a, LL0b) c_op._mul_vec(t, &La[0], &CLa[0]) self._c_vec_conj(t, c_op, La, CLac) de_La = (_dotc(psi, CLa) + _dot(La, Cpsi) + \ conj(_dotc(La, Cpsi) + _dotc(psi, CLac))) * 0.5 _axpy(1., CLa, LL0b) _axpy(-e, La, LL0b) _axpy(-de_La, psi, LL0b) # L0Lb = b"ba + b'L0b _axpy(-dde_ba, psi, L0Lb) _axpy(-de_a, b, L0Lb) _axpy(-de_b, a, L0Lb) c_op._mul_vec(t, &L0b[0], &CL0b[0]) self._c_vec_conj(t, c_op, L0b, CL0bc) de_L0b = (_dotc(psi, CL0b) + _dot(L0b, Cpsi) + \ conj(_dotc(L0b, Cpsi) + _dotc(psi, CL0bc))) * 0.5 _axpy(1., CL0b, L0Lb) _axpy(-e, L0b, L0Lb) _axpy(-de_L0b, psi, L0Lb) # _L0_ _a_ = da/dt + a'_a_ -_L0_Lb/2 self.d1(t + dt, psi, L0a) # da/dt _axpy(-0.5 * dt, Lb, L0a) # da/dt _axpy(-1.0, a, L0a) # da/dt self.L._mul_vec(t, &a[0], &L0a[0]) # a'_a_ _axpy(-0.5 * e * e * dt, a, L0a) # a'_a_ _axpy(-e * de_a * dt, psi, L0a) # a'_a_ _axpy(e * dt, Ca, L0a) # a'_a_ _axpy(de_a * dt, Cpsi, L0a) # a'_a_ _axpy(-0.5 * dt, L0Lb, L0a) # _L0_Lb/2 cdef void implicit(self, double t, np.ndarray[complex, ndim=1] dvec, complex[::1] out, np.ndarray[complex, ndim=1] guess): # np.ndarray to memoryview is OK but not the reverse # scipy function only take np array, not memoryview self.imp_t = t spout, check = sp.linalg.bicgstab(self.imp, dvec, x0=guess, tol=self.tol, atol=1e-12) cdef int i copy(spout, out) cdef class SMESolver(StochasticSolver): """stochastic master equation system""" cdef CQobjEvo L cdef object imp cdef object c_ops cdef int N_root cdef double tol def set_data(self, sso): L = sso.LH c_ops = sso.sops self.l_vec = L.cte.shape[0] self.num_ops = len(c_ops) self.L = L.compiled_qobjevo self.c_ops = [] self.N_root = np.sqrt(self.l_vec) for i, op in enumerate(c_ops): self.c_ops.append(op.compiled_qobjevo) if sso.solver_code in [MILSTEIN_IMP_SOLVER, TAYLOR1_5_IMP_SOLVER]: self.tol = sso.tol self.imp = sso.imp cdef void _normalize_inplace(self, complex[::1] vec): _normalize_rho(vec) 
@cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef complex expect(self, complex[::1] rho): cdef complex e = 0. cdef int k for k in range(self.N_root): e += rho[k*(self.N_root+1)] return e @cython.boundscheck(False) cdef void d1(self, double t, complex[::1] rho, complex[::1] out): self.L._mul_vec(t, &rho[0], &out[0]) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void d2(self, double t, complex[::1] rho, complex[:, ::1] out): cdef int i, k cdef CQobjEvo c_op cdef complex expect for i in range(self.num_ops): c_op = self.c_ops[i] c_op._mul_vec(t, &rho[0], &out[i,0]) expect = self.expect(out[i,:]) _axpy(-expect, rho, out[i,:]) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void derivatives(self, double t, int deg, complex[::1] rho, complex[::1] a, complex[:, ::1] b, complex[:, :, ::1] Lb, complex[:,::1] La, complex[:, ::1] L0b, complex[:, :, :, ::1] LLb, complex[::1] L0a): """ combinaisons of a and b derivative for m sc_ops up to order dt**1.5 dY ~ a dt + bi dwi deg use noise b[i.:] bi >=0 d2 euler dwi a[:] a >=0 d1 euler dt Lb[i,i,:] bi'bi >=1 milstein (dwi^2-dt)/2 Lb[i,j,:] bj'bi >=1 milstein dwi*dwj L0b[i,:] ab' +db/dt +bbb"/2 >=2 taylor15 dwidt-dzi La[i,:] ba' >=2 taylor15 dzi LLb[i,i,i,:] bi(bibi"+bi'bi') >=2 taylor15 (dwi^2/3-dt)dwi/2 LLb[i,j,j,:] bi(bjbj"+bj'bj') >=2 taylor15 (dwj^2-dt)dwj/2 LLb[i,j,k,:] bi(bjbk"+bj'bk') >=2 taylor15 dwi*dwj*dwk L0a[:] aa' +da/dt +bba"/2 2 taylor15 dt^2/2 """ cdef int i, j, k cdef CQobjEvo c_op cdef CQobjEvo c_opj cdef complex trApp, trAbb, trAa cdef complex[::1] trAp = self.expect_buffer_1d[0,:] cdef complex[:, ::1] trAb = self.expect_buffer_2d cdef complex[::1] temp = self.func_buffer_1d[0,:] #_zero(temp) # a self.L._mul_vec(t, &rho[0], &a[0]) # b for i in range(self.num_ops): c_op = self.c_ops[i] # bi c_op._mul_vec(t, &rho[0], &b[i,0]) trAp[i] = self.expect(b[i,:]) _axpy(-trAp[i], rho, b[i,:]) # Libj = bibj', i<=j # sc_ops 
must commute (Libj = Ljbi) if deg >= 1: for i in range(self.num_ops): c_op = self.c_ops[i] for j in range(i, self.num_ops): c_op._mul_vec(t, &b[j,0], &Lb[i,j,0]) trAb[i,j] = self.expect(Lb[i,j,:]) _axpy(-trAp[j], b[i,:], Lb[i,j,:]) _axpy(-trAb[i,j], rho, Lb[i,j,:]) # L0b La LLb if deg >= 2: for i in range(self.num_ops): c_op = self.c_ops[i] # Lia = bia' self.L._mul_vec(t, &b[i,0], &La[i,0]) # L0bi = abi' + dbi/dt + Sum_j bjbjbi"/2 # db/dt c_op._mul_vec(t + self.dt, &rho[0], &L0b[i,0]) trApp = self.expect(L0b[i,:]) _axpy(-trApp, rho, L0b[i,:]) _axpy(-1, b[i,:], L0b[i,:]) # ab' _zero(temp) # = np.zeros((self.l_vec, ), dtype=complex) c_op._mul_vec(t, &a[0], &temp[0]) trAa = self.expect(temp) _axpy(1., temp, L0b[i,:]) _axpy(-trAp[i], a[:], L0b[i,:]) _axpy(-trAa, rho, L0b[i,:]) # bbb" : trAb[i,j] only defined for j>=i for j in range(i): _axpy(-trAb[j,i]*self.dt, b[j,:], L0b[i,:]) # L contain dt for j in range(i,self.num_ops): _axpy(-trAb[i,j]*self.dt, b[j,:], L0b[i,:]) # L contain dt # LLb # LiLjbk = bi(bj'bk'+bjbk"), i<=j<=k # sc_ops must commute (LiLjbk = LjLibk = LkLjbi) for j in range(i,self.num_ops): for k in range(j,self.num_ops): c_op._mul_vec(t, &Lb[j,k,0], &LLb[i,j,k,0]) trAbb = self.expect(LLb[i,j,k,:]) _axpy(-trAp[i], Lb[j,k,:], LLb[i,j,k,:]) _axpy(-trAbb, rho, LLb[i,j,k,:]) _axpy(-trAb[i,k], b[j,:], LLb[i,j,k,:]) _axpy(-trAb[i,j], b[k,:], LLb[i,j,k,:]) # L0a = a'a + da/dt + bba"/2 (a" = 0) if deg == 2: self.L._mul_vec(t, &a[0], &L0a[0]) self.L._mul_vec(t+self.dt, &rho[0], &L0a[0]) _axpy(-1, a, L0a) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void derivativesO2(self, double t, complex[::1] rho, complex[::1] a, complex[::1] b, complex[::1] Lb, complex[::1] La, complex[::1] L0b, complex[::1] LLb, complex[::1] L0a, complex[::1] LLa, complex[::1] LL0b, complex[::1] L0Lb, complex[::1] LLLb): """ Combinaisons of a and b derivative for m sc_ops up to order dt**2.0 Use Stratonovich-Taylor expansion. 
One one sc_ops dY ~ a dt + bi dwi b[:] b d2 euler dw a[:] a- Lb/2 d1 euler dt Lb[:] b'b milstein dw^2/2 L0b[:] ab'- b'b'b/2 taylor1.5 dwdt-dz La[:] ba'- (b'b'b+b"bb)/2 taylor1.5 dz LLb[:] (b"bb+b'b'b) taylor1.5 dw^3/6 L0a[:] a_a'_ + da/dt -Lb_a'_/2 taylor1.5 dt^2/2 LLa[:] ... taylor2.0 dwdt-dz LL0b[:] ... taylor2.0 dz L0Lb[:] ... taylor2.0 dw^3/6 LLLb[:] ... taylor2.0 dt^2/2 """ cdef int i, j, k cdef CQobjEvo c_op = self.c_ops[0] cdef CQobjEvo c_opj cdef complex trAp, trApt cdef complex trAb, trALb, trALLb cdef complex trAa, trALa cdef complex trAL0b cdef complex[::1] temp = self.func_buffer_1d[0,:] cdef complex[::1] temp2 = self.func_buffer_1d[1,:] # b c_op._mul_vec(t, &rho[0], &b[0]) trAp = self.expect(b) _axpy(-trAp, rho, b) # Lb = b'b c_op._mul_vec(t, &b[0], &Lb[0]) trAb = self.expect(Lb) _axpy(-trAp, b, Lb) _axpy(-trAb, rho, Lb) # LLb = b'Lb+b"bb c_op._mul_vec(t, &Lb[0], &LLb[0]) trALb = self.expect(LLb) _axpy(-trAp, Lb, LLb) _axpy(-trALb, rho, LLb) _axpy(-trAb*2, b, LLb) # LLLb = b'LLb + 3 b"bLb + b"'bbb c_op._mul_vec(t, &LLb[0], &LLLb[0]) trALLb = self.expect(LLLb) _axpy(-trAp, LLb, LLLb) _axpy(-trALLb, rho, LLLb) _axpy(-trALb*3, b, LLLb) _axpy(-trAb*3, Lb, LLLb) # _a_ = a - Lb/2 self.L._mul_vec(t, &rho[0], &a[0]) _axpy(-0.5*self.dt, Lb, a) # L_a_ = ba' - LLb/2 self.L._mul_vec(t, &b[0], &La[0]) _axpy(-0.5*self.dt, LLb, La) # LL_a_ = b(La)' - LLLb/2 self.L._mul_vec(t, &Lb[0], &LLa[0]) _axpy(-0.5*self.dt, LLLb, LLa) # _L0_b = b'(_a_) c_op._mul_vec(t, &a[0], &L0b[0]) trAa = self.expect(L0b) _axpy(-trAp, a, L0b) _axpy(-trAa, rho, L0b) # _L0_Lb = b'(b'(_a_))+b"(_a_,b) c_op._mul_vec(t, &L0b[0], &L0Lb[0]) trAL0b = self.expect(L0Lb) _axpy(-trAp, L0b, L0Lb) _axpy(-trAL0b, rho, L0Lb) _axpy(-trAa, b, L0Lb) _axpy(-trAb, a, L0Lb) # L_L0_b = b'(_a_'(b))+b"(_a_,b) c_op._mul_vec(t, &La[0], &LL0b[0]) trAL0b = self.expect(LL0b) _axpy(-trAp, La, LL0b) _axpy(-trAL0b, rho, LL0b) _axpy(-trAa, b, LL0b) _axpy(-trAb, a, LL0b) # _L0_ _a_ = _L0_a - _L0_Lb/2 + da/dt self.L._mul_vec(t, 
&a[0], &L0a[0]) self.L._mul_vec(t+self.dt, &rho[0], &L0a[0]) _axpy(-0.5*self.dt, Lb, L0a) # _a_(t+dt) = a(t+dt)-0.5*Lb _axpy(-1, a, L0a) _axpy(-self.dt*0.5, L0Lb, L0a) cdef void implicit(self, double t, np.ndarray[complex, ndim=1] dvec, complex[::1] out, np.ndarray[complex, ndim=1] guess): # np.ndarray to memoryview is OK but not the reverse # scipy function only take np array, not memoryview spout, check = sp.linalg.bicgstab(self.imp(t, data=1), dvec, x0=guess, tol=self.tol, atol=1e-12) cdef int i copy(spout,out) cdef class PcSSESolver(StochasticSolver): """photocurrent for Schrodinger equation""" cdef CQobjEvo L cdef object c_ops cdef object cdc_ops def set_data(self, sso): L = sso.LH c_ops = sso.sops self.l_vec = L.cte.shape[0] self.num_ops = len(c_ops) self.L = L.compiled_qobjevo self.c_ops = [] self.cdc_ops = [] for i, op in enumerate(c_ops): self.c_ops.append(op[0].compiled_qobjevo) self.cdc_ops.append(op[1].compiled_qobjevo) @cython.boundscheck(False) @cython.wraparound(False) cdef void photocurrent(self, double t, double dt, double[:] noise, complex[::1] vec, complex[::1] out): cdef CQobjEvo c_op cdef double rand cdef int i, which = -1 cdef complex[::1] expects = self.expect_buffer_1d[0,:] cdef complex[::1] d2 = self.buffer_1d[0,:] copy(vec, out) self.d1(t, vec, out) rand = np.random.rand() for i in range(self.num_ops): c_op = self.cdc_ops[i] expects[i] = c_op.expect(t, vec) if expects[i].real * dt >= 1e-15: rand -= expects[i].real *dt if rand < 0: which = i noise[i] = 1. 
break if which >= 0: self.collapse(t, which, expects[which].real, vec, d2) _axpy(1, d2, out) _axpy(-1, vec, out) @cython.boundscheck(False) @cython.wraparound(False) cdef void photocurrent_pc(self, double t, double dt, double[:] noise, complex[::1] vec, complex[::1] out): cdef CQobjEvo c_op cdef double expect cdef int i, which=0, num_coll=0, did_collapse=0 cdef complex[::1] tmp = self.buffer_1d[0,:] cdef complex[::1] expects = self.expect_buffer_1d[0,:] cdef np.ndarray[int, ndim=1] colls # Collapses are computed first for i in range(self.num_ops): c_op = self.cdc_ops[i] expects[i] = c_op.expect(t, vec).real if expects[i].real > 0: did_collapse = np.random.poisson(expects[i].real * dt) num_coll += did_collapse if did_collapse: which = i noise[i] = did_collapse * 1. else: noise[i] = 0. if num_coll == 0: pass elif num_coll == 1: # Do one collapse self.collapse(t, which, expects[which].real, vec, out) copy(out, vec) elif num_coll and noise[which] == num_coll: # Do many collapse of one sc_ops. # Recompute the expectation value, but only to check for zero. 
c_op = self.cdc_ops[which] for i in range(num_coll): expect = c_op.expect(t, vec).real if expect * dt >= 1e-15: self.collapse(t, which, expect, vec, out) copy(out,vec) elif num_coll >= 2: # 2 or more collapses of different operators # Ineficient, should be rare coll = [] for i in range(self.num_ops): coll += [i]*int(noise[i]) np.random.shuffle(coll) for i in coll: c_op = self.cdc_ops[i] expect = c_op.expect(t, vec).real if expect * dt >= 1e-15: self.collapse(t, i, expect, vec, out) copy(out,vec) copy(vec,tmp) copy(vec,out) self.d1(t, vec, tmp) self.d1(t+dt, tmp, out) _scale(0.5, out) _axpy(0.5, tmp, out) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void d1(self, double t, complex[::1] vec, complex[::1] out): self.L._mul_vec(t, &vec[0], &out[0]) cdef int i cdef complex e cdef CQobjEvo c_op cdef complex[::1] temp = self.func_buffer_1d[0,:] for i in range(self.num_ops): _zero(temp) c_op = self.c_ops[i] c_op._mul_vec(t, &vec[0], &temp[0]) e = _dznrm2(temp) _axpy(0.5 * e * e * self.dt, vec, out) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void d2(self, double t, complex[::1] vec, complex[:, ::1] out): cdef int i cdef CQobjEvo c_op cdef complex expect for i in range(self.num_ops): c_op = self.c_ops[i] c_op._mul_vec(t, &vec[0], &out[i,0]) expect = _dznrm2(out[i,:]) if expect.real >= 1e-15: _zscale(1/expect, out[i,:]) else: _zero(out[i,:]) _axpy(-1, vec, out[i,:]) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void collapse(self, double t, int which, double expect, complex[::1] vec, complex[::1] out): cdef CQobjEvo c_op c_op = self.c_ops[which] _zero(out) c_op._mul_vec(t, &vec[0], &out[0]) _zscale(1/expect, out) cdef class PcSMESolver(StochasticSolver): """photocurrent for master equation""" cdef CQobjEvo L cdef object cdcr_cdcl_ops cdef object cdcl_ops cdef object clcdr_ops cdef int N_root def set_data(self, sso): L = sso.LH c_ops = sso.sops self.l_vec = 
L.cte.shape[0] self.num_ops = len(c_ops) self.L = L.compiled_qobjevo self.cdcr_cdcl_ops = [] self.cdcl_ops = [] self.clcdr_ops = [] self.N_root = np.sqrt(self.l_vec) for i, op in enumerate(c_ops): self.cdcr_cdcl_ops.append(op[0].compiled_qobjevo) self.cdcl_ops.append(op[1].compiled_qobjevo) self.clcdr_ops.append(op[2].compiled_qobjevo) cdef void _normalize_inplace(self, complex[::1] vec): _normalize_rho(vec) @cython.boundscheck(False) @cython.wraparound(False) cdef void photocurrent(self, double t, double dt, double[:] noise, complex[::1] vec, complex[::1] out): cdef CQobjEvo c_op cdef double rand cdef int i, which = -1 cdef complex[::1] expects = self.expect_buffer_1d[0,:] cdef complex[::1] d2 = self.buffer_1d[0,:] copy(vec, out) self.d1(t, vec, out) rand = np.random.rand() for i in range(self.num_ops): c_op = self.clcdr_ops[i] expects[i] = c_op.expect(t, vec) if expects[i].real * dt >= 1e-15: rand -= expects[i].real *dt if rand < 0: which = i noise[i] = 1. break if which >= 0: self.collapse(t, which, expects[which].real, vec, d2) _axpy(1, d2, out) _axpy(-1, vec, out) @cython.boundscheck(False) @cython.wraparound(False) cdef void photocurrent_pc(self, double t, double dt, double[:] noise, complex[::1] vec, complex[::1] out): cdef CQobjEvo c_op cdef int i, which, num_coll=0, did_collapse cdef complex[::1] expects = self.expect_buffer_1d[0,:] cdef complex[::1] tmp = self.buffer_1d[0,:] cdef double expect cdef np.ndarray[int, ndim=1] colls # Collapses are computed first for i in range(self.num_ops): c_op = self.clcdr_ops[i] expects[i] = c_op.expect(t, vec).real if expects[i].real > 0: did_collapse = np.random.poisson(expects[i].real* dt) num_coll += did_collapse if did_collapse: which = i noise[i] = did_collapse * 1. else: noise[i] = 0. if num_coll == 0: pass elif num_coll == 1: # Do one collapse self.collapse(t, which, expects[which].real, vec, out) copy(out,vec) elif noise[which] == num_coll: # Do many collapse of one sc_ops. 
# Recompute the expectation value, but only to check for zero. c_op = self.clcdr_ops[which] for i in range(num_coll): expect = c_op.expect(t, vec).real if expect * dt >= 1e-15: self.collapse(t, which, expect, vec, out) copy(out,vec) elif num_coll >= 2: # 2 or more collapses of different operators # Ineficient, should be rare coll = [] for i in range(self.num_ops): coll += [i] * int(noise[i]) np.random.shuffle(coll) for i in coll: c_op = self.clcdr_ops[i] expect = c_op.expect(t, vec).real if expect * dt >= 1e-15: self.collapse(t, i, expect, vec, out) copy(out,vec) copy(vec,tmp) copy(vec,out) self.d1(t, vec, tmp) self.d1(t+dt, tmp, out) _scale(0.5, out) _axpy(0.5, tmp, out) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef complex expect(self, complex[::1] rho): cdef complex e = 0. cdef int k for k in range(self.N_root): e += rho[k*(self.N_root+1)] return e @cython.boundscheck(False) cdef void d1(self, double t, complex[::1] rho, complex[::1] out): cdef int i cdef CQobjEvo c_op cdef complex[::1] crho = self.func_buffer_1d[0,:] cdef complex expect self.L._mul_vec(t, &rho[0], &out[0]) for i in range(self.num_ops): c_op = self.cdcr_cdcl_ops[i] _zero(crho) c_op._mul_vec(t, &rho[0], &crho[0]) expect = self.expect(crho) _axpy(0.5*expect* self.dt, rho, out) _axpy(-0.5* self.dt, crho, out) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void d2(self, double t, complex[::1] rho, complex[:, ::1] out): cdef int i cdef CQobjEvo c_op cdef complex expect for i in range(self.num_ops): c_op = self.clcdr_ops[i] c_op._mul_vec(t, &rho[0], &out[i,0]) expect = self.expect(out[i,:]) if expect.real >= 1e-15: _zscale((1.+0j)/expect, out[i,:]) else: _zero(out[i,:]) _axpy(-1, rho, out[i,:]) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef void collapse(self, double t, int which, double expect, complex[::1] vec, complex[::1] out): cdef CQobjEvo c_op c_op = self.clcdr_ops[which] _zero(out) 
c_op._mul_vec(t, &vec[0], &out[0]) _zscale(1./expect, out) cdef class PmSMESolver(StochasticSolver): """positive map for master equation""" cdef object L cdef CQobjEvo pp_ops cdef CQobjEvo preLH cdef CQobjEvo postLH cdef object sops cdef object preops cdef object postops cdef object preops2 cdef object postops2 cdef int N_root def set_data(self, sso): c_ops = sso.sops self.l_vec = sso.pp.cte.shape[0] self.num_ops = len(c_ops) self.preLH = sso.preLH.compiled_qobjevo self.postLH = sso.postLH.compiled_qobjevo self.pp_ops = sso.pp.compiled_qobjevo self.sops = [op.compiled_qobjevo for op in sso.sops] self.preops = [op.compiled_qobjevo for op in sso.preops] self.postops = [op.compiled_qobjevo for op in sso.postops] self.preops2 = [op.compiled_qobjevo for op in sso.preops2] self.postops2 = [op.compiled_qobjevo for op in sso.postops2] self.N_root = np.sqrt(self.l_vec) cdef void _normalize_inplace(self, complex[::1] vec): _normalize_rho(vec) @cython.boundscheck(False) @cython.wraparound(False) cdef void rouchon(self, double t, double dt, double[:] noise, complex[::1] vec, complex[::1] out): cdef complex[::1] dy = self.expect_buffer_1d[0,:] cdef complex[::1] temp = self.buffer_1d[0,:] cdef complex[::1] temp2 = self.buffer_1d[1,:] cdef int i, j, k cdef CQobjEvo c_op, c_opj cdef complex ddw, tr _zero(out) _zero(temp) self.preLH._mul_vec(t, &vec[0], &temp[0]) for i in range(self.num_ops): c_op = self.sops[i] dy[i] = c_op._expect_super(t, &vec[0]) + noise[i] c_op = self.preops[i] _zero(temp2) c_op._mul_vec(t, &vec[0], &temp2[0]) _axpy(dy[i], temp2, temp) k = 0 for i in range(self.num_ops): for j in range(i, self.num_ops): c_op = self.preops2[k] if i == j: ddw = (dy[i]*dy[j] - dt) *0.5 else: ddw = (dy[i]*dy[j]) _zero(temp2) c_op._mul_vec(t, &vec[0], &temp2[0]) _axpy(ddw, temp2, temp) k += 1 self.postLH._mul_vec(t, &temp[0], &out[0]) for i in range(self.num_ops): dy[i] = conj(dy[i]) c_op = self.postops[i] _zero(temp2) c_op._mul_vec(t, &temp[0], &temp2[0]) _axpy(dy[i], temp2, out) 
k = 0 for i in range(self.num_ops): for j in range(i, self.num_ops): c_op = self.postops2[k] if i == j: ddw = (dy[i]*dy[j] - dt) *0.5 else: ddw = (dy[i]*dy[j]) _zero(temp2) c_op._mul_vec(t, &temp[0], &temp2[0]) _axpy(ddw, temp2, out) k += 1 self.pp_ops._mul_vec(t, &vec[0], &out[0]) tr = self.expect(out) _zscale(1./tr, out) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef complex expect(self, complex[::1] rho): cdef complex e = 0. cdef int k for k in range(self.N_root): e += rho[k*(self.N_root+1)] return e cdef class GenericSSolver(StochasticSolver): """support for user defined system""" cdef object d1_func, d2_func def set_data(self, sso): self.l_vec = sso.rho0.shape[0] self.num_ops = len(sso.sops) self.d1_func = sso.d1 self.d2_func = sso.d2 cdef void d1(self, double t, complex[::1] rho, complex[::1] out): cdef np.ndarray[complex, ndim=1] in_np cdef np.ndarray[complex, ndim=1] out_np in_np = np.zeros((self.l_vec, ), dtype=complex) copy(rho, in_np) out_np = self.d1_func(t, in_np) _axpy(self.dt, out_np, out) # d1 is += and * dt @cython.boundscheck(False) cdef void d2(self, double t, complex[::1] rho, complex[:, ::1] out): cdef np.ndarray[complex, ndim=1] in_np cdef np.ndarray[complex, ndim=2] out_np cdef int i in_np = np.zeros((self.l_vec, ), dtype=complex) copy(rho, in_np) out_np = self.d2_func(t, in_np) for i in range(self.num_ops): copy(out_np[i,:], out[i,:]) qutip-4.4.1/qutip/cy/utilities.py000066400000000000000000000045021352460343600170330ustar00rootroot00000000000000# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. 
Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### import os def _cython_build_cleanup(tdname, build_dir=None): if build_dir is None: build_dir = os.path.join(os.path.expanduser('~'), '.pyxbld') # Remove tdname.pyx pyx_file = tdname + ".pyx" try: os.remove(pyx_file) except: pass # Remove temp build files for dirpath, subdirs, files in os.walk(build_dir): for f in files: if f.startswith(tdname): try: os.remove(os.path.join(dirpath,f)) except: pass qutip-4.4.1/qutip/dimensions.py000066400000000000000000000267451352460343600165720ustar00rootroot00000000000000# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### """ Internal use module for manipulating dims specifications. """ __all__ = [] # Everything should be explicitly imported, not made available # by default. import numpy as np from operator import getitem from functools import partial def is_scalar(dims): """ Returns True if a dims specification is effectively a scalar (has dimension 1). 
""" return np.prod(flatten(dims)) == 1 def is_vector(dims): return ( isinstance(dims, list) and isinstance(dims[0], (int, np.integer)) ) def is_vectorized_oper(dims): return ( isinstance(dims, list) and isinstance(dims[0], list) ) def type_from_dims(dims, enforce_square=True): bra_like, ket_like = map(is_scalar, dims) if bra_like: if is_vector(dims[1]): return 'bra' elif is_vectorized_oper(dims[1]): return 'operator-bra' if ket_like: if is_vector(dims[0]): return 'ket' elif is_vectorized_oper(dims[0]): return 'operator-ket' elif is_vector(dims[0]) and (dims[0] == dims[1] or not enforce_square): return 'oper' elif ( is_vectorized_oper(dims[0]) and ( ( dims[0] == dims[1] and dims[0][0] == dims[1][0] ) or not enforce_square ) ): return 'super' return 'other' def flatten(l): """Flattens a list of lists to the first level. Given a list containing a mix of scalars and lists, flattens down to a list of the scalars within the original list. Examples -------- >>> print(flatten([[[0], 1], 2])) [0, 1, 2] """ if not isinstance(l, list): return [l] else: return sum(map(flatten, l), []) def deep_remove(l, *what): """Removes scalars from all levels of a nested list. Given a list containing a mix of scalars and lists, returns a list of the same structure, but where one or more scalars have been removed. Examples -------- >>> print(deep_remove([[[[0, 1, 2]], [3, 4], [5], [6, 7]]], 0, 5)) [[[[1, 2]], [3, 4], [], [6, 7]]] """ if isinstance(l, list): # Make a shallow copy at this level. l = l[:] for to_remove in what: if to_remove in l: l.remove(to_remove) else: l = list(map(lambda elem: deep_remove(elem, to_remove), l)) return l def unflatten(l, idxs): """Unflattens a list by a given structure. Given a list of scalars and a deep list of indices as produced by `flatten`, returns an "unflattened" form of the list. This perfectly inverts `flatten`. 
Examples -------- >>> l = [[[10, 20, 30], [40, 50, 60]], [[70, 80, 90], [100, 110, 120]]] >>> idxs = enumerate_flat(l) >>> print(unflatten(flatten(l)), idxs) == l True """ acc = [] for idx in idxs: if isinstance(idx, list): acc.append(unflatten(l, idx)) else: acc.append(l[idx]) return acc def _enumerate_flat(l, idx=0): if not isinstance(l, list): # Found a scalar, so return and increment. return idx, idx + 1 else: # Found a list, so append all the scalars # from it and recurse to keep the increment # correct. acc = [] for elem in l: labels, idx = _enumerate_flat(elem, idx) acc.append(labels) return acc, idx def _collapse_composite_index(dims): """ Given the dimensions specification for a composite index (e.g.: [2, 3] for the right index of a ket with dims [[1], [2, 3]]), returns a dimensions specification for an index of the same shape, but collapsed to a single "leg." In the previous example, [2, 3] would collapse to [6]. """ return [np.prod(dims)] def _collapse_dims_to_level(dims, level=1): """ Recursively collapses all indices in a dimensions specification appearing at a given level, such that the returned dimensions specification does not represent any composite systems. """ if level == 0: return _collapse_composite_index(dims) else: return [_collapse_dims_to_level(index, level=level - 1) for index in dims] def collapse_dims_oper(dims): """ Given the dimensions specifications for a ket-, bra- or oper-type Qobj, returns a dimensions specification describing the same shape by collapsing all composite systems. For instance, the bra-type dimensions specification ``[[2, 3], [1]]`` collapses to ``[[6], [1]]``. Parameters ---------- dims : list of lists of ints Dimensions specifications to be collapsed. Returns ------- collapsed_dims : list of lists of ints Collapsed dimensions specification describing the same shape such that ``len(collapsed_dims[0]) == len(collapsed_dims[1]) == 1``. 
""" return _collapse_dims_to_level(dims, 1) def collapse_dims_super(dims): """ Given the dimensions specifications for an operator-ket-, operator-bra- or super-type Qobj, returns a dimensions specification describing the same shape by collapsing all composite systems. For instance, the super-type dimensions specification ``[[[2, 3], [2, 3]], [[2, 3], [2, 3]]]`` collapses to ``[[[6], [6]], [[6], [6]]]``. Parameters ---------- dims : list of lists of ints Dimensions specifications to be collapsed. Returns ------- collapsed_dims : list of lists of ints Collapsed dimensions specification describing the same shape such that ``len(collapsed_dims[i][j]) == 1`` for ``i`` and ``j`` in ``range(2)``. """ return _collapse_dims_to_level(dims, 2) def enumerate_flat(l): """Labels the indices at which scalars occur in a flattened list. Given a list containing a mix of scalars and lists, returns a list of the same structure, where each scalar has been replaced by an index into the flattened list. Examples -------- >>> print(enumerate_flat([[[10], [20, 30]], 40])) [[[0], [1, 2]], 3] """ return _enumerate_flat(l)[0] def deep_map(fn, collection, over=(tuple, list)): if isinstance(collection, over): return type(collection)(deep_map(fn, el, over) for el in collection) else: return fn(collection) def dims_to_tensor_perm(dims): """ Given the dims of a Qobj instance, returns a list representing a permutation from the flattening of that dims specification to the corresponding tensor indices. Parameters ---------- dims : list Dimensions specification for a Qobj. Returns ------- perm : list A list such that ``data[flatten(dims)[idx]]`` gives the index of the tensor ``data`` corresponding to the ``idx``th dimension of ``dims``. """ # We figure out the type of the dims specification, # relaxing the requirement that operators be square. # This means that dims_type need not coincide with # Qobj.type, but that works fine for our purposes here. 
dims_type = type_from_dims(dims, enforce_square=False) perm = enumerate_flat(dims) # If type is oper, ket or bra, we don't need to do anything. if dims_type in ('oper', 'ket', 'bra'): return flatten(perm) # If the type is other, we need to figure out if the # dims is superlike on its outputs and inputs # This is the case if the dims type for left or right # are, respectively, oper-like. if dims_type == 'other': raise NotImplementedError("Not yet implemented for type='other'.") # If we're still here, the story is more complicated. We'll # follow the strategy of creating a permutation by using # enumerate_flat then transforming the result to swap # input and output indices of vectorized matrices, then flattening # the result. We'll then rebuild indices using this permutation. if dims_type in ('operator-ket', 'super'): # Swap the input and output spaces of the right part of # perm. perm[1] = list(reversed(perm[1])) if dims_type in ('operator-bra', 'super'): # Ditto, but for the left indices. perm[0] = list(reversed(perm[0])) return flatten(perm) def dims_to_tensor_shape(dims): """ Given the dims of a Qobj instance, returns the shape of the corresponding tensor. This helps, for instance, resolve the column-stacking convention for superoperators. Parameters ---------- dims : list Dimensions specification for a Qobj. Returns ------- tensor_shape : tuple NumPy shape of the corresponding tensor. """ perm = dims_to_tensor_perm(dims) dims = flatten(dims) return tuple(map(partial(getitem, dims), perm)) def dims_idxs_to_tensor_idxs(dims, indices): """ Given the dims of a Qobj instance, and some indices into dims, returns the corresponding tensor indices. This helps resolve, for instance, that column-stacking for superoperators, oper-ket and oper-bra implies that the input and output tensor indices are reversed from their order in dims. Parameters ---------- dims : list Dimensions specification for a Qobj. indices : int, list or tuple Indices to convert to tensor indices. 
Can be specified as a single index, or as a collection of indices. In the latter case, this can be nested arbitrarily deep. For instance, [0, [0, (2, 3)]]. Returns ------- tens_indices : int, list or tuple Container of the same structure as indices containing the tensor indices for each element of indices. """ perm = dims_to_tensor_perm(dims) return deep_map(partial(getitem, perm), indices) qutip-4.4.1/qutip/distributions.py000066400000000000000000000363001352460343600173100ustar00rootroot00000000000000# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### """ This module provides classes and functions for working with spatial distributions, such as Wigner distributions, etc. .. note:: Experimental. """ __all__ = ['Distribution', 'WignerDistribution', 'QDistribution', 'TwoModeQuadratureCorrelation', 'HarmonicOscillatorWaveFunction', 'HarmonicOscillatorProbabilityFunction'] import numpy as np from numpy import pi, exp, sqrt from scipy.special import hermite, factorial from qutip.qobj import isket from qutip.wigner import wigner, qfunc from qutip.states import ket2dm, state_number_index try: import matplotlib as mpl import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D except: pass class Distribution: """A class for representation spatial distribution functions. The Distribution class can be used to prepresent spatial distribution functions of arbitray dimension (although only 1D and 2D distributions are used so far). It is indented as a base class for specific distribution function, and provide implementation of basic functions that are shared among all Distribution functions, such as visualization, calculating marginal distributions, etc. Parameters ---------- data : array_like Data for the distribution. The dimensions must match the lengths of the coordinate arrays in xvecs. xvecs : list List of arrays that spans the space for each coordinate. xlabels : list List of labels for each coordinate. 
""" def __init__(self, data=None, xvecs=[], xlabels=[]): self.data = data self.xvecs = xvecs self.xlabels = xlabels def visualize(self, fig=None, ax=None, figsize=(8, 6), colorbar=True, cmap=None, style="colormap", show_xlabel=True, show_ylabel=True): """ Visualize the data of the distribution in 1D or 2D, depending on the dimensionality of the underlaying distribution. Parameters: fig : matplotlib Figure instance If given, use this figure instance for the visualization, ax : matplotlib Axes instance If given, render the visualization using this axis instance. figsize : tuple Size of the new Figure instance, if one needs to be created. colorbar: Bool Whether or not the colorbar (in 2D visualization) should be used. cmap: matplotlib colormap instance If given, use this colormap for 2D visualizations. style : string Type of visualization: 'colormap' (default) or 'surface'. Returns ------- fig, ax : tuple A tuple of matplotlib figure and axes instances. """ n = len(self.xvecs) if n == 2: if style == "colormap": return self.visualize_2d_colormap(fig=fig, ax=ax, figsize=figsize, colorbar=colorbar, cmap=cmap, show_xlabel=show_xlabel, show_ylabel=show_ylabel) else: return self.visualize_2d_surface(fig=fig, ax=ax, figsize=figsize, colorbar=colorbar, cmap=cmap, show_xlabel=show_xlabel, show_ylabel=show_ylabel) elif n == 1: return self.visualize_1d(fig=fig, ax=ax, figsize=figsize, show_xlabel=show_xlabel, show_ylabel=show_ylabel) else: raise NotImplementedError("Distribution visualization in " + "%d dimensions is not implemented." 
% n) def visualize_2d_colormap(self, fig=None, ax=None, figsize=(8, 6), colorbar=True, cmap=None, show_xlabel=True, show_ylabel=True): if not fig and not ax: fig, ax = plt.subplots(1, 1, figsize=figsize) if cmap is None: cmap = mpl.cm.get_cmap('RdBu') lim = abs(self.data).max() cf = ax.contourf(self.xvecs[0], self.xvecs[1], self.data, 100, norm=mpl.colors.Normalize(-lim, lim), cmap=cmap) if show_xlabel: ax.set_xlabel(self.xlabels[0], fontsize=12) if show_ylabel: ax.set_ylabel(self.xlabels[1], fontsize=12) if colorbar: cb = fig.colorbar(cf, ax=ax) return fig, ax def visualize_2d_surface(self, fig=None, ax=None, figsize=(8, 6), colorbar=True, cmap=None, show_xlabel=True, show_ylabel=True): if not fig and not ax: fig = plt.figure(figsize=figsize) ax = Axes3D(fig, azim=-62, elev=25) if cmap is None: cmap = mpl.cm.get_cmap('RdBu') lim = abs(self.data).max() X, Y = np.meshgrid(self.xvecs[0], self.xvecs[1]) s = ax.plot_surface(X, Y, self.data, norm=mpl.colors.Normalize(-lim, lim), rstride=5, cstride=5, cmap=cmap, lw=0.1) if show_xlabel: ax.set_xlabel(self.xlabels[0], fontsize=12) if show_ylabel: ax.set_ylabel(self.xlabels[1], fontsize=12) if colorbar: cb = fig.colorbar(s, ax=ax, shrink=0.5) return fig, ax def visualize_1d(self, fig=None, ax=None, figsize=(8, 6), show_xlabel=True, show_ylabel=True): if not fig and not ax: fig, ax = plt.subplots(1, 1, figsize=figsize) p = ax.plot(self.xvecs[0], self.data) if show_xlabel: ax.set_xlabel(self.xlabels[0], fontsize=12) if show_ylabel: ax.set_ylabel("Marginal distribution", fontsize=12) return fig, ax def marginal(self, dim=0): """ Calculate the marginal distribution function along the dimension `dim`. Return a new Distribution instance describing this reduced- dimensionality distribution. Parameters ---------- dim : int The dimension (coordinate index) along which to obtain the marginal distribution. Returns ------- d : Distributions A new instances of Distribution that describes the marginal distribution. 
""" return Distribution(data=self.data.mean(axis=dim), xvecs=[self.xvecs[dim]], xlabels=[self.xlabels[dim]]) def project(self, dim=0): """ Calculate the projection (max value) distribution function along the dimension `dim`. Return a new Distribution instance describing this reduced-dimensionality distribution. Parameters ---------- dim : int The dimension (coordinate index) along which to obtain the projected distribution. Returns ------- d : Distributions A new instances of Distribution that describes the projection. """ return Distribution(data=self.data.max(axis=dim), xvecs=[self.xvecs[dim]], xlabels=[self.xlabels[dim]]) class WignerDistribution(Distribution): def __init__(self, rho=None, extent=[[-5, 5], [-5, 5]], steps=250): self.xvecs = [np.linspace(extent[0][0], extent[0][1], steps), np.linspace(extent[1][0], extent[1][1], steps)] self.xlabels = [r'$\rm{Re}(\alpha)$', r'$\rm{Im}(\alpha)$'] if rho: self.update(rho) def update(self, rho): self.data = wigner(rho, self.xvecs[0], self.xvecs[1]) class QDistribution(Distribution): def __init__(self, rho=None, extent=[[-5, 5], [-5, 5]], steps=250): self.xvecs = [np.linspace(extent[0][0], extent[0][1], steps), np.linspace(extent[1][0], extent[1][1], steps)] self.xlabels = [r'$\rm{Re}(\alpha)$', r'$\rm{Im}(\alpha)$'] if rho: self.update(rho) def update(self, rho): self.data = qfunc(rho, self.xvecs[0], self.xvecs[1]) class TwoModeQuadratureCorrelation(Distribution): def __init__(self, state=None, theta1=0.0, theta2=0.0, extent=[[-5, 5], [-5, 5]], steps=250): self.xvecs = [np.linspace(extent[0][0], extent[0][1], steps), np.linspace(extent[1][0], extent[1][1], steps)] self.xlabels = [r'$X_1(\theta_1)$', r'$X_2(\theta_2)$'] self.theta1 = theta1 self.theta2 = theta2 self.update(state) def update(self, state): """ calculate probability distribution for quadrature measurement outcomes given a two-mode wavefunction or density matrix """ if isket(state): self.update_psi(state) else: self.update_rho(state) def update_psi(self, 
psi): """ calculate probability distribution for quadrature measurement outcomes given a two-mode wavefunction """ X1, X2 = np.meshgrid(self.xvecs[0], self.xvecs[1]) p = np.zeros((len(self.xvecs[0]), len(self.xvecs[1])), dtype=complex) N = psi.dims[0][0] for n1 in range(N): kn1 = exp(-1j * self.theta1 * n1) / \ sqrt(sqrt(pi) * 2 ** n1 * factorial(n1)) * \ exp(-X1 ** 2 / 2.0) * np.polyval(hermite(n1), X1) for n2 in range(N): kn2 = exp(-1j * self.theta2 * n2) / \ sqrt(sqrt(pi) * 2 ** n2 * factorial(n2)) * \ exp(-X2 ** 2 / 2.0) * np.polyval(hermite(n2), X2) i = state_number_index([N, N], [n1, n2]) p += kn1 * kn2 * psi.data[i, 0] self.data = abs(p) ** 2 def update_rho(self, rho): """ calculate probability distribution for quadrature measurement outcomes given a two-mode density matrix """ X1, X2 = np.meshgrid(self.xvecs[0], self.xvecs[1]) p = np.zeros((len(self.xvecs[0]), len(self.xvecs[1])), dtype=complex) N = rho.dims[0][0] M1 = np.zeros( (N, N, len(self.xvecs[0]), len(self.xvecs[1])), dtype=complex) M2 = np.zeros( (N, N, len(self.xvecs[0]), len(self.xvecs[1])), dtype=complex) for m in range(N): for n in range(N): M1[m, n] = exp(-1j * self.theta1 * (m - n)) / \ sqrt(pi * 2 ** (m + n) * factorial(n) * factorial(m)) * \ exp(-X1 ** 2) * np.polyval( hermite(m), X1) * np.polyval(hermite(n), X1) M2[m, n] = exp(-1j * self.theta2 * (m - n)) / \ sqrt(pi * 2 ** (m + n) * factorial(n) * factorial(m)) * \ exp(-X2 ** 2) * np.polyval( hermite(m), X2) * np.polyval(hermite(n), X2) for n1 in range(N): for n2 in range(N): i = state_number_index([N, N], [n1, n2]) for p1 in range(N): for p2 in range(N): j = state_number_index([N, N], [p1, p2]) p += M1[n1, p1] * M2[n2, p2] * rho.data[i, j] self.data = p class HarmonicOscillatorWaveFunction(Distribution): def __init__(self, psi=None, omega=1.0, extent=[-5, 5], steps=250): self.xvecs = [np.linspace(extent[0], extent[1], steps)] self.xlabels = [r'$x$'] self.omega = omega if psi: self.update(psi) def update(self, psi): """ Calculate the 
wavefunction for the given state of an harmonic oscillator """ self.data = np.zeros(len(self.xvecs[0]), dtype=complex) N = psi.shape[0] for n in range(N): k = pow(self.omega / pi, 0.25) / \ sqrt(2 ** n * factorial(n)) * \ exp(-self.xvecs[0] ** 2 / 2.0) * \ np.polyval(hermite(n), self.xvecs[0]) self.data += k * psi.data[n, 0] class HarmonicOscillatorProbabilityFunction(Distribution): def __init__(self, rho=None, omega=1.0, extent=[-5, 5], steps=250): self.xvecs = [np.linspace(extent[0], extent[1], steps)] self.xlabels = [r'$x$'] self.omega = omega if rho: self.update(rho) def update(self, rho): """ Calculate the probability function for the given state of an harmonic oscillator (as density matrix) """ if isket(rho): rho = ket2dm(rho) self.data = np.zeros(len(self.xvecs[0]), dtype=complex) M, N = rho.shape for m in range(M): k_m = pow(self.omega / pi, 0.25) / \ sqrt(2 ** m * factorial(m)) * \ exp(-self.xvecs[0] ** 2 / 2.0) * \ np.polyval(hermite(m), self.xvecs[0]) for n in range(N): k_n = pow(self.omega / pi, 0.25) / \ sqrt(2 ** n * factorial(n)) * \ exp(-self.xvecs[0] ** 2 / 2.0) * \ np.polyval(hermite(n), self.xvecs[0]) self.data += np.conjugate(k_n) * k_m * rho.data[m, n] qutip-4.4.1/qutip/entropy.py000066400000000000000000000236741352460343600161200ustar00rootroot00000000000000# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. 
Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### __all__ = ['entropy_vn', 'entropy_linear', 'entropy_mutual', 'negativity', 'concurrence', 'entropy_conditional', 'entangling_power'] from numpy import e, real, sort, sqrt from scipy import log, log2 from qutip.qobj import ptrace from qutip.states import ket2dm from qutip.tensor import tensor from qutip.operators import sigmay from qutip.sparse import sp_eigs from qutip.qip.gates import swap from qutip.partial_transpose import partial_transpose def entropy_vn(rho, base=e, sparse=False): """ Von-Neumann entropy of density matrix Parameters ---------- rho : qobj Density matrix. base : {e,2} Base of logarithm. sparse : {False,True} Use sparse eigensolver. Returns ------- entropy : float Von-Neumann entropy of `rho`. 
Examples -------- >>> rho=0.5*fock_dm(2,0)+0.5*fock_dm(2,1) >>> entropy_vn(rho,2) 1.0 """ if rho.type == 'ket' or rho.type == 'bra': rho = ket2dm(rho) vals = sp_eigs(rho.data, rho.isherm, vecs=False, sparse=sparse) nzvals = vals[vals != 0] if base == 2: logvals = log2(nzvals) elif base == e: logvals = log(nzvals) else: raise ValueError("Base must be 2 or e.") return float(real(-sum(nzvals * logvals))) def entropy_linear(rho): """ Linear entropy of a density matrix. Parameters ---------- rho : qobj sensity matrix or ket/bra vector. Returns ------- entropy : float Linear entropy of rho. Examples -------- >>> rho=0.5*fock_dm(2,0)+0.5*fock_dm(2,1) >>> entropy_linear(rho) 0.5 """ if rho.type == 'ket' or rho.type == 'bra': rho = ket2dm(rho) return float(real(1.0 - (rho ** 2).tr())) def concurrence(rho): """ Calculate the concurrence entanglement measure for a two-qubit state. Parameters ---------- state : qobj Ket, bra, or density matrix for a two-qubit state. Returns ------- concur : float Concurrence References ---------- .. [1] http://en.wikipedia.org/wiki/Concurrence_(quantum_computing) """ if rho.isket and rho.dims != [[2, 2], [1, 1]]: raise Exception("Ket must be tensor product of two qubits.") elif rho.isbra and rho.dims != [[1, 1], [2, 2]]: raise Exception("Bra must be tensor product of two qubits.") elif rho.isoper and rho.dims != [[2, 2], [2, 2]]: raise Exception("Density matrix must be tensor product of two qubits.") if rho.isket or rho.isbra: rho = ket2dm(rho) sysy = tensor(sigmay(), sigmay()) rho_tilde = (rho * sysy) * (rho.conj() * sysy) evals = rho_tilde.eigenenergies() # abs to avoid problems with sqrt for very small negative numbers evals = abs(sort(real(evals))) lsum = sqrt(evals[3]) - sqrt(evals[2]) - sqrt(evals[1]) - sqrt(evals[0]) return max(0, lsum) def negativity(rho, subsys, method='tracenorm', logarithmic=False): """ Compute the negativity for a multipartite quantum system described by the density matrix rho. 
The subsys argument is an index that indicates which system to compute the negativity for. .. note:: Experimental. """ mask = [idx == subsys for idx, n in enumerate(rho.dims[0])] rho_pt = partial_transpose(rho, mask) if method == 'tracenorm': N = ((rho_pt.dag() * rho_pt).sqrtm().tr().real - 1)/2.0 elif method == 'eigenvalues': l = rho_pt.eigenenergies() N = ((abs(l)-l)/2).sum() else: raise ValueError("Unknown method %s" % method) if logarithmic: return log2(2 * N + 1) else: return N def entropy_mutual(rho, selA, selB, base=e, sparse=False): """ Calculates the mutual information S(A:B) between selection components of a system density matrix. Parameters ---------- rho : qobj Density matrix for composite quantum systems selA : int/list `int` or `list` of first selected density matrix components. selB : int/list `int` or `list` of second selected density matrix components. base : {e,2} Base of logarithm. sparse : {False,True} Use sparse eigensolver. Returns ------- ent_mut : float Mutual information between selected components. """ if isinstance(selA, int): selA = [selA] if isinstance(selB, int): selB = [selB] if rho.type != 'oper': raise TypeError("Input must be a density matrix.") if (len(selA) + len(selB)) != len(rho.dims[0]): raise TypeError("Number of selected components must match " + "total number.") rhoA = ptrace(rho, selA) rhoB = ptrace(rho, selB) out = (entropy_vn(rhoA, base, sparse=sparse) + entropy_vn(rhoB, base, sparse=sparse) - entropy_vn(rho, base, sparse=sparse)) return out def _entropy_relative(rho, sigma, base=e, sparse=False): """ ****NEEDS TO BE WORKED ON**** Calculates the relative entropy S(rho||sigma) between two density matrices. Parameters ---------- rho : qobj First density matrix. sigma : qobj Second density matrix. base : {e,2} Base of logarithm. Returns ------- rel_ent : float Value of relative entropy. 
""" if rho.type != 'oper' or sigma.type != 'oper': raise TypeError("Inputs must be density matrices..") # sigma terms svals = sp_eigs(sigma.data, sigma.isherm, vecs=False, sparse=sparse) snzvals = svals[svals != 0] if base == 2: slogvals = log2(snzvals) elif base == e: slogvals = log(snzvals) else: raise ValueError("Base must be 2 or e.") # rho terms rvals = sp_eigs(rho.data, rho.isherm, vecs=False, sparse=sparse) rnzvals = rvals[rvals != 0] # calculate tr(rho*log sigma) rel_trace = float(real(sum(rnzvals * slogvals))) return -entropy_vn(rho, base, sparse) - rel_trace def entropy_conditional(rho, selB, base=e, sparse=False): """ Calculates the conditional entropy :math:`S(A|B)=S(A,B)-S(B)` of a selected density matrix component. Parameters ---------- rho : qobj Density matrix of composite object selB : int/list Selected components for density matrix B base : {e,2} Base of logarithm. sparse : {False,True} Use sparse eigensolver. Returns ------- ent_cond : float Value of conditional entropy """ if rho.type != 'oper': raise TypeError("Input must be density matrix.") if isinstance(selB, int): selB = [selB] B = ptrace(rho, selB) out = (entropy_vn(rho, base, sparse=sparse) - entropy_vn(B, base, sparse=sparse)) return out def participation_ratio(rho): """ Returns the effective number of states for a density matrix. The participation is unity for pure states, and maximally N, where N is the Hilbert space dimensionality, for completely mixed states. Parameters ---------- rho : qobj Density matrix Returns ------- pr : float Effective number of states in the density matrix """ if rho.type == 'ket' or rho.type == 'bra': return 1.0 else: return 1.0 / (rho ** 2).tr() def entangling_power(U): """ Calculate the entangling power of a two-qubit gate U, which is zero of nonentangling gates and 1 and 2/9 for maximally entangling gates. Parameters ---------- U : qobj Qobj instance representing a two-qubit gate. 
Returns ------- ep : float The entanglement power of U (real number between 0 and 1) References: Explorations in Quantum Computing, Colin P. Williams (Springer, 2011) """ if not U.isoper: raise Exception("U must be an operator.") if U.dims != [[2, 2], [2, 2]]: raise Exception("U must be a two-qubit gate.") a = (tensor(U, U).dag() * swap(N=4, targets=[1, 3]) * tensor(U, U) * swap(N=4, targets=[1, 3])) b = (tensor(swap() * U, swap() * U).dag() * swap(N=4, targets=[1, 3]) * tensor(swap() * U, swap() * U) * swap(N=4, targets=[1, 3])) return 5.0/9 - 1.0/36 * (a.tr() + b.tr()).real qutip-4.4.1/qutip/eseries.py000066400000000000000000000326301352460343600160470ustar00rootroot00000000000000# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### __all__ = ['eseries', 'esval', 'esspec', 'estidy'] import numpy as np import scipy.sparse as sp from qutip.qobj import Qobj class eseries(): """ Class representation of an exponential-series expansion of time-dependent quantum objects. Attributes ---------- ampl : ndarray Array of amplitudes for exponential series. rates : ndarray Array of rates for exponential series. dims : list Dimensions of exponential series components shape : list Shape corresponding to exponential series components Methods ------- value(tlist) Evaluate an exponential series at the times listed in tlist spec(wlist) Evaluate the spectrum of an exponential series at frequencies in wlist. 
tidyup() Returns a tidier version of the exponential series """ __array_priority__ = 101 def __init__(self, q=None, s=np.array([])): if isinstance(s, (int, float, complex)): s = np.array([s]) if q is None: self.ampl = np.array([]) self.rates = np.array([]) self.dims = [[1, 1]] self.shape = [1, 1] elif (len(s) == 0): if isinstance(q, eseries): self.ampl = q.ampl self.rates = q.rates self.dims = q.dims self.shape = q.shape elif isinstance(q, (np.ndarray, list)): q = np.asarray(q, dtype=object) ind = np.shape(q) num = ind[0] # number of elements in q if any([Qobj(x).shape != Qobj(q[0]).shape for x in q]): raise TypeError('All amplitudes must have same dimension.') self.ampl = np.array([x for x in q], dtype=object) self.rates = np.zeros(ind) self.dims = self.ampl[0].dims self.shape = self.ampl[0].shape elif isinstance(q, Qobj): qo = Qobj(q) self.ampl = np.array([qo], dtype=object) self.rates = np.array([0]) self.dims = qo.dims self.shape = qo.shape else: self.ampl = np.array([q]) self.rates = np.array([0]) self.dims = [[1, 1]] self.shape = [1, 1] elif len(s) != 0: if isinstance(q, (np.ndarray, list)): q = np.asarray(q, dtype=object) ind = np.shape(q) num = ind[0] if any([Qobj(x).shape != Qobj(q[0]).shape for x in q]): raise TypeError('All amplitudes must have same dimension.') self.ampl = np.array([Qobj(q[x]) for x in range(0, num)], dtype=object) self.dims = self.ampl[0].dims self.shape = self.ampl[0].shape else: num = 1 self.ampl = np.array([Qobj(q)], dtype=object) self.dims = self.ampl[0].dims self.shape = self.ampl[0].shape if isinstance(s, (int, complex, float)): if num != 1: raise TypeError('Number of rates must match number ' + 'of members in object array.') self.rates = np.array([s]) elif isinstance(s, (np.ndarray, list)): if len(s) != num: raise TypeError('Number of rates must match number ' + ' of members in object array.') self.rates = np.array(s) if len(self.ampl) != 0: # combine arrays so that they can be sorted together zipped = list(zip(self.rates, 
self.ampl)) zipped.sort() # sort rates from lowest to highest rates, ampl = list(zip(*zipped)) # get back rates and ampl self.ampl = np.array(ampl, dtype=object) self.rates = np.array(rates) def __str__(self): # string of ESERIES information self.tidyup() s = "ESERIES object: " + str(len(self.ampl)) + " terms\n" s += "Hilbert space dimensions: " + str(self.dims) + "\n" for k in range(0, len(self.ampl)): s += "Exponent #" + str(k) + " = " + str(self.rates[k]) + "\n" if isinstance(self.ampl[k], sp.spmatrix): s += str(self.ampl[k]) + "\n" else: s += str(self.ampl[k]) + "\n" return s def __repr__(self): return self.__str__() # Addition with ESERIES on left (ex. ESERIES+5) def __add__(self, other): right = eseries(other) if self.dims != right.dims: raise TypeError("Incompatible operands for ESERIES addition") out = eseries() out.dims = self.dims out.shape = self.shape out.ampl = np.append(self.ampl, right.ampl) out.rates = np.append(self.rates, right.rates) return out # Addition with ESERIES on right(ex. 5+ESERIES) def __radd__(self, other): return self + other # define negation of ESERIES def __neg__(self): out = eseries() out.dims = self.dims out.shape = self.shape out.ampl = -self.ampl out.rates = self.rates return out # Subtraction with ESERIES on left (ex. ESERIES-5) def __sub__(self, other): return self + (-other) # Subtraction with ESERIES on right (ex. 5-ESERIES) def __rsub__(self, other): return other + (-self) # Multiplication with ESERIES on left (ex. ESERIES*other) def __mul__(self, other): if isinstance(other, eseries): out = eseries() out.dims = self.dims out.shape = self.shape for i in range(len(self.rates)): for j in range(len(other.rates)): out += eseries(self.ampl[i] * other.ampl[j], self.rates[i] + other.rates[j]) return out else: out = eseries() out.dims = self.dims out.shape = self.shape out.ampl = self.ampl * other out.rates = self.rates return out # Multiplication with ESERIES on right (ex. 
other*ESERIES) def __rmul__(self, other): out = eseries() out.dims = self.dims out.shape = self.shape out.ampl = other * self.ampl out.rates = self.rates return out # # todo: # select_ampl, select_rate: functions to select some terms given the ampl # or rate. This is done with {ampl} or (rate) in qotoolbox. we should use # functions with descriptive names for this. # # # evaluate the eseries for a list of times # def value(self, tlist): """ Evaluates an exponential series at the times listed in ``tlist``. Parameters ---------- tlist : ndarray Times at which to evaluate exponential series. Returns ------- val_list : ndarray Values of exponential at times in ``tlist``. """ if self.ampl is None or len(self.ampl) == 0: # no terms, evalue to zero return np.zeros(np.shape(tlist)) if isinstance(tlist, float) or isinstance(tlist, int): tlist = [tlist] if isinstance(self.ampl[0], Qobj): # amplitude vector contains quantum objects val_list = [] for j in range(len(tlist)): exp_factors = np.exp(np.array(self.rates) * tlist[j]) val = 0 for i in range(len(self.ampl)): val += self.ampl[i] * exp_factors[i] val_list.append(val) val_list = np.array(val_list, dtype=object) else: # the amplitude vector contains c numbers val_list = np.zeros(np.size(tlist), dtype=complex) for j in range(len(tlist)): exp_factors = np.exp(np.array(self.rates) * tlist[j]) val_list[j] = np.sum(np.dot(self.ampl, exp_factors)) if all(np.imag(val_list) == 0): val_list = np.real(val_list) if len(tlist) == 1: return val_list[0] else: return val_list def spec(self, wlist): """ Evaluate the spectrum of an exponential series at frequencies in ``wlist``. Parameters ---------- wlist : array_like Array/list of frequenies. Returns ------- val_list : ndarray Values of exponential series at frequencies in ``wlist``. """ val_list = np.zeros(np.size(wlist)) for i in range(len(wlist)): val_list[i] = 2 * np.real( np.dot(self.ampl, 1. 
/ (1.0j * wlist[i] - self.rates))) return val_list def tidyup(self, *args): """ Returns a tidier version of exponential series. """ # # combine duplicate entries (same rate) # rate_tol = 1e-10 ampl_tol = 1e-10 ampl_dict = {} unique_rates = {} ur_len = 0 for r_idx in range(len(self.rates)): # look for a matching rate in the list of unique rates idx = -1 for ur_key in unique_rates.keys(): if abs(self.rates[r_idx] - unique_rates[ur_key]) < rate_tol: idx = ur_key break if idx == -1: # no matching rate, add it unique_rates[ur_len] = self.rates[r_idx] ampl_dict[ur_len] = [self.ampl[r_idx]] ur_len = len(unique_rates) else: # found matching rate, append amplitude to its list ampl_dict[idx].append(self.ampl[r_idx]) # create new amplitude and rate list with only unique rates, and # nonzero amplitudes self.rates = np.array([]) self.ampl = np.array([]) for ur_key in unique_rates.keys(): total_ampl = np.sum(np.asarray(ampl_dict[ur_key], dtype=object)) if (isinstance(total_ampl, float) or isinstance(total_ampl, complex)): if abs(total_ampl) > ampl_tol: self.rates = np.append(self.rates, unique_rates[ur_key]) self.ampl = np.append(self.ampl, total_ampl) else: if abs(total_ampl.full()).max() > ampl_tol: self.rates = np.append(self.rates, unique_rates[ur_key]) self.ampl = np.append(self.ampl, np.asarray([total_ampl], dtype=object)) return self # ----------------------------------------------------------------------------- # # wrapper functions for accessing the class methods (for compatibility with # quantum optics toolbox) # def esval(es, tlist): """ Evaluates an exponential series at the times listed in ``tlist``. Parameters ---------- tlist : ndarray Times at which to evaluate exponential series. Returns ------- val_list : ndarray Values of exponential at times in ``tlist``. """ return es.value(tlist) def esspec(es, wlist): """Evaluate the spectrum of an exponential series at frequencies in ``wlist``. Parameters ---------- wlist : array_like Array/list of frequenies. 
Returns ------- val_list : ndarray Values of exponential series at frequencies in ``wlist``. """ return es.spec(wlist) def estidy(es, *args): """ Returns a tidier version of exponential series. """ return es.tidyup() qutip-4.4.1/qutip/essolve.py000066400000000000000000000152261352460343600160720ustar00rootroot00000000000000# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
############################################################################### __all__ = ['essolve', 'ode2es'] import numpy as np import scipy.linalg as la import scipy.sparse as sp from qutip.qobj import Qobj, issuper, isket, isoper from qutip.eseries import eseries, estidy, esval from qutip.expect import expect from qutip.superoperator import liouvillian, mat2vec, vec2mat from qutip.solver import Result from qutip.operators import qzero # ----------------------------------------------------------------------------- # pass on to wavefunction solver or master equation solver depending on whether # any collapse operators were given. # def essolve(H, rho0, tlist, c_op_list, e_ops): """ Evolution of a state vector or density matrix (`rho0`) for a given Hamiltonian (`H`) and set of collapse operators (`c_op_list`), by expressing the ODE as an exponential series. The output is either the state vector at arbitrary points in time (`tlist`), or the expectation values of the supplied operators (`e_ops`). Parameters ---------- H : qobj/function_type System Hamiltonian. rho0 : :class:`qutip.qobj` Initial state density matrix. tlist : list/array ``list`` of times for :math:`t`. c_op_list : list of :class:`qutip.qobj` ``list`` of :class:`qutip.qobj` collapse operators. e_ops : list of :class:`qutip.qobj` ``list`` of :class:`qutip.qobj` operators for which to evaluate expectation values. Returns ------- expt_array : array Expectation values of wavefunctions/density matrices for the times specified in ``tlist``. .. note:: This solver does not support time-dependent Hamiltonians. 
""" n_expt_op = len(e_ops) n_tsteps = len(tlist) # Calculate the Liouvillian if (c_op_list is None or len(c_op_list) == 0) and isket(rho0): L = H else: L = liouvillian(H, c_op_list) es = ode2es(L, rho0) # evaluate the expectation values if n_expt_op == 0: results = [Qobj()] * n_tsteps else: results = np.zeros([n_expt_op, n_tsteps], dtype=complex) for n, e in enumerate(e_ops): results[n, :] = expect(e, esval(es, tlist)) data = Result() data.solver = "essolve" data.times = tlist data.expect = [np.real(results[n, :]) if e.isherm else results[n, :] for n, e in enumerate(e_ops)] return data # ----------------------------------------------------------------------------- # # def ode2es(L, rho0): """Creates an exponential series that describes the time evolution for the initial density matrix (or state vector) `rho0`, given the Liouvillian (or Hamiltonian) `L`. Parameters ---------- L : qobj Liouvillian of the system. rho0 : qobj Initial state vector or density matrix. Returns ------- eseries : :class:`qutip.eseries` ``eseries`` represention of the system dynamics. """ if issuper(L): # check initial state if isket(rho0): # Got a wave function as initial state: convert to density matrix. 
rho0 = rho0 * rho0.dag() # check if state is below error threshold if abs(rho0.full().sum()) < 1e-10 + 1e-24: # enforce zero operator return eseries(qzero(rho0.dims[0])) w, v = L.eigenstates() v = np.hstack([ket.full() for ket in v]) # w[i] = eigenvalue i # v[:,i] = eigenvector i rlen = np.prod(rho0.shape) r0 = mat2vec(rho0.full()) v0 = la.solve(v, r0) vv = v * sp.spdiags(v0.T, 0, rlen, rlen) out = None for i in range(rlen): qo = Qobj(vec2mat(vv[:, i]), dims=rho0.dims, shape=rho0.shape) if out: out += eseries(qo, w[i]) else: out = eseries(qo, w[i]) elif isoper(L): if not isket(rho0): raise TypeError('Second argument must be a ket if first' + 'is a Hamiltonian.') # check if state is below error threshold if abs(rho0.full().sum()) < 1e-5 + 1e-20: # enforce zero operator dims = rho0.dims return eseries(Qobj(sp.csr_matrix((dims[0][0], dims[1][0]), dtype=complex))) w, v = L.eigenstates() v = np.hstack([ket.full() for ket in v]) # w[i] = eigenvalue i # v[:,i] = eigenvector i rlen = np.prod(rho0.shape) r0 = rho0.full() v0 = la.solve(v, r0) vv = v * sp.spdiags(v0.T, 0, rlen, rlen) out = None for i in range(rlen): qo = Qobj(np.matrix(vv[:, i]).T, dims=rho0.dims, shape=rho0.shape) if out: out += eseries(qo, -1.0j * w[i]) else: out = eseries(qo, -1.0j * w[i]) else: raise TypeError('First argument must be a Hamiltonian or Liouvillian.') return estidy(out) qutip-4.4.1/qutip/expect.py000066400000000000000000000136751352460343600157100ustar00rootroot00000000000000# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. 
Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### __all__ = ['expect', 'variance'] import numpy as np import scipy.sparse as sp from qutip.qobj import Qobj, isoper from qutip.eseries import eseries from qutip.cy.spmatfuncs import (cy_expect_rho_vec, cy_expect_psi, cy_spmm_tr, expect_csr_ket) expect_rho_vec = cy_expect_rho_vec expect_psi = cy_expect_psi def expect(oper, state): '''Calculates the expectation value for operator(s) and state(s). Parameters ---------- oper : qobj/array-like A single or a `list` or operators for expectation value. state : qobj/array-like A single or a `list` of quantum states or density matrices. Returns ------- expt : float/complex/array-like Expectation value. ``real`` if `oper` is Hermitian, ``complex`` otherwise. 
A (nested) array of expectaction values of state or operator are arrays. Examples -------- >>> expect(num(4), basis(4, 3)) 3 ''' if isinstance(state, Qobj) and isinstance(oper, Qobj): return _single_qobj_expect(oper, state) elif isinstance(oper, Qobj) and isinstance(state, eseries): return _single_eseries_expect(oper, state) elif isinstance(oper, (list, np.ndarray)): if isinstance(state, Qobj): if (all([op.isherm for op in oper]) and (state.isket or state.isherm)): return np.array([_single_qobj_expect(o, state) for o in oper]) else: return np.array([_single_qobj_expect(o, state) for o in oper], dtype=complex) else: return [expect(o, state) for o in oper] elif isinstance(state, (list, np.ndarray)): if oper.isherm and all([(op.isherm or op.type == 'ket') for op in state]): return np.array([_single_qobj_expect(oper, x) for x in state]) else: return np.array([_single_qobj_expect(oper, x) for x in state], dtype=complex) else: raise TypeError('Arguments must be quantum objects or eseries') def _single_qobj_expect(oper, state): """ Private function used by expect to calculate expectation values of Qobjs. """ if isoper(oper): if oper.dims[1] != state.dims[0]: raise Exception('Operator and state do not have same tensor ' + 'structure: %s and %s' % (oper.dims[1], state.dims[0])) if state.type == 'oper': # calculates expectation value via TR(op*rho) return cy_spmm_tr(oper.data, state.data, oper.isherm and state.isherm) elif state.type == 'ket': # calculates expectation value via return expect_csr_ket(oper.data, state.data, oper.isherm) else: raise TypeError('Invalid operand types') def _single_eseries_expect(oper, state): """ Private function used by expect to calculate expectation values for eseries. 
""" out = eseries() if isoper(state.ampl[0]): out.rates = state.rates out.ampl = np.array([expect(oper, a) for a in state.ampl]) else: out.rates = np.array([]) out.ampl = np.array([]) for m in range(len(state.rates)): op_m = state.ampl[m].data.conj().T * oper.data for n in range(len(state.rates)): a = op_m * state.ampl[n].data if isinstance(a, sp.spmatrix): a = a.todense() out.rates = np.append(out.rates, state.rates[n] - state.rates[m]) out.ampl = np.append(out.ampl, a) return out def variance(oper, state): """ Variance of an operator for the given state vector or density matrix. Parameters ---------- oper : qobj Operator for expectation value. state : qobj/list A single or `list` of quantum states or density matrices.. Returns ------- var : float Variance of operator 'oper' for given state. """ return expect(oper ** 2, state) - expect(oper, state) ** 2 qutip-4.4.1/qutip/fastsparse.py000066400000000000000000000411161352460343600165620ustar00rootroot00000000000000# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, The QuTiP Project. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. 
# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### import numpy as np import operator from scipy.sparse import (_sparsetools, isspmatrix, isspmatrix_csr, csr_matrix, coo_matrix, csc_matrix, dia_matrix) from scipy.sparse.sputils import (upcast, upcast_char, to_native, isdense, isshape, getdtype, isscalarlike, get_index_dtype) from scipy.sparse.base import spmatrix, isspmatrix, SparseEfficiencyWarning from warnings import warn class fast_csr_matrix(csr_matrix): """ A subclass of scipy.sparse.csr_matrix that skips the data format checks that are run everytime a new csr_matrix is created. 
""" def __init__(self, args=None, shape=None, dtype=None, copy=False): if args is None: #Build zero matrix if shape is None: raise Exception('Shape must be given when building zero matrix.') self.data = np.array([], dtype=complex) self.indices = np.array([], dtype=np.int32) self.indptr = np.zeros(shape[0]+1, dtype=np.int32) self._shape = tuple(int(s) for s in shape) else: if args[0].shape[0] and args[0].dtype != complex: raise TypeError('fast_csr_matrix allows only complex data.') if args[1].shape[0] and args[1].dtype != np.int32: raise TypeError('fast_csr_matrix allows only int32 indices.') if args[2].shape[0] and args[1].dtype != np.int32: raise TypeError('fast_csr_matrix allows only int32 indptr.') self.data = np.array(args[0], dtype=complex, copy=copy) self.indices = np.array(args[1], dtype=np.int32, copy=copy) self.indptr = np.array(args[2], dtype=np.int32, copy=copy) if shape is None: self._shape = tuple([len(self.indptr)-1]*2) else: self._shape = tuple(int(s) for s in shape) self.dtype = complex self.maxprint = 50 self.format = 'csr' def _binopt(self, other, op): """ Do the binary operation fn to two sparse matrices using fast_csr_matrix only when other is also a fast_csr_matrix. """ # e.g. csr_plus_csr, csr_minus_csr, etc. if not isinstance(other, fast_csr_matrix): other = csr_matrix(other) # e.g. csr_plus_csr, csr_minus_csr, etc. 
fn = getattr(_sparsetools, self.format + op + self.format) maxnnz = self.nnz + other.nnz idx_dtype = get_index_dtype((self.indptr, self.indices, other.indptr, other.indices), maxval=maxnnz) indptr = np.empty(self.indptr.shape, dtype=idx_dtype) indices = np.empty(maxnnz, dtype=idx_dtype) bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_'] if op in bool_ops: data = np.empty(maxnnz, dtype=np.bool_) else: data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype)) fn(self.shape[0], self.shape[1], np.asarray(self.indptr, dtype=idx_dtype), np.asarray(self.indices, dtype=idx_dtype), self.data, np.asarray(other.indptr, dtype=idx_dtype), np.asarray(other.indices, dtype=idx_dtype), other.data, indptr, indices, data) actual_nnz = indptr[-1] indices = indices[:actual_nnz] data = data[:actual_nnz] if actual_nnz < maxnnz // 2: # too much waste, trim arrays indices = indices.copy() data = data.copy() if isinstance(other, fast_csr_matrix) and (not op in bool_ops): A = fast_csr_matrix((data, indices, indptr), dtype=data.dtype, shape=self.shape) else: A = csr_matrix((data, indices, indptr), dtype=data.dtype, shape=self.shape) return A def multiply(self, other): """Point-wise multiplication by another matrix, vector, or scalar. """ # Scalar multiplication. if isscalarlike(other): return self._mul_scalar(other) # Sparse matrix or vector. if isspmatrix(other): if self.shape == other.shape: if not isinstance(other, fast_csr_matrix): other = csr_matrix(other) return self._binopt(other, '_elmul_') # Single element. elif other.shape == (1,1): return self._mul_scalar(other.toarray()[0, 0]) elif self.shape == (1,1): return other._mul_scalar(self.toarray()[0, 0]) # A row times a column. elif self.shape[1] == other.shape[0] and self.shape[1] == 1: return self._mul_sparse_matrix(other.tocsc()) elif self.shape[0] == other.shape[1] and self.shape[0] == 1: return other._mul_sparse_matrix(self.tocsc()) # Row vector times matrix. other is a row. 
elif other.shape[0] == 1 and self.shape[1] == other.shape[1]: other = dia_matrix((other.toarray().ravel(), [0]), shape=(other.shape[1], other.shape[1])) return self._mul_sparse_matrix(other) # self is a row. elif self.shape[0] == 1 and self.shape[1] == other.shape[1]: copy = dia_matrix((self.toarray().ravel(), [0]), shape=(self.shape[1], self.shape[1])) return other._mul_sparse_matrix(copy) # Column vector times matrix. other is a column. elif other.shape[1] == 1 and self.shape[0] == other.shape[0]: other = dia_matrix((other.toarray().ravel(), [0]), shape=(other.shape[0], other.shape[0])) return other._mul_sparse_matrix(self) # self is a column. elif self.shape[1] == 1 and self.shape[0] == other.shape[0]: copy = dia_matrix((self.toarray().ravel(), [0]), shape=(self.shape[0], self.shape[0])) return copy._mul_sparse_matrix(other) else: raise ValueError("inconsistent shapes") # Dense matrix. if isdense(other): if self.shape == other.shape: ret = self.tocoo() ret.data = np.multiply(ret.data, other[ret.row, ret.col] ).view(np.ndarray).ravel() return ret # Single element. elif other.size == 1: return self._mul_scalar(other.flat[0]) # Anything else. return np.multiply(self.todense(), other) def _mul_sparse_matrix(self, other): """ Do the sparse matrix mult returning fast_csr_matrix only when other is also fast_csr_matrix. 
""" M, K1 = self.shape K2, N = other.shape major_axis = self._swap((M,N))[0] if isinstance(other, fast_csr_matrix): A = zcsr_mult(self, other, sorted=1) return A other = csr_matrix(other) # convert to this format idx_dtype = get_index_dtype((self.indptr, self.indices, other.indptr, other.indices), maxval=M*N) indptr = np.empty(major_axis + 1, dtype=idx_dtype) fn = getattr(_sparsetools, self.format + '_matmat_pass1') fn(M, N, np.asarray(self.indptr, dtype=idx_dtype), np.asarray(self.indices, dtype=idx_dtype), np.asarray(other.indptr, dtype=idx_dtype), np.asarray(other.indices, dtype=idx_dtype), indptr) nnz = indptr[-1] idx_dtype = get_index_dtype((self.indptr, self.indices, other.indptr, other.indices), maxval=nnz) indptr = np.asarray(indptr, dtype=idx_dtype) indices = np.empty(nnz, dtype=idx_dtype) data = np.empty(nnz, dtype=upcast(self.dtype, other.dtype)) fn = getattr(_sparsetools, self.format + '_matmat_pass2') fn(M, N, np.asarray(self.indptr, dtype=idx_dtype), np.asarray(self.indices, dtype=idx_dtype), self.data, np.asarray(other.indptr, dtype=idx_dtype), np.asarray(other.indices, dtype=idx_dtype), other.data, indptr, indices, data) A = csr_matrix((data,indices,indptr),shape=(M,N)) return A def _scalar_binopt(self, other, op): """Scalar version of self._binopt, for cases in which no new nonzeros are added. Produces a new spmatrix in canonical form. """ self.sum_duplicates() res = self._with_data(op(self.data, other), copy=True) res.eliminate_zeros() return res def __eq__(self, other): # Scalar other. if isscalarlike(other): if np.isnan(other): return csr_matrix(self.shape, dtype=np.bool_) if other == 0: warn("Comparing a sparse matrix with 0 using == is inefficient" ", try using != instead.", SparseEfficiencyWarning) all_true = _all_true(self.shape) inv = self._scalar_binopt(other, operator.ne) return all_true - inv else: return self._scalar_binopt(other, operator.eq) # Dense other. elif isdense(other): return self.todense() == other # Sparse other. 
elif isspmatrix(other): warn("Comparing sparse matrices using == is inefficient, try using" " != instead.", SparseEfficiencyWarning) #TODO sparse broadcasting if self.shape != other.shape: return False elif self.format != other.format: other = other.asformat(self.format) res = self._binopt(other,'_ne_') all_true = _all_true(self.shape) return all_true - res else: return False def __ne__(self, other): # Scalar other. if isscalarlike(other): if np.isnan(other): warn("Comparing a sparse matrix with nan using != is inefficient", SparseEfficiencyWarning) all_true = _all_true(self.shape) return all_true elif other != 0: warn("Comparing a sparse matrix with a nonzero scalar using !=" " is inefficient, try using == instead.", SparseEfficiencyWarning) all_true = _all_true(self.shape) inv = self._scalar_binopt(other, operator.eq) return all_true - inv else: return self._scalar_binopt(other, operator.ne) # Dense other. elif isdense(other): return self.todense() != other # Sparse other. elif isspmatrix(other): #TODO sparse broadcasting if self.shape != other.shape: return True elif self.format != other.format: other = other.asformat(self.format) return self._binopt(other,'_ne_') else: return True def _inequality(self, other, op, op_name, bad_scalar_msg): # Scalar other. if isscalarlike(other): if 0 == other and op_name in ('_le_', '_ge_'): raise NotImplementedError(" >= and <= don't work with 0.") elif op(0, other): warn(bad_scalar_msg, SparseEfficiencyWarning) other_arr = np.empty(self.shape, dtype=np.result_type(other)) other_arr.fill(other) other_arr = csr_matrix(other_arr) return self._binopt(other_arr, op_name) else: return self._scalar_binopt(other, op) # Dense other. elif isdense(other): return op(self.todense(), other) # Sparse other. 
elif isspmatrix(other): #TODO sparse broadcasting if self.shape != other.shape: raise ValueError("inconsistent shapes") elif self.format != other.format: other = other.asformat(self.format) if op_name not in ('_ge_', '_le_'): return self._binopt(other, op_name) warn("Comparing sparse matrices using >= and <= is inefficient, " "using <, >, or !=, instead.", SparseEfficiencyWarning) all_true = _all_true(self.shape) res = self._binopt(other, '_gt_' if op_name == '_le_' else '_lt_') return all_true - res else: raise ValueError("Operands could not be compared.") def _with_data(self,data,copy=True): """Returns a matrix with the same sparsity structure as self, but with different data. By default the structure arrays (i.e. .indptr and .indices) are copied. """ # We need this just in case something like abs(data) gets called # does nothing if data.dtype is complex. data = np.asarray(data, dtype=complex) if copy: return fast_csr_matrix((data,self.indices.copy(),self.indptr.copy()), shape=self.shape,dtype=data.dtype) else: return fast_csr_matrix((data,self.indices,self.indptr), shape=self.shape,dtype=data.dtype) def transpose(self): """ Returns the transpose of the matrix, keeping it in fast_csr format. """ return zcsr_transpose(self) def trans(self): """ Same as transpose """ return zcsr_transpose(self) def getH(self): """ Returns the conjugate-transpose of the matrix, keeping it in fast_csr format. """ return zcsr_adjoint(self) def adjoint(self): """ Same as getH """ return zcsr_adjoint(self) def csr2fast(A, copy=False): if (not isinstance(A, fast_csr_matrix)) or copy: # Do not need to do any type checking here # since fast_csr_matrix does that. return fast_csr_matrix((A.data,A.indices,A.indptr), shape=A.shape,copy=copy) else: return A def fast_identity(N): """Generates a sparse identity matrix in fast_csr format. 
""" data = np.ones(N, dtype=complex) ind = np.arange(N, dtype=np.int32) ptr = np.arange(N+1, dtype=np.int32) ptr[-1] = N return fast_csr_matrix((data,ind,ptr),shape=(N,N)) #Convenience functions #-------------------- def _all_true(shape): A = csr_matrix((np.ones(np.prod(shape), dtype=np.bool_), np.tile(np.arange(shape[1],dtype=np.int32),shape[0]), np.arange(0,np.prod(shape)+1,shape[1],dtype=np.int32)), shape=shape) return A #Need to do some trailing imports here #------------------------------------- from qutip.cy.spmath import (zcsr_transpose, zcsr_adjoint, zcsr_mult) qutip-4.4.1/qutip/fileio.py000066400000000000000000000225231352460343600156570ustar00rootroot00000000000000# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. 
def file_data_store(filename, data, numtype="complex", numformat="decimal",
                    sep=","):
    """Stores a matrix of data to a file to be read by an external program.

    Parameters
    ----------
    filename : str
        Name of data file to be stored, including extension.
    data: array_like
        Data to be written to file.
    numtype : str {'complex, 'real'}
        Type of numerical data.
    numformat : str {'decimal','exp'}
        Format for written data.
    sep : str
        Single-character field seperator.  Usually a tab, space, comma,
        or semicolon.
    """
    if filename is None or data is None:
        raise ValueError("filename or data is unspecified")

    M, N = np.shape(data)

    # Validate the options and build a per-element formatter up front, so an
    # invalid option never leaves a partially written file behind.
    if numtype not in ("complex", "real"):
        raise ValueError("Illegal numtype value (should be " +
                         "'complex' or 'real')")
    if numformat == "exp":
        fmt = "%.10e"
    elif numformat == "decimal":
        fmt = "%.10f"
    else:
        raise ValueError("Illegal numformat value (should be " +
                         "'exp' or 'decimal')")

    if numtype == "complex":
        def _format(x):
            # "%e"/"%f" already emit the minus sign for a negative imaginary
            # part, so an explicit '+' is only inserted for nonnegative ones.
            sign = "+" if np.imag(x) >= 0.0 else ""
            return (fmt + sign + fmt + "j") % (np.real(x), np.imag(x))
    else:
        def _format(x):
            return fmt % np.real(x)

    # 'with' guarantees the handle is closed even if formatting raises.
    with open(filename, "w") as f:
        f.write("# Generated by QuTiP: %dx%d %s matrix " % (M, N, numtype) +
                "in %s format ['%s' separated values].\n" % (numformat, sep))
        for m in range(M):
            f.write(sep.join(_format(data[m, n]) for n in range(N)))
            f.write("\n")


def file_data_read(filename, sep=None):
    """Retrieves an array of data from the requested file.

    Parameters
    ----------
    filename : str
        Name of file containing reqested data.
    sep : str
        Seperator used to store data.  If None, the delimiter is detected
        automatically from the first data line (',', ';', ':', '|' or
        whitespace).

    Returns
    -------
    data : array_like
        Data from selected file.
    """
    if filename is None:
        raise ValueError("filename is unspecified")

    # First pass: count rows and columns, detect the delimiter and whether
    # the entries are complex.
    M = N = 0
    numtype = None    # fixed: was left undefined for data-less files
    numformat = None  # recorded during detection; parsing does not need it
    with open(filename, "r") as f:
        for line in f:
            # skip comment lines
            if line[0] == '#' or line[0] == '%':
                continue
            # auto-detect the column delimiter from the first data line
            if N == 0 and sep is None:
                if len(line.rstrip().split(",")) > 1:
                    sep = ","
                elif len(line.rstrip().split(";")) > 1:
                    sep = ";"
                elif len(line.rstrip().split(":")) > 1:
                    sep = ":"
                elif len(line.rstrip().split("|")) > 1:
                    sep = "|"
                elif len(line.rstrip().split()) > 1:
                    # special case for a mix of white space delimiters
                    sep = None
                else:
                    raise ValueError("Unrecognized column deliminator")

            line_vec = line.split(sep)
            n = len(line_vec)
            if N == 0 and n > 0:
                N = n
                # complex entries are written as "a+bj" (or with an "i")
                if ("j" in line_vec[0]) or ("i" in line_vec[0]):
                    numtype = "complex"
                else:
                    numtype = "real"
                if ("e" in line_vec[0]) or ("E" in line_vec[0]):
                    numformat = "exp"
                else:
                    numformat = "decimal"
            elif N != n:
                raise ValueError("Badly formatted data file: " +
                                 "unequal number of columns")
            M += 1

        if numtype is None:
            # previously this fell through to a NameError below
            raise ValueError("Data file contains no data rows")

        # Second pass: parse the numbers into a preallocated array.
        f.seek(0)
        if numtype == "complex":
            data = np.zeros((M, N), dtype="complex")
            convert = complex
        else:
            data = np.zeros((M, N), dtype="float")
            convert = float

        m = 0
        for line in f:
            # skip comment lines
            if line[0] == '#' or line[0] == '%':
                continue
            for n, item in enumerate(line.rstrip().split(sep)):
                data[m, n] = convert(item)
            m += 1

    return data


def qsave(data, name='qutip_data'):
    """
    Saves given data to file named 'filename.qu' in current directory.

    Parameters
    ----------
    data : instance/array_like
        Input Python object to be stored.
    name : str
        Name of output data file (the '.qu' extension is appended).
    """
    # 'with' closes the file even if pickling raises.
    with open(name + '.qu', 'wb') as fileObject:
        pickle.dump(data, fileObject)


def qload(name):
    """
    Loads data file from file named 'filename.qu' in current directory.

    Parameters
    ----------
    name : str
        Name of data file to be loaded.

    Returns
    -------
    qobject : instance / array_like
        Object retrieved from requested file.
    """
    with open(name + '.qu', 'rb') as fileObject:
        if sys.version_info >= (3, 0):
            # latin1 keeps Python-2-era pickles (old numpy arrays) loadable
            out = pickle.load(fileObject, encoding='latin1')
        else:
            out = pickle.load(fileObject)

    if isinstance(out, Qobj):  # for quantum objects
        print('Loaded Qobj object:')
        str1 = "Quantum object: " + "dims = " + str(out.dims) \
            + ", shape = " + str(out.shape) + ", type = " + out.type
        if out.type == 'oper' or out.type == 'super':
            str1 += ", isHerm = " + str(out.isherm) + "\n"
        else:
            str1 += "\n"
        print(str1)
    elif isinstance(out, Result):
        print('Loaded Result object:')
        print(out)
    else:
        print("Loaded " + str(type(out).__name__) + " object.")
    return out
def floquet_modes(H, T, args=None, sort=False, U=None):
    """
    Calculate the initial Floquet modes Phi_alpha(0) for a driven system
    with period T.

    Returns a list of :class:`qutip.qobj` instances representing the Floquet
    modes and a list of corresponding quasienergies, sorted by increasing
    quasienergy in the interval [-pi/T, pi/T]. The optional parameter `sort`
    decides if the output is to be sorted in increasing quasienergies or not.

    Parameters
    ----------
    H : :class:`qutip.qobj`
        system Hamiltonian, time-dependent with period `T`

    T : float
        The period of the time-dependence of the hamiltonian.

    args : dictionary
        dictionary with variables required to evaluate H

    sort : bool
        Whether to sort the output by quasienergy.

    U : :class:`qutip.qobj`
        The propagator for the time-dependent Hamiltonian with period `T`.
        If U is `None` (default), it will be calculated from the Hamiltonian
        `H` using :func:`qutip.propagator.propagator`.

    Returns
    -------
    output : list of kets, list of quasi energies
        Two lists: the Floquet modes as kets and the quasi energies.
    """
    if U is None:
        # get the unitary propagator over one driving period
        U = propagator(H, T, [], args)

    # the eigenvectors of the one-period propagator are the Floquet modes;
    # the eigenvalue phases encode the quasienergies
    evals, evecs = la.eig(U.full())

    eargs = np.angle(evals)

    # fold the phases into (-pi, pi] so that the quasienergies fall in
    # [-pi/T, pi/T), T being the driving period
    eargs += (eargs <= -np.pi) * (2 * np.pi) + (eargs > np.pi) * (-2 * np.pi)
    e_quasi = -eargs / T

    if sort:
        # NOTE(review): argsort(-e_quasi) yields *descending* quasienergies,
        # while the docstring advertises increasing order -- confirm intent
        # before changing, since existing callers may rely on this order.
        order = np.argsort(-e_quasi)
    else:
        order = list(range(len(evals)))

    # package each eigenvector as a ket; np.matrix is deprecated, so build
    # the column vectors with reshape instead
    new_dims = [U.dims[0], [1] * len(U.dims[0])]
    new_shape = [U.shape[0], 1]
    kets_order = [Qobj(evecs[:, o].reshape(-1, 1),
                       dims=new_dims, shape=new_shape)
                  for o in order]

    return kets_order, e_quasi[order]


def floquet_modes_t(f_modes_0, f_energies, t, H, T, args=None):
    """
    Calculate the Floquet modes at time `t` by propagating the initial
    Floquet modes Phi_alpha(0).

    Parameters
    ----------
    f_modes_0 : list of :class:`qutip.qobj` (kets)
        Floquet modes at :math:`t=0`.

    f_energies : list
        Floquet energies.

    t : float
        The time at which to evaluate the floquet modes.

    H : :class:`qutip.qobj`
        system Hamiltonian, time-dependent with period `T`

    T : float
        The period of the time-dependence of the hamiltonian.

    args : dictionary
        dictionary with variables required to evaluate H

    Returns
    -------
    output : list of kets
        The Floquet modes as kets at time :math:`t`
    """
    # find t in [0, T] such that t_orig = t + n * T for integer n
    # (the modes are T-periodic up to the quasienergy phase stripped below)
    t = t - int(t / T) * T

    if t > 0.0:
        # propagate the t=0 modes and undo the quasienergy phase evolution
        U = propagator(H, t, [], args)
        return [U * f_modes_0[n] * np.exp(1j * f_energies[n] * t)
                for n in np.arange(len(f_modes_0))]

    return f_modes_0
def floquet_modes_table(f_modes_0, f_energies, tlist, H, T, args=None):
    """
    Pre-calculate the Floquet modes for a range of times spanning the floquet
    period. Can later be used as a table to look up the floquet modes for
    any time.

    Parameters
    ----------
    f_modes_0 : list of :class:`qutip.qobj` (kets)
        Floquet modes at :math:`t=0`.

    f_energies : list
        Floquet energies.

    tlist : array
        The list of times at which to evaluate the floquet modes.

    H : :class:`qutip.qobj`
        system Hamiltonian, time-dependent with period `T`

    T : float
        The period of the time-dependence of the hamiltonian.

    args : dictionary
        dictionary with variables required to evaluate H

    Returns
    -------
    output : nested list
        A nested list of Floquet modes as kets for each time in `tlist`
    """
    # only times within the first driving period are tabulated
    tlist_period = tlist[np.where(tlist <= T)]

    f_modes_table_t = [[] for t in tlist_period]

    opt = Options()
    opt.rhs_reuse = True
    rhs_clear()

    for n, f_mode in enumerate(f_modes_0):
        output = sesolve(H, f_mode, tlist_period, [], args, opt)
        for t_idx, f_state_t in enumerate(output.states):
            # remove the quasienergy phase to recover the periodic mode
            f_modes_table_t[t_idx].append(
                f_state_t * np.exp(1j * f_energies[n] * tlist_period[t_idx]))
    return f_modes_table_t


def floquet_modes_t_lookup(f_modes_table_t, t, T):
    """
    Lookup the floquet mode at time t in the pre-calculated table of floquet
    modes in the first period of the time-dependence.

    Parameters
    ----------
    f_modes_table_t : nested list of :class:`qutip.qobj` (kets)
        A lookup-table of Floquet modes at times precalculated by
        :func:`qutip.floquet.floquet_modes_table`.

    t : float
        The time for which to evaluate the Floquet modes.

    T : float
        The period of the time-dependence of the hamiltonian.

    Returns
    -------
    output : nested list
        A list of Floquet modes as kets for the time that most closely
        matching the time `t` in the supplied table of Floquet modes.
    """
    # find t_wrap in [0, T] such that t = t_wrap + n * T for integer n
    t_wrap = t - int(t / T) * T

    # find the index in the table that corresponds to t_wrap (= tlist[t_idx])
    t_idx = int(t_wrap / T * len(f_modes_table_t))

    # XXX: might want to give a warning if the cast of t_idx to int discard
    # a significant fraction in t_idx, which would happen if the list of time
    # values isn't perfect matching the driving period
    return f_modes_table_t[t_idx]


def floquet_states(f_modes_t, f_energies, t):
    """
    Evaluate the floquet states at time t given the Floquet modes at that
    time.

    Parameters
    ----------
    f_modes_t : list of :class:`qutip.qobj` (kets)
        A list of Floquet modes for time :math:`t`.

    f_energies : array
        The Floquet energies.

    t : float
        The time for which to evaluate the Floquet states.

    Returns
    -------
    output : list
        A list of Floquet states for the time :math:`t`.
    """
    # each state is the periodic mode times its quasienergy phase factor
    return [(f_modes_t[i] * np.exp(-1j * f_energies[i] * t))
            for i in np.arange(len(f_energies))]
""" f_modes_t = floquet_modes_t(f_modes_0, f_energies, t, H, T, args) return [(f_modes_t[i] * exp(-1j * f_energies[i] * t)) for i in np.arange(len(f_energies))] def floquet_wavefunction(f_modes_t, f_energies, f_coeff, t): """ Evaluate the wavefunction for a time t using the Floquet state decompositon, given the Floquet modes at time `t`. Parameters ---------- f_modes_t : list of :class:`qutip.qobj` (kets) A list of initial Floquet modes (for time :math:`t=0`). f_energies : array The Floquet energies. f_coeff : array The coefficients for Floquet decomposition of the initial wavefunction. t : float The time for which to evaluate the Floquet states. Returns ------- output : :class:`qutip.qobj` The wavefunction for the time :math:`t`. """ return sum([f_modes_t[i] * exp(-1j * f_energies[i] * t) * f_coeff[i] for i in np.arange(len(f_energies))]) def floquet_wavefunction_t(f_modes_0, f_energies, f_coeff, t, H, T, args=None): """ Evaluate the wavefunction for a time t using the Floquet state decompositon, given the initial Floquet modes. Parameters ---------- f_modes_t : list of :class:`qutip.qobj` (kets) A list of initial Floquet modes (for time :math:`t=0`). f_energies : array The Floquet energies. f_coeff : array The coefficients for Floquet decomposition of the initial wavefunction. t : float The time for which to evaluate the Floquet states. H : :class:`qutip.qobj` System Hamiltonian, time-dependent with period `T`. T : float The period of the time-dependence of the hamiltonian. args : dictionary Dictionary with variables required to evaluate H. Returns ------- output : :class:`qutip.qobj` The wavefunction for the time :math:`t`. 
""" f_states_t = floquet_states_t(f_modes_0, f_energies, t, H, T, args) return sum([f_states_t[i] * f_coeff[i] for i in np.arange(len(f_energies))]) def floquet_state_decomposition(f_states, f_energies, psi): """ Decompose the wavefunction `psi` (typically an initial state) in terms of the Floquet states, :math:`\psi = \sum_\\alpha c_\\alpha \psi_\\alpha(0)`. Parameters ---------- f_states : list of :class:`qutip.qobj` (kets) A list of Floquet modes. f_energies : array The Floquet energies. psi : :class:`qutip.qobj` The wavefunction to decompose in the Floquet state basis. Returns ------- output : array The coefficients :math:`c_\\alpha` in the Floquet state decomposition. """ # [:1,:1][0, 0] patch around scipy 1.3.0 bug return [(f_states[i].dag() * psi).data[:1,:1][0, 0] for i in np.arange(len(f_energies))] def fsesolve(H, psi0, tlist, e_ops=[], T=None, args={}, Tsteps=100): """ Solve the Schrodinger equation using the Floquet formalism. Parameters ---------- H : :class:`qutip.qobj.Qobj` System Hamiltonian, time-dependent with period `T`. psi0 : :class:`qutip.qobj` Initial state vector (ket). tlist : *list* / *array* list of times for :math:`t`. e_ops : list of :class:`qutip.qobj` / callback function list of operators for which to evaluate expectation values. If this list is empty, the state vectors for each time in `tlist` will be returned instead of expectation values. T : float The period of the time-dependence of the hamiltonian. args : dictionary Dictionary with variables required to evaluate H. Tsteps : integer The number of time steps in one driving period for which to precalculate the Floquet modes. `Tsteps` should be an even number. Returns ------- output : :class:`qutip.solver.Result` An instance of the class :class:`qutip.solver.Result`, which contains either an *array* of expectation values or an array of state vectors, for the times specified by `tlist`. 
""" if not T: # assume that tlist span exactly one period of the driving T = tlist[-1] # find the floquet modes for the time-dependent hamiltonian f_modes_0, f_energies = floquet_modes(H, T, args) # calculate the wavefunctions using the from the floquet modes f_modes_table_t = floquet_modes_table(f_modes_0, f_energies, np.linspace(0, T, Tsteps + 1), H, T, args) # setup Result for storing the results output = Result() output.times = tlist output.solver = "fsesolve" if isinstance(e_ops, FunctionType): output.num_expect = 0 expt_callback = True elif isinstance(e_ops, list): output.num_expect = len(e_ops) expt_callback = False if output.num_expect == 0: output.states = [] else: output.expect = [] for op in e_ops: if op.isherm: output.expect.append(np.zeros(len(tlist))) else: output.expect.append(np.zeros(len(tlist), dtype=complex)) else: raise TypeError("e_ops must be a list Qobj or a callback function") psi0_fb = psi0.transform(f_modes_0) for t_idx, t in enumerate(tlist): f_modes_t = floquet_modes_t_lookup(f_modes_table_t, t, T) f_states_t = floquet_states(f_modes_t, f_energies, t) psi_t = psi0_fb.transform(f_states_t, True) if expt_callback: # use callback method e_ops(t, psi_t) else: # calculate all the expectation values, or output psi if # no expectation value operators where defined if output.num_expect == 0: output.states.append(Qobj(psi_t)) else: for e_idx, e in enumerate(e_ops): output.expect[e_idx][t_idx] = expect(e, psi_t) return output def floquet_master_equation_rates(f_modes_0, f_energies, c_op, H, T, args, J_cb, w_th, kmax=5, f_modes_table_t=None): """ Calculate the rates and matrix elements for the Floquet-Markov master equation. Parameters ---------- f_modes_0 : list of :class:`qutip.qobj` (kets) A list of initial Floquet modes. f_energies : array The Floquet energies. c_op : :class:`qutip.qobj` The collapse operators describing the dissipation. H : :class:`qutip.qobj` System Hamiltonian, time-dependent with period `T`. 
def floquet_master_equation_rates(f_modes_0, f_energies, c_op, H, T,
                                  args, J_cb, w_th, kmax=5,
                                  f_modes_table_t=None):
    """
    Calculate the rates and matrix elements for the Floquet-Markov master
    equation.

    Parameters
    ----------
    f_modes_0 : list of :class:`qutip.qobj` (kets)
        A list of initial Floquet modes.

    f_energies : array
        The Floquet energies.

    c_op : :class:`qutip.qobj`
        The collapse operators describing the dissipation.

    H : :class:`qutip.qobj`
        System Hamiltonian, time-dependent with period `T`.

    T : float
        The period of the time-dependence of the hamiltonian.

    args : dictionary
        Dictionary with variables required to evaluate H.

    J_cb : callback functions
        A callback function that computes the noise power spectrum, as
        a function of frequency, associated with the collapse operator `c_op`.

    w_th : float
        The temperature in units of frequency.

    kmax : int
        The truncation of the number of sidebands (default 5).

    f_modes_table_t : nested list of :class:`qutip.qobj` (kets)
        A lookup-table of Floquet modes at times precalculated by
        :func:`qutip.floquet.floquet_modes_table` (optional).

    Returns
    -------
    output : list
        A list (Delta, X, Gamma, A) containing the matrices Delta, X,
        Gamma and A used in the construction of the Floquet-Markov master
        equation.
    """
    N = len(f_energies)
    M = 2 * kmax + 1    # number of retained sidebands, k = -kmax..kmax

    omega = (2 * pi) / T

    Delta = np.zeros((N, N, M))                 # transition frequencies
    X = np.zeros((N, N, M), dtype=complex)      # Fourier matrix elements
    Gamma = np.zeros((N, N, M))                 # golden-rule rates
    A = np.zeros((N, N))                        # combined rate matrix

    # fixed time grid over one driving period for the Fourier integrals
    nT = 100
    dT = T / nT
    tlist = np.arange(dT, T + dT / 2, dT)

    if f_modes_table_t is None:
        f_modes_table_t = floquet_modes_table(f_modes_0, f_energies,
                                              np.linspace(0, T, nT + 1),
                                              H, T, args)

    # X[a,b,k] = (1/T) * integral over one period of
    # exp(-i k w t) <phi_a(t)|c_op|phi_b(t)>, computed as a Riemann sum
    for t in tlist:
        # TODO: repeated invocations of floquet_modes_t is
        # inefficient...  make a and b outer loops and use the mesolve
        # instead of the propagator.
        # f_modes_t = floquet_modes_t(f_modes_0, f_energies, t, H, T, args)
        f_modes_t = floquet_modes_t_lookup(f_modes_table_t, t, T)
        for a in range(N):
            for b in range(N):
                k_idx = 0
                for k in range(-kmax, kmax + 1, 1):
                    # [:1,:1][0, 0] patch around scipy 1.3.0 bug
                    X[a, b, k_idx] += (dT / T) * exp(-1j * k * omega * t) * \
                        (f_modes_t[a].dag() * c_op *
                         f_modes_t[b])[:1, :1][0, 0]
                    k_idx += 1

    # golden-rule rates: only positive-frequency transitions contribute
    # (enforced by the Heaviside step)
    Heaviside = lambda x: ((np.sign(x) + 1) / 2.0)
    for a in range(N):
        for b in range(N):
            k_idx = 0
            for k in range(-kmax, kmax + 1, 1):
                Delta[a, b, k_idx] = f_energies[a] - f_energies[b] + k * omega
                Gamma[a, b, k_idx] = 2 * pi * Heaviside(Delta[a, b, k_idx]) * \
                    J_cb(Delta[a, b, k_idx]) * abs(X[a, b, k_idx]) ** 2
                k_idx += 1

    # fold the sideband rates into a single rate matrix, adding the
    # thermally stimulated contributions weighted by n_thermal
    for a in range(N):
        for b in range(N):
            for k in range(-kmax, kmax + 1, 1):
                k1_idx = k + kmax
                k2_idx = -k + kmax
                A[a, b] += Gamma[a, b, k1_idx] + \
                    n_thermal(abs(Delta[a, b, k1_idx]), w_th) * \
                    (Gamma[a, b, k1_idx] + Gamma[b, a, k2_idx])

    return Delta, X, Gamma, A


def floquet_collapse_operators(A):
    """
    Construct collapse operators corresponding to the Floquet-Markov
    master-equation rate matrix `A`.

    .. note::

        Experimental.
    """
    c_ops = []

    N, M = np.shape(A)

    #
    # Here we really need a master equation on Bloch-Redfield form, or perhaps
    # we can use the Lindblad form master equation with some rotating frame
    # approximations? ...
    #
    for a in range(N):
        for b in range(N):
            if a != b and abs(A[a, b]) > 0.0:
                # only relaxation terms included...
                c_ops.append(sqrt(A[a, b]) * projection(N, a, b))

    return c_ops
""" if isinstance(Alist, list): # Alist can be a list of rate matrices corresponding # to different operators that couple to the environment N, M = np.shape(Alist[0]) else: # or a simple rate matrix, in which case we put it in a list Alist = [Alist] N, M = np.shape(Alist[0]) Rdata_lil = scipy.sparse.lil_matrix((N * N, N * N), dtype=complex) for I in range(N * N): a, b = vec2mat_index(N, I) for J in range(N * N): c, d = vec2mat_index(N, J) R = -1.0j * (f_energies[a] - f_energies[b])*(a == c)*(b == d) Rdata_lil[I, J] = R for A in Alist: s1 = s2 = 0 for n in range(N): s1 += A[a, n] * (n == c) * (n == d) - A[n, a] * \ (a == c) * (a == d) s2 += (A[n, a] + A[n, b]) * (a == c) * (b == d) dR = (a == b) * s1 - 0.5 * (1 - (a == b)) * s2 if dR != 0.0: Rdata_lil[I, J] += dR return Qobj(Rdata_lil, [[N, N], [N, N]], [N*N, N*N]) def floquet_master_equation_steadystate(H, A): """ Returns the steadystate density matrix (in the floquet basis!) for the Floquet-Markov master equation. """ c_ops = floquet_collapse_operators(A) rho_ss = steadystate(H, c_ops) return rho_ss def floquet_basis_transform(f_modes, f_energies, rho0): """ Make a basis transform that takes rho0 from the floquet basis to the computational basis. """ return rho0.transform(f_modes, True) # ----------------------------------------------------------------------------- # Floquet-Markov master equation # # def floquet_markov_mesolve(R, ekets, rho0, tlist, e_ops, f_modes_table=None, options=None, floquet_basis=True): """ Solve the dynamics for the system using the Floquet-Markov master equation. """ if options is None: opt = Options() else: opt = options if opt.tidy: R.tidyup() # # check initial state # if isket(rho0): # Got a wave function as initial state: convert to density matrix. 
def floquet_markov_mesolve(R, ekets, rho0, tlist, e_ops, f_modes_table=None,
                           options=None, floquet_basis=True):
    """
    Solve the dynamics for the system using the Floquet-Markov master
    equation.

    Parameters
    ----------
    R : :class:`qutip.qobj`
        The Floquet-Markov master equation tensor.

    ekets : list of :class:`qutip.qobj` (kets)
        Basis kets (Floquet modes at t=0) used to transform the initial
        state into the Floquet basis; may be None to skip the transform.

    rho0 : :class:`qutip.qobj`
        Initial density matrix or state vector (a ket is converted).

    tlist : *list* / *array*
        List of (uniformly spaced) times; the step dt is taken from the
        first two entries.

    e_ops : list of :class:`qutip.qobj` / callback function
        Operators for expectation values, or a callback e_ops(t, state).

    f_modes_table : tuple
        Pair (f_modes_table_t, T) as produced by floquet_modes_table;
        required when expectation values are requested or when
        floquet_basis is False.

    options : :class:`qutip.solver.Options`
        Options for the ODE integrator.

    floquet_basis : bool
        If True, return states in the Floquet basis; otherwise transform
        back to the computational basis at each time.

    Returns
    -------
    output : :class:`qutip.solver.Result`
        Result object with states or expectation values.
    """
    if options is None:
        opt = Options()
    else:
        opt = options

    if opt.tidy:
        R.tidyup()

    #
    # check initial state
    #
    if isket(rho0):
        # Got a wave function as initial state: convert to density matrix.
        rho0 = ket2dm(rho0)

    #
    # prepare output array
    #
    n_tsteps = len(tlist)
    # NOTE(review): assumes a uniform time grid; dt comes from the first
    # two entries only
    dt = tlist[1] - tlist[0]

    output = Result()
    output.solver = "fmmesolve"
    output.times = tlist

    if isinstance(e_ops, FunctionType):
        n_expt_op = 0
        expt_callback = True

    elif isinstance(e_ops, list):

        n_expt_op = len(e_ops)
        expt_callback = False

        if n_expt_op == 0:
            output.states = []
        else:
            # expectation values are computed in the computational basis, so
            # the mode table is mandatory in this branch
            if not f_modes_table:
                raise TypeError("The Floquet mode table has to be provided " +
                                "when requesting expectation values.")

            output.expect = []
            output.num_expect = n_expt_op
            for op in e_ops:
                if op.isherm:
                    output.expect.append(np.zeros(n_tsteps))
                else:
                    output.expect.append(np.zeros(n_tsteps, dtype=complex))

    else:
        raise TypeError("Expectation parameter must be a list or a function")

    #
    # transform the initial density matrix to the eigenbasis: from
    # computational basis to the floquet basis
    #
    if ekets is not None:
        rho0 = rho0.transform(ekets)

    #
    # setup integrator
    #
    initial_vector = mat2vec(rho0.full())
    r = scipy.integrate.ode(cy_ode_rhs)
    r.set_f_params(R.data.data, R.data.indices, R.data.indptr)
    r.set_integrator('zvode', method=opt.method, order=opt.order,
                     atol=opt.atol, rtol=opt.rtol, max_step=opt.max_step)
    r.set_initial_value(initial_vector, tlist[0])

    #
    # start evolution
    #
    rho = Qobj(rho0)

    t_idx = 0
    for t in tlist:
        if not r.successful():
            break

        # unvectorize the current solution back into a density matrix
        rho = Qobj(vec2mat(r.y), rho0.dims, rho0.shape)

        if expt_callback:
            # use callback method
            if floquet_basis:
                e_ops(t, Qobj(rho))
            else:
                f_modes_table_t, T = f_modes_table
                f_modes_t = floquet_modes_t_lookup(f_modes_table_t, t, T)
                e_ops(t, Qobj(rho).transform(f_modes_t, True))
        else:
            # calculate all the expectation values, or output rho if
            # no operators
            if n_expt_op == 0:
                if floquet_basis:
                    output.states.append(Qobj(rho))
                else:
                    f_modes_table_t, T = f_modes_table
                    f_modes_t = floquet_modes_t_lookup(f_modes_table_t, t, T)
                    output.states.append(Qobj(rho).transform(f_modes_t, True))
            else:
                f_modes_table_t, T = f_modes_table
                f_modes_t = floquet_modes_t_lookup(f_modes_table_t, t, T)
                for m in range(0, n_expt_op):
                    output.expect[m][t_idx] = \
                        expect(e_ops[m], rho.transform(f_modes_t, False))

        r.integrate(r.t + dt)
        t_idx += 1

    return output
Returns ------- output : :class:`qutip.solver` An instance of the class :class:`qutip.solver`, which contains either an *array* of expectation values for the times specified by `tlist`. """ if _safe_mode: _solver_safety_check(H, rho0, c_ops, e_ops, args) if T is None: T = max(tlist) if len(spectra_cb) == 0: # add white noise callbacks if absent spectra_cb = [lambda w: 1.0] * len(c_ops) f_modes_0, f_energies = floquet_modes(H, T, args) f_modes_table_t = floquet_modes_table(f_modes_0, f_energies, np.linspace(0, T, 500 + 1), H, T, args) # get w_th from args if it exists if 'w_th' in args: w_th = args['w_th'] else: w_th = 0 # TODO: loop over input c_ops and spectra_cb, calculate one R for each set # calculate the rate-matrices for the floquet-markov master equation Delta, X, Gamma, Amat = floquet_master_equation_rates( f_modes_0, f_energies, c_ops[0], H, T, args, spectra_cb[0], w_th, kmax, f_modes_table_t) # the floquet-markov master equation tensor R = floquet_master_equation_tensor(Amat, f_energies) return floquet_markov_mesolve(R, f_modes_0, rho0, tlist, e_ops, f_modes_table=(f_modes_table_t, T), options=options, floquet_basis=floquet_basis) qutip-4.4.1/qutip/graph.py000066400000000000000000000232531352460343600155120ustar00rootroot00000000000000# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. 
# Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
"""
This module contains a collection of graph theory routines used mainly
to reorder matrices for iterative steady state solvers.
"""

__all__ = ['graph_degree', 'column_permutation', 'breadth_first_search',
           'reverse_cuthill_mckee', 'maximum_bipartite_matching',
           'weighted_bipartite_matching']

import numpy as np
import scipy.sparse as sp
from qutip.cy.graph_utils import (
    _breadth_first_search, _node_degrees,
    _reverse_cuthill_mckee, _maximum_bipartite_matching,
    _weighted_bipartite_matching)


def graph_degree(A):
    """
    Returns the degree for the nodes (rows) of a symmetric graph in
    sparse CSR or CSC format, or a qobj.

    Parameters
    ----------
    A : qobj, csr_matrix, csc_matrix
        Input quantum object or csr_matrix.

    Returns
    -------
    degree : array
        Array of integers giving the degree for each node (row).

    Raises
    ------
    TypeError
        If `A` is not a CSC or CSR sparse matrix.
    """
    # CSR and CSC expose identical indices/indptr layouts, so the same
    # cython kernel handles both.
    if sp.isspmatrix_csc(A) or sp.isspmatrix_csr(A):
        return np.asarray(_node_degrees(A.indices, A.indptr, A.shape[0]))
    raise TypeError('Input must be CSC or CSR sparse matrix.')


def breadth_first_search(A, start):
    """
    Breadth-First-Search (BFS) of a graph in CSR or CSC matrix format
    starting from a given node (row).

    This function requires a matrix with symmetric structure.  Use
    A + trans(A) if the original matrix is not symmetric or you are
    not sure.

    Parameters
    ----------
    A : csc_matrix, csr_matrix
        Input graph in CSC or CSR matrix format.
    start : int
        Starting node for BFS traversal.

    Returns
    -------
    order : array
        Order in which nodes are traversed from starting node.
    levels : array
        Level of the nodes in the order that they are traversed.

    Raises
    ------
    TypeError
        If `A` is not a CSC or CSR sparse matrix.
    """
    if not (sp.isspmatrix_csc(A) or sp.isspmatrix_csr(A)):
        raise TypeError('Input must be CSC or CSR sparse matrix.')
    node_count = A.shape[0]
    traversal, depths = _breadth_first_search(A.indices, A.indptr,
                                              node_count, int(start))
    # Nodes unreachable from `start` are flagged with -1; drop them so only
    # visited entries are returned.
    return traversal[traversal != -1], depths[depths != -1]


def column_permutation(A):
    """
    Finds the non-symmetric column permutation of A such that the columns
    are given in ascending order according to the number of nonzero
    entries.  This is sometimes useful for decreasing the fill-in of
    sparse LU factorization.

    Parameters
    ----------
    A : csc_matrix
        Input sparse CSC sparse matrix.

    Returns
    -------
    perm : array
        Array of permuted row and column indices.
    """
    if not sp.isspmatrix_csc(A):
        A = sp.csc_matrix(A)
    # In CSC format, successive indptr differences give the nnz per column.
    nnz_per_column = np.diff(A.indptr)
    return np.argsort(nnz_per_column)


def reverse_cuthill_mckee(A, sym=False):
    """
    Returns the permutation array that orders a sparse CSR or CSC matrix
    in Reverse-Cuthill McKee ordering.  Since the input matrix must be
    symmetric, this routine works on the matrix A + trans(A) if the sym
    flag is set to False (Default).

    It is assumed by default (*sym=False*) that the input matrix is not
    symmetric.  This is because it is faster to do A + trans(A) than it
    is to check for symmetry of a generic matrix.  If you are guaranteed
    that the matrix is symmetric in structure (values of matrix elements
    do not matter) then set *sym=True*.

    Parameters
    ----------
    A : csc_matrix, csr_matrix
        Input sparse CSC or CSR sparse matrix format.
    sym : bool {False, True}
        Flag to set whether input matrix is symmetric.

    Returns
    -------
    perm : array
        Array of permuted row and column indices.

    Notes
    -----
    This routine is used primarily for internal reordering of Lindblad
    superoperators for use in iterative solver routines.

    References
    ----------
    E. Cuthill and J. McKee, "Reducing the Bandwidth of Sparse Symmetric
    Matrices", ACM '69 Proceedings of the 1969 24th national conference,
    (1969).
    """
    if not (sp.isspmatrix_csc(A) or sp.isspmatrix_csr(A)):
        raise TypeError('Input must be CSC or CSR sparse matrix.')
    node_count = A.shape[0]
    if not sym:
        # Symmetrize the structure; the RCM kernel assumes an undirected
        # (structurally symmetric) graph.
        A = A + A.transpose()
    return _reverse_cuthill_mckee(A.indices, A.indptr, node_count)
""" nrows = A.shape[0] if A.shape[0] != A.shape[1]: raise ValueError( 'Maximum bipartite matching requires a square matrix.') if sp.isspmatrix_csr(A) or sp.isspmatrix_coo(A): A = A.tocsc() elif not sp.isspmatrix_csc(A): raise TypeError("matrix must be in CSC, CSR, or COO format.") if perm_type == 'column': A = A.transpose().tocsc() perm = _maximum_bipartite_matching(A.indices, A.indptr, nrows) if np.any(perm == -1): raise Exception('Possibly singular input matrix.') return perm def weighted_bipartite_matching(A, perm_type='row'): """ Returns an array of row permutations that attempts to maximize the product of the ABS values of the diagonal elements in a nonsingular square CSC sparse matrix. Such a permutation is always possible provided that the matrix is nonsingular. This function looks at both the structure and ABS values of the underlying matrix. Parameters ---------- A : csc_matrix Input matrix perm_type : str {'row', 'column'} Type of permutation to generate. Returns ------- perm : array Array of row or column permutations. Notes ----- This function uses a weighted maximum cardinality bipartite matching algorithm based on breadth-first search (BFS). The columns are weighted according to the element of max ABS value in the associated rows and are traversed in descending order by weight. When performing the BFS traversal, the row associated to a given column is the one with maximum weight. Unlike other techniques[1]_, this algorithm does not guarantee the product of the diagonal is maximized. However, this limitation is offset by the substantially faster runtime of this method. References ---------- I. S. Duff and J. Koster, "The design and use of algorithms for permuting large entries to the diagonal of sparse matrices", SIAM J. Matrix Anal. and Applics. 20, no. 4, 889 (1997). 
""" nrows = A.shape[0] if A.shape[0] != A.shape[1]: raise ValueError('weighted_bfs_matching requires a square matrix.') if sp.isspmatrix_csr(A) or sp.isspmatrix_coo(A): A = A.tocsc() elif not sp.isspmatrix_csc(A): raise TypeError("matrix must be in CSC, CSR, or COO format.") if perm_type == 'column': A = A.transpose().tocsc() perm = _weighted_bipartite_matching( np.asarray(np.abs(A.data), dtype=float), A.indices, A.indptr, nrows) if np.any(perm == -1): raise Exception('Possibly singular input matrix.') return perm qutip-4.4.1/qutip/hardware_info.py000066400000000000000000000121161352460343600172150ustar00rootroot00000000000000# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names # of its contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ############################################################################### __all__ = ['hardware_info'] import os import sys import multiprocessing import numpy as np def _mac_hardware_info(): info = dict() results = dict() for l in [l.split(':') for l in os.popen('sysctl hw').readlines()[1:]]: info[l[0].strip(' "').replace(' ', '_').lower().strip('hw.')] = \ l[1].strip('.\n ') results.update({'cpus': int(info['physicalcpu'])}) results.update({'cpu_freq': int(float(os.popen('sysctl -n machdep.cpu.brand_string') .readlines()[0].split('@')[1][:-4])*1000)}) results.update({'memsize': int(int(info['memsize']) / (1024 ** 2))}) # add OS information results.update({'os': 'Mac OSX'}) return results def _linux_hardware_info(): results = {} # get cpu number sockets = 0 cores_per_socket = 0 frequency = 0.0 for l in [l.split(':') for l in open("/proc/cpuinfo").readlines()]: if (l[0].strip() == "physical id"): sockets = np.maximum(sockets,int(l[1].strip())+1) if (l[0].strip() == "cpu cores"): cores_per_socket = int(l[1].strip()) if (l[0].strip() == "cpu MHz"): frequency = float(l[1].strip()) / 1000. results.update({'cpus': sockets * cores_per_socket}) # get cpu frequency directly (bypasses freq scaling) try: file = "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq" line = open(file).readlines()[0] frequency = float(line.strip('\n')) / 1000000. 
except: pass results.update({'cpu_freq': frequency}) # get total amount of memory mem_info = dict() for l in [l.split(':') for l in open("/proc/meminfo").readlines()]: mem_info[l[0]] = l[1].strip('.\n ').strip('kB') results.update({'memsize': int(mem_info['MemTotal']) / 1024}) # add OS information results.update({'os': 'Linux'}) return results def _freebsd_hardware_info(): results = {} results.update({'cpus': int(os.popen('sysctl -n hw.ncpu').readlines()[0])}) results.update({'cpu_freq': int(os.popen('sysctl -n dev.cpu.0.freq').readlines()[0])}) results.update({'memsize': int(os.popen('sysctl -n hw.realmem').readlines()[0]) / 1024}) results.update({'os': 'FreeBSD'}) return results def _win_hardware_info(): try: from comtypes.client import CoGetObject winmgmts_root = CoGetObject("winmgmts:root\cimv2") cpus = winmgmts_root.ExecQuery("Select * from Win32_Processor") ncpus = 0 for cpu in cpus: ncpus += int(cpu.Properties_['NumberOfCores'].Value) except: ncpus = int(multiprocessing.cpu_count()) return {'os': 'Windows', 'cpus': ncpus} def hardware_info(): """ Returns basic hardware information about the computer. Gives actual number of CPU's in the machine, even when hyperthreading is turned on. Returns ------- info : dict Dictionary containing cpu and memory information. """ if sys.platform == 'darwin': out = _mac_hardware_info() elif sys.platform == 'win32': out = _win_hardware_info() elif sys.platform in ['linux', 'linux2']: out = _linux_hardware_info() elif sys.platform.startswith('freebsd'): out = _freebsd_hardware_info() else: out = {} return out if __name__ == '__main__': print(hardware_info()) qutip-4.4.1/qutip/interpolate.py000066400000000000000000000116711352460343600167400ustar00rootroot00000000000000# This file is part of QuTiP: Quantum Toolbox in Python. # # Copyright (c) 2011 and later, All rights reserved. 
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
#    this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
#    of its contributors may be used to endorse or promote products derived
#    from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
import numpy as np
import scipy.linalg as la
from qutip.cy.interpolate import (interp, arr_interp, zinterp, arr_zinterp)

__all__ = ['Cubic_Spline']


class Cubic_Spline(object):
    """
    Calculates coefficients for a cubic spline interpolation of a given
    data set.

    This function assumes that the data is sampled uniformly over a given
    interval.

    Parameters
    ----------
    a : float
        Lower bound of the interval.
    b : float
        Upper bound of the interval.
    y : ndarray
        Function values at interval points.
    alpha : float
        Second-order derivative at a. Default is 0.
    beta : float
        Second-order derivative at b. Default is 0.

    Attributes
    ----------
    a : float
        Lower bound of the interval.
    b : float
        Upper bound of the interval.
    coeffs : ndarray
        Array of coeffcients defining cubic spline.

    Notes
    -----
    This object can be called like a normal function with a single or
    array of input points at which to evaluate the interpolating
    function.

    Habermann & Kindermann, "Multidimensional Spline Interpolation:
    Theory and Applications", Comput Econ 30, 153 (2007).
    """

    def __init__(self, a, b, y, alpha=0, beta=0):
        y = np.asarray(y)
        n = y.shape[0] - 1
        h = (b - a)/n
        c = np.zeros(n + 3, dtype=y.dtype)
        # Boundary spline coefficients: c2 and c_{n+2} in the paper.
        c[1] = 1/6. * (y[0] - (alpha * h**2)/6)
        c[n + 1] = 1/6. * (y[n] - (beta * h**2)/6)
        # Tridiagonal system in compressed (banded) storage for
        # scipy.linalg.solve_banded.  The first element of the upper
        # diagonal row and the last element of the lower diagonal row are
        # padding and must be zero.
        banded = np.ones((3, n - 1), dtype=float)
        banded[0, 0] = 0
        banded[1, :] = 4
        banded[-1, -1] = 0
        # Right-hand side uses the interior samples y[1]..y[n-1], adjusted
        # at both ends by the known boundary coefficients.
        rhs = y[1:-1].copy()
        rhs[0] -= c[1]
        rhs[-1] -= c[n + 1]
        c[2:-2] = la.solve_banded((1, 1), banded, rhs, overwrite_ab=True,
                                  overwrite_b=True, check_finite=False)
        # Extrapolated end coefficients c1 and c_{n+3}.
        c[0] = alpha * h**2/6. + 2 * c[1] - c[2]
        c[-1] = beta * h**2/6. + 2 * c[-2] - c[-3]
        self.a = a            # Lower bound of domain
        self.b = b            # Upper bound of domain
        self.coeffs = c       # Spline coefficients
        # Selects the complex- or real-valued cython evaluator in __call__.
        self.is_complex = (y.dtype == complex)

    def __call__(self, pnts, *args):
        # Array-like input: evaluate at every point.
        if isinstance(pnts, (np.ndarray, list)):
            pnts = np.asarray(pnts)
            evaluator = arr_zinterp if self.is_complex else arr_interp
            return evaluator(pnts, self.a, self.b, self.coeffs)
        # Scalar input: evaluate at a single point.
        if isinstance(pnts, (int, float, complex)):
            evaluator = zinterp if self.is_complex else interp
            return evaluator(pnts, self.a, self.b, self.coeffs)

# --- archive entry: qutip-4.4.1/qutip/ipynbtools.py ---
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
#    this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
#    of its contributors may be used to endorse or promote products derived
#    from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
"""
This module contains utility functions for using QuTiP with IPython
notebooks.
"""
from qutip.ui.progressbar import BaseProgressBar
from qutip.utilities import _blas_info
import IPython

# IPython parallel routines moved to the `ipyparallel` package in IPython 4,
# and are not installed by default (e.g. in Anaconda), so the parallel
# helpers are only exported when the import succeeds.
if IPython.version_info[0] >= 4:
    try:
        from ipyparallel import Client
        __all__ = ['version_table', 'parfor', 'plot_animation',
                   'parallel_map', 'HTMLProgressBar']
    except Exception:
        # Narrowed from a bare `except:`; the import is best-effort.
        __all__ = ['version_table', 'plot_animation', 'HTMLProgressBar']
else:
    try:
        from IPython.parallel import Client
        __all__ = ['version_table', 'parfor', 'plot_animation',
                   'parallel_map', 'HTMLProgressBar']
    except Exception:
        __all__ = ['version_table', 'plot_animation', 'HTMLProgressBar']

from IPython.display import HTML, Javascript, display

import matplotlib.pyplot as plt
from matplotlib import animation
from base64 import b64encode

import datetime
import uuid
import sys
import os
import time
import inspect

import qutip
import numpy
import scipy
import Cython
import matplotlib
import IPython


def version_table(verbose=False):
    """
    Print an HTML-formatted table with version numbers for QuTiP and its
    dependencies.  Use it in a IPython notebook to show which versions of
    different packages that were used to run the notebook.  This should
    make it possible to reproduce the environment and the calculation
    later on.

    Parameters
    ----------
    verbose : bool
        If True, also include installation path and user name.

    Returns
    -------
    version_table: string
        Return an HTML-formatted string containing version information
        for QuTiP dependencies.
    """
    # NOTE(review): the table markup below was reconstructed from a
    # partially garbled archive extraction; verify against upstream qutip.
    html = "<table>"
    html += "<tr><th>Software</th><th>Version</th></tr>"

    packages = [("QuTiP", qutip.__version__),
                ("Numpy", numpy.__version__),
                ("SciPy", scipy.__version__),
                ("matplotlib", matplotlib.__version__),
                ("Cython", Cython.__version__),
                ("Number of CPUs",
                 qutip.hardware_info.hardware_info()['cpus']),
                ("BLAS Info", _blas_info()),
                ("IPython", IPython.__version__),
                ("Python", sys.version),
                ("OS", "%s [%s]" % (os.name, sys.platform))
                ]

    for name, version in packages:
        html += "<tr><td>%s</td><td>%s</td></tr>" % (name, version)

    if verbose:
        html += "<tr><th colspan='2'>Additional information</th></tr>"
        qutip_install_path = os.path.dirname(inspect.getsourcefile(qutip))
        html += ("<tr><td>Installation path</td><td>%s</td></tr>" %
                 qutip_install_path)
        try:
            import getpass
            html += ("<tr><td>User</td><td>%s</td></tr>" %
                     getpass.getuser())
        except Exception:
            # Narrowed from a bare `except:`; user name is optional info.
            pass

    html += "<tr><td colspan='2'>%s</td></tr>" % time.strftime(
        '%a %b %d %H:%M:%S %Y %Z')
    html += "</table>"

    return HTML(html)
class HTMLProgressBar(BaseProgressBar):
    """
    A simple HTML progress bar for using in IPython notebooks.

    Based on IPython ProgressBar demo notebook:
    https://github.com/ipython/ipython/tree/master/examples/notebooks

    Example usage:

        n_vec = linspace(0, 10, 100)
        pbar = HTMLProgressBar(len(n_vec))
        for n in n_vec:
            pbar.update(n)
            compute_with_n(n)
    """

    def __init__(self, iterations=0, chunk_size=1.0):
        # Unique DOM ids so several bars can coexist in one notebook.
        self.divid = str(uuid.uuid4())
        self.textid = str(uuid.uuid4())
        # NOTE(review): this HTML literal was reconstructed from a garbled
        # archive extraction; verify against upstream qutip.
        self.pb = HTML("""\
<div style="border: 1px solid black; width:500px">
  <div id="%s" style="background-color:blue; width:0%%">&nbsp;</div>
</div>
<p id="%s"></p>
""" % (self.divid, self.textid))
        display(self.pb)
        super(HTMLProgressBar, self).start(iterations, chunk_size)

    def start(self, iterations=0, chunk_size=1.0):
        super(HTMLProgressBar, self).start(iterations, chunk_size)

    def update(self, n):
        percent = (n / self.N) * 100.0
        # Only touch the DOM once per chunk to limit notebook traffic.
        if percent >= self.p_chunk:
            lbl = ("Elapsed time: %s. " % self.time_elapsed() +
                   "Est. remaining time: %s." %
                   self.time_remaining_est(percent))
            js_code = ("$('div#%s').width('%i%%');" % (self.divid, percent) +
                       "$('p#%s').text('%s');" % (self.textid, lbl))
            display(Javascript(js_code))
            self.p_chunk += self.p_chunk_size

    def finished(self):
        self.t_done = time.time()
        lbl = "Elapsed time: %s" % self.time_elapsed()
        js_code = ("$('div#%s').width('%i%%');" % (self.divid, 100.0) +
                   "$('p#%s').text('%s');" % (self.textid, lbl))
        display(Javascript(js_code))


def _visualize_parfor_data(metadata):
    """
    Visualizing the task scheduling meta data collected from AsyncResults.
    """
    meta = numpy.array(metadata)
    fig, ax = plt.subplots(figsize=(10, meta.shape[1]))

    yticks = []
    yticklabels = []
    t0 = min(meta[:, 1])
    # One horizontal lane per engine; one green rectangle per task.
    for lane, engine_id in enumerate(numpy.unique(meta[:, 0])):
        yticks.append(lane)
        yticklabels.append("%d" % engine_id)
        for m in numpy.where(meta[:, 0] == engine_id)[0]:
            ax.add_patch(plt.Rectangle((meta[m, 1] - t0, lane - 0.25),
                                       meta[m, 2] - meta[m, 1], 0.5,
                                       color="green", alpha=0.5))

    ax.set_ylim(-.5, lane + .5)
    ax.set_xlim(0, max(meta[:, 2]) - t0 + 0.)
    ax.set_yticks(yticks)
    ax.set_yticklabels(yticklabels)
    ax.set_ylabel("Engine")
    ax.set_xlabel("seconds")
    ax.set_title("Task schedule")


def parfor(task, task_vec, args=None, client=None, view=None,
           show_scheduling=False, show_progressbar=False):
    """
    Call the function ``tast`` for each value in ``task_vec`` using a
    cluster of IPython engines.  The function ``task`` should have the
    signature ``task(value, args)`` or ``task(value)`` if ``args=None``.

    The ``client`` and ``view`` are the IPython.parallel client and
    load-balanced view that will be used in the parfor execution.  If
    these are ``None``, new instances will be created.

    Parameters
    ----------
    task: a Python function
        The function that is to be called for each value in ``task_vec``.
    task_vec: array / list
        The list or array of values for which the ``task`` function is to
        be evaluated.
    args: list / dictionary
        The optional additional argument to the ``task`` function.  For
        example a dictionary with parameter values.
    client: IPython.parallel.Client
        The IPython.parallel Client instance that will be used in the
        parfor execution.
    view: a IPython.parallel.Client view
        The view that is to be used in scheduling the tasks on the
        IPython cluster.  Preferably a load-balanced view, which is
        obtained from the IPython.parallel.Client instance client by
        calling, view = client.load_balanced_view().
    show_scheduling: bool {False, True}, default False
        Display a graph showing how the tasks (the evaluation of ``task``
        for the value in ``task_vec1``) was scheduled on the IPython
        engine cluster.
    show_progressbar: bool {False, True}, default False
        Display a HTML-based progress bar duing the execution of the
        parfor loop.

    Returns
    --------
    result : list
        The result list contains the value of ``task(value, args)`` for
        each value in ``task_vec``, that is, it should be equivalent to
        ``[task(v, args) for v in task_vec]``.
    """
    # parfor is a thin convenience wrapper around parallel_map.
    progress_bar = HTMLProgressBar() if show_progressbar else None
    return parallel_map(task, task_vec, task_args=args, client=client,
                        view=view, progress_bar=progress_bar,
                        show_scheduling=show_scheduling)
The ``client`` and ``view`` are the IPython.parallel client and load-balanced view that will be used in the parfor execution. If these are ``None``, new instances will be created. Parameters ---------- task: a Python function The function that is to be called for each value in ``task_vec``. values: array / list The list or array of values for which the ``task`` function is to be evaluated. task_args: list / dictionary The optional additional argument to the ``task`` function. task_kwargs: list / dictionary The optional additional keyword argument to the ``task`` function. client: IPython.parallel.Client The IPython.parallel Client instance that will be used in the parfor execution. view: a IPython.parallel.Client view The view that is to be used in scheduling the tasks on the IPython cluster. Preferably a load-balanced view, which is obtained from the IPython.parallel.Client instance client by calling, view = client.load_balanced_view(). show_scheduling: bool {False, True}, default False Display a graph showing how the tasks (the evaluation of ``task`` for for the value in ``task_vec1``) was scheduled on the IPython engine cluster. show_progressbar: bool {False, True}, default False Display a HTML-based progress bar during the execution of the parfor loop. Returns -------- result : list The result list contains the value of ``task(value, task_args, task_kwargs)`` for each value in ``values``. 
""" submitted = datetime.datetime.now() if task_args is None: task_args = tuple() if task_kwargs is None: task_kwargs = {} if client is None: client = Client() # make sure qutip is available at engines dview = client[:] dview.block = True dview.execute("from qutip import *") if view is None: view = client.load_balanced_view() ar_list = [view.apply_async(task, value, *task_args, **task_kwargs) for value in values] if progress_bar is None: view.wait(ar_list) else: if progress_bar is True: progress_bar = HTMLProgressBar() n = len(ar_list) progress_bar.start(n) while True: n_finished = sum([ar.progress for ar in ar_list]) progress_bar.update(n_finished) if view.wait(ar_list, timeout=0.5): progress_bar.update(n) break progress_bar.finished() if show_scheduling: metadata = [[ar.engine_id, (ar.started - submitted).total_seconds(), (ar.completed - submitted).total_seconds()] for ar in ar_list] _visualize_parfor_data(metadata) return [ar.get() for ar in ar_list] def plot_animation(plot_setup_func, plot_func, result, name="movie", writer="avconv", codec="libx264", verbose=False): """ Create an animated plot of a Result object, as returned by one of the qutip evolution solvers. .. note :: experimental """ fig, axes = plot_setup_func(result) def update(n): return plot_func(result, n, fig=fig, axes=axes) anim = animation.FuncAnimation( fig, update, frames=len(result.times), blit=True) anim.save(name + '.mp4', fps=10, writer=writer, codec=codec) plt.close(fig) if verbose: print("Created %s.m4v" % name) video = open(name + '.mp4', "rb").read() video_encoded = b64encode(video).decode("ascii") video_tag = '