Absorb ITensors.ITensorMPS and ITensorTDVP.jl (#31)
mtfishman authored Oct 25, 2024
1 parent df0bab2 commit a2e3cad
Showing 150 changed files with 19,414 additions and 266 deletions.
2 changes: 2 additions & 0 deletions .gitignore
@@ -25,3 +25,5 @@ Manifest.toml

# Vi/Vim backup files
.*.swp

.DS_Store
48 changes: 42 additions & 6 deletions Project.toml
@@ -1,17 +1,53 @@
name = "ITensorMPS"
uuid = "0d1a4710-d33b-49a5-8f18-73bdf49b47e2"
authors = ["Matthew Fishman <[email protected]>", "Miles Stoudenmire <[email protected]>"]
version = "0.2.6"
version = "0.3.0"

[deps]
ITensorTDVP = "25707e16-a4db-4a07-99d9-4d67b7af0342"
Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
Compat = "34da2185-b29b-5c13-b0c7-acf172513d20"
ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5"
Reexport = "189a3867-3050-52da-a836-e630ba90ab69"
IsApprox = "28f27b66-4bd8-47e7-9110-e2746eb8bed7"
KrylovKit = "0b1a1467-8014-51b9-945f-bf0ae24f4b77"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
SerializedElementArrays = "d3ce8812-9567-47e9-a7b5-65a6d70a3065"
TupleTools = "9d95972d-f1c8-5527-a6e0-b4b365fa01f6"

[weakdeps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f"
Observers = "338f10d5-c7f1-4033-a7d1-f9dec39bcaa0"
PackageCompiler = "9b87118b-4619-50d2-8e1e-99f35a4d4d9d"
ZygoteRules = "700de1a5-db45-46bc-99cf-38207098b444"

[extensions]
ITensorMPSChainRulesCoreExt = "ChainRulesCore"
ITensorMPSHDF5Ext = "HDF5"
ITensorMPSObserversExt = "Observers"
ITensorMPSPackageCompilerExt = "PackageCompiler"
ITensorMPSZygoteRulesExt = ["ChainRulesCore", "ZygoteRules"]

[compat]
ITensorTDVP = "0.4.1"
ITensors = "0.6.7"
Reexport = "1"
Adapt = "4.1.0"
ChainRulesCore = "1.10"
Compat = "4.16.0"
HDF5 = "0.14, 0.15, 0.16, 0.17"
ITensors = "0.7"
IsApprox = "2.0.0"
KrylovKit = "0.8.1"
LinearAlgebra = "1.10"
NDTensors = "0.3.46"
Observers = "0.2"
PackageCompiler = "1, 2"
Printf = "1.10"
Random = "1.10"
SerializedElementArrays = "0.1.0"
Test = "1.10"
TupleTools = "1.5.0"
ZygoteRules = "0.2.2"
julia = "1.10"

[extras]
16 changes: 7 additions & 9 deletions README.md
@@ -1,22 +1,20 @@
# ITensorMPS.jl

[![Stable](https://img.shields.io/badge/docs-stable-blue.svg)](https://itensor.github.io/ITensorMPS.jl/stable/)
[![Dev](https://img.shields.io/badge/docs-dev-blue.svg)](https://itensor.github.io/ITensorMPS.jl/dev/)
[![Docs](https://img.shields.io/badge/docs-latest-blue.svg)](https://itensor.github.io/ITensors.jl/dev/)
[![Build Status](https://github.com/ITensor/ITensorMPS.jl/actions/workflows/CI.yml/badge.svg?branch=main)](https://github.com/ITensor/ITensorMPS.jl/actions/workflows/CI.yml?query=branch%3Amain)
[![Coverage](https://codecov.io/gh/ITensor/ITensorMPS.jl/branch/main/graph/badge.svg)](https://codecov.io/gh/ITensor/ITensorMPS.jl)
[![Code Style: Blue](https://img.shields.io/badge/code%20style-blue-4495d1.svg)](https://github.com/invenia/BlueStyle)

Finite MPS and MPO methods based on ITensor (ITensors.jl).
Finite MPS and MPO methods based on the Julia version of [ITensor](https://www.itensor.org) ([ITensors.jl](https://github.com/ITensor/ITensors.jl)). See the [ITensors.jl documentation](https://itensor.github.io/ITensors.jl/dev/) for more details.

This package currently re-exports the MPS and MPO functionality of the [ITensors.jl](https://github.com/ITensor/ITensors.jl) package, including functionality like DMRG, applying MPOs to MPSs, applying gates to MPSs and MPOs, etc. See the [ITensor documentation](https://itensor.github.io/ITensors.jl/dev) for guides and examples on using this package.
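
For orientation, here is a minimal sketch of the kind of re-exported workflow described above (the Heisenberg chain and all parameter values below are illustrative choices, not something this commit prescribes):

```julia
using ITensors, ITensorMPS

n = 20
s = siteinds("S=1/2", n)

# Spin-1/2 Heisenberg chain built as an MPO from an OpSum
os = OpSum()
for j in 1:(n - 1)
  os += "Sz", j, "Sz", j + 1
  os += 1 / 2, "S+", j, "S-", j + 1
  os += 1 / 2, "S-", j, "S+", j + 1
end
H = MPO(os, s)

# Ground-state search with DMRG, starting from a random MPS
psi0 = random_mps(s; linkdims=10)
energy, psi = dmrg(H, psi0; nsweeps=5, maxdim=[10, 20, 100], cutoff=1e-10)
```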
## News

Additionally, it re-exports the functionality of the [ITensorTDVP.jl](https://github.com/ITensor/ITensorTDVP.jl) package, which provides other DMRG-like MPS solvers such as TDVP and MPS linear equation solving.
### ITensorMPS.jl v0.3 release notes

## Upgrade guide
All MPS/MPO code from [ITensors.jl](https://github.com/ITensor/ITensors.jl) and [ITensorTDVP.jl](https://github.com/ITensor/ITensorTDVP.jl) has been moved into this repository, which now relies on ITensors.jl v0.7 and above. All of the MPS/MPO functionality that was previously in ITensors.jl and ITensorTDVP.jl will be developed here from now on. For users of this repository, this change should not break any code, but please let us know if you run into any issues.

The goal will be to move the MPS and MPO code from the ITensors.jl package, along with all of the code from the [ITensorTDVP.jl](https://github.com/ITensor/ITensorTDVP.jl) package, into this repository. If you are using any MPS/MPO functionality of ITensors.jl, such as the `MPS` and `MPO` types or constructors thereof (like `randomMPS`), `OpSum`, `siteinds`, `dmrg`, `apply`, etc., you should install the ITensorMPS.jl package with `import Pkg; Pkg.add("ITensorMPS")` and add `using ITensorMPS` to your code. Additionally, if you are currently using [ITensorTDVP.jl](https://github.com/ITensor/ITensorTDVP.jl), you should replace `using ITensorTDVP` with `using ITensorMPS` in your code.
#### Upgrade guide

## News
If you are using any MPS/MPO functionality of ITensors.jl, such as the `MPS` and `MPO` types or constructors thereof (like `random_mps`), `OpSum`, `siteinds`, `dmrg`, `apply`, etc., you should install the ITensorMPS.jl package with `import Pkg; Pkg.add("ITensorMPS")` and add `using ITensorMPS` to your code. Additionally, if you are currently using [ITensorTDVP.jl](https://github.com/ITensor/ITensorTDVP.jl), you should replace `using ITensorTDVP` with `using ITensorMPS` in your code.
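
As a concrete illustration of the upgrade described above (a sketch assuming an existing project that already depends on ITensors.jl; the old imports are shown commented out):

```julia
import Pkg
Pkg.add("ITensorMPS")

# Before:
# using ITensors       # provided MPS, MPO, OpSum, siteinds, dmrg, apply, ...
# using ITensorTDVP    # provided tdvp and related solvers

# After: ITensorMPS re-exports all of the above
using ITensors, ITensorMPS
```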

### ITensorMPS.jl v0.2.1 release notes

52 changes: 52 additions & 0 deletions examples/autodiff/circuit_optimization/op.jl
@@ -0,0 +1,52 @@
using ITensors, ITensorMPS
using Zygote

s = siteind("Qubit")

f(x) = op("Ry", s; θ=x)[1, 1]

x = 0.2
@show f(x), cos(x / 2)
@show f'(x), -sin(x / 2) / 2

# Simple gate optimization
ψ0 = state(s, "0")
ψp = state(s, "+")

function loss(x)
  U = op("Ry", s; θ=x)
  Uψ0 = replaceprime(U * ψ0, 1 => 0)
  return -(dag(ψp) * Uψ0)[]
end

# Extremely simple gradient descent implementation,
# where gradients are computed with automatic differentiation
# using Zygote.
function gradient_descent(f, x0; γ, nsteps, grad_tol)
  @show γ, nsteps
  x = x0
  f_x = f(x)
  ∇f_x = f'(x)
  step = 0
  @show step, x, f_x, ∇f_x
  for step in 1:nsteps
    x -= γ * ∇f_x
    f_x = f(x)
    ∇f_x = f'(x)
    @show step, x, f_x, ∇f_x
    if norm(∇f_x) ≤ grad_tol
      break
    end
  end
  return x, f_x, ∇f_x
end

x0 = 0
γ = 2.0 # Learning rate
nsteps = 30 # Number of steps of gradient descent
grad_tol = 1e-4 # Stop if gradient falls below this value
x, loss_x, ∇loss_x = gradient_descent(loss, x0; γ=γ, nsteps=nsteps, grad_tol=grad_tol)

@show x0, loss(x0)
@show x, loss(x)
@show π / 2, loss(π / 2)
65 changes: 65 additions & 0 deletions examples/autodiff/circuit_optimization/state_preparation.jl
@@ -0,0 +1,65 @@
using ITensors, ITensorMPS
using OptimKit
using Random
using Zygote

nsites = 20 # Number of sites
nlayers = 3 # Layers of gates in the ansatz
gradtol = 1e-4 # Tolerance for stopping gradient descent

# A layer of the circuit we want to optimize
function layer(nsites, θ⃗)
  RY_layer = [("Ry", (n,), (θ=θ⃗[n],)) for n in 1:nsites]
  CX_layer = [("CX", (n, n + 1)) for n in 1:2:(nsites - 1)]
  return [RY_layer; CX_layer]
end

# The variational circuit we want to optimize
function variational_circuit(nsites, nlayers, θ⃗)
  range = 1:nsites
  circuit = layer(nsites, θ⃗[range])
  for n in 1:(nlayers - 1)
    circuit = [circuit; layer(nsites, θ⃗[range .+ n * nsites])]
  end
  return circuit
end

Random.seed!(1234)

θ⃗ᵗᵃʳᵍᵉᵗ = 2π * rand(nsites * nlayers)
𝒰ᵗᵃʳᵍᵉᵗ = variational_circuit(nsites, nlayers, θ⃗ᵗᵃʳᵍᵉᵗ)

s = siteinds("Qubit", nsites)
Uᵗᵃʳᵍᵉᵗ = ops(𝒰ᵗᵃʳᵍᵉᵗ, s)

ψ0 = MPS(s, "0")

# Create the random target state
ψᵗᵃʳᵍᵉᵗ = apply(Uᵗᵃʳᵍᵉᵗ, ψ0; cutoff=1e-8)

#
# The loss function, a function of the gate parameters
# and implicitly depending on the target state:
#
# loss(θ⃗) = -|⟨θ⃗ᵗᵃʳᵍᵉᵗ|U(θ⃗)|0⟩|² = -|⟨θ⃗ᵗᵃʳᵍᵉᵗ|θ⃗⟩|²
#
function loss(θ⃗)
  nsites = length(ψ0)
  s = siteinds(ψ0)
  𝒰θ⃗ = variational_circuit(nsites, nlayers, θ⃗)
  Uθ⃗ = ops(𝒰θ⃗, s)
  ψθ⃗ = apply(Uθ⃗, ψ0)
  return -abs(inner(ψᵗᵃʳᵍᵉᵗ, ψθ⃗))^2
end

θ⃗₀ = randn!(copy(θ⃗ᵗᵃʳᵍᵉᵗ))

@show loss(θ⃗₀), loss(θ⃗ᵗᵃʳᵍᵉᵗ)

loss_∇loss(x) = (loss(x), convert(Vector, loss'(x)))
algorithm = LBFGS(; gradtol=gradtol, verbosity=2)
θ⃗ₒₚₜ, lossₒₚₜ, ∇lossₒₚₜ, numfg, normgradhistory = optimize(loss_∇loss, θ⃗₀, algorithm)

@show loss(θ⃗ₒₚₜ), loss(θ⃗ᵗᵃʳᵍᵉᵗ)

nothing
81 changes: 81 additions & 0 deletions examples/autodiff/circuit_optimization/vqe.jl
@@ -0,0 +1,81 @@
using ITensors, ITensorMPS
using OptimKit
using Random
using Zygote

nsites = 4 # Number of sites
nlayers = 2 # Layers of gates in the ansatz
gradtol = 1e-4 # Tolerance for stopping gradient descent

# The Hamiltonian we are minimizing
function ising_hamiltonian(nsites; h)
  ℋ = OpSum()
  for j in 1:(nsites - 1)
    ℋ -= 1, "Z", j, "Z", j + 1
  end
  for j in 1:nsites
    ℋ += h, "X", j
  end
  return ℋ
end

# A layer of the circuit we want to optimize
function layer(nsites, θ⃗)
  RY_layer = [("Ry", (n,), (θ=θ⃗[n],)) for n in 1:nsites]
  CX_layer = [("CX", (n, n + 1)) for n in 1:2:(nsites - 1)]
  return [RY_layer; CX_layer]
end

# The variational circuit we want to optimize
function variational_circuit(nsites, nlayers, θ⃗)
  range = 1:nsites
  circuit = layer(nsites, θ⃗[range])
  for n in 1:(nlayers - 1)
    circuit = [circuit; layer(nsites, θ⃗[range .+ n * nsites])]
  end
  return circuit
end

s = siteinds("Qubit", nsites)

h = 1.3
ℋ = ising_hamiltonian(nsites; h=h)
H = MPO(ℋ, s)
ψ0 = MPS(s, "0")

#
# The loss function, a function of the gate parameters
# and implicitly depending on the Hamiltonian and state:
#
# loss(θ⃗) = ⟨0|U(θ⃗)† H U(θ⃗)|0⟩ = ⟨θ⃗|H|θ⃗⟩
#
function loss(θ⃗)
  nsites = length(ψ0)
  s = siteinds(ψ0)
  𝒰θ⃗ = variational_circuit(nsites, nlayers, θ⃗)
  Uθ⃗ = ops(𝒰θ⃗, s)
  ψθ⃗ = apply(Uθ⃗, ψ0; cutoff=1e-8)
  return inner(ψθ⃗, H, ψθ⃗; cutoff=1e-8)
end

Random.seed!(1234)
θ⃗₀ = 2π * rand(nsites * nlayers)

@show loss(θ⃗₀)

println("\nOptimize circuit with gradient optimization")

loss_∇loss(x) = (loss(x), convert(Vector, loss'(x)))
algorithm = LBFGS(; gradtol=1e-3, verbosity=2)
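# Optimize the circuit parameters with L-BFGS, using Zygote to compute gradients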
θ⃗ₒₚₜ, lossₒₚₜ, ∇lossₒₚₜ, numfg, normgradhistory = optimize(loss_∇loss, θ⃗₀, algorithm)

@show loss(θ⃗ₒₚₜ)

println("\nRun DMRG as a comparison")

e_dmrg, ψ_dmrg = dmrg(H, ψ0; nsweeps=5, maxdim=10)

println("\nCompare variational circuit energy to DMRG energy")
@show loss(θ⃗ₒₚₜ), e_dmrg

nothing
49 changes: 49 additions & 0 deletions examples/autodiff/mps_autodiff.jl
@@ -0,0 +1,49 @@
using ITensors, ITensorMPS
using OptimKit
using Zygote

function ising(n; J, h)
  os = OpSum()
  for j in 1:(n - 1)
    os -= J, "Z", j, "Z", j + 1
  end
  for j in 1:n
    os -= h, "X", j
  end
  return os
end

function loss(H, ψ)
  n = length(ψ)
  ψHψ = ITensor(1.0)
  ψψ = ITensor(1.0)
  for j in 1:n
    ψHψ = ψHψ * dag(ψ[j]') * H[j] * ψ[j]
    ψψ = ψψ * replaceinds(dag(ψ[j]'), s[j]' => s[j]) * ψ[j]
  end
  return ψHψ[] / ψψ[]
end

n = 10
s = siteinds("S=1/2", n)
J = 1.0
h = 0.5

# Loss function only works with `Vector{ITensor}`,
# extract with `ITensors.data`.
ψ0 = ITensors.data(random_mps(s; linkdims=10))
H = ITensors.data(MPO(ising(n; J, h), s))

loss(ψ) = loss(H, ψ)

optimizer = LBFGS(; maxiter=25, verbosity=2)
function loss_and_grad(x)
  y, (∇,) = withgradient(loss, x)
  return y, ∇
end
ψ, fs, gs, niter, normgradhistory = optimize(loss_and_grad, ψ0, optimizer)
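# For comparison, wrap the raw tensor vectors back into an MPO and MPS and run DMRG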
Edmrg, ψdmrg = dmrg(MPO(H), MPS(ψ0); nsweeps=10, cutoff=1e-8)

@show loss(ψ0), norm(loss'(ψ0))
@show loss(ψ), norm(loss'(ψ))
@show loss(ITensors.data(ψdmrg)), norm(loss'(ITensors.data(ψdmrg)))