improve cotengra benchmark example
refraction-ray committed Jun 3, 2024
1 parent 9827b39 commit ed378cb
Showing 1 changed file with 14 additions and 17 deletions.
examples/cotengra_setting_bench.py: 31 changes (14 additions & 17 deletions)
@@ -48,10 +48,7 @@ def loss_f(params, n, nlayers):

     c = generate_circuit(params, graph, n, nlayers)

-    # calculate the loss function, max cut
-    loss = 0.0
-    for e in graph.edges:
-        loss += c.expectation_ps(z=[e[0], e[1]])
+    loss = c.expectation_ps(z=[0, 1, 2], reuse=False)

     return K.real(loss)

@@ -65,8 +62,8 @@ def loss_f(params, n, nlayers):


 # define the benchmark parameters
-n = 10
-nlayers = 15
+n = 12
+nlayers = 12

 # define the cotengra optimizer parameters
 graph_args = {
@@ -84,7 +81,7 @@ def loss_f(params, n, nlayers):
 methods_args = [ # https://cotengra.readthedocs.io/en/latest/advanced.html#drivers
     "greedy",
     "kahypar",
-    "labels",
+    # "labels",
     # "spinglass", # requires igraph
     # "labelprop", # requires igraph
     # "betweenness", # requires igraph
@@ -94,26 +91,26 @@ def loss_f(params, n, nlayers):
 ]

 optlib_args = [ # https://cotengra.readthedocs.io/en/latest/advanced.html#optimization-library
-    # "optuna", # pip install optuna
-    "random", # default when no library is installed
+    "optuna", # pip install optuna
+    # "random", # default when no library is installed
     # "baytune", # pip install baytune
-    "nevergrad", # pip install nevergrad
+    # "nevergrad", # pip install nevergrad
     # "chocolate", # pip install git+https://github.com/AIworx-Labs/chocolate@master
     # "skopt", # pip install scikit-optimize
 ]

 post_processing_args = [ # https://cotengra.readthedocs.io/en/latest/advanced.html#slicing-and-subtree-reconfiguration
     (None, None),
-    ("slicing_opts", {"target_size": 2**28}),
-    ("slicing_reconf_opts", {"target_size": 2**28}),
+    # ("slicing_opts", {"target_size": 2**28}),
+    # ("slicing_reconf_opts", {"target_size": 2**28}),
     ("reconf_opts", {}),
     ("simulated_annealing_opts", {}),
 ]

 minimize_args = [ # https://cotengra.readthedocs.io/en/main/advanced.html#objective
-    "flops", # minimize the total number of scalar operations
-    "size", # minimize the size of the largest intermediate tensor
-    "write", # minimize the sum of sizes of all intermediate tensors
+    # "flops", # minimize the total number of scalar operations
+    # "size", # minimize the size of the largest intermediate tensor
+    # "write", # minimize the sum of sizes of all intermediate tensors
     "combo", # minimize the sum of FLOPS + α * WRITE where α is 64
 ]

@@ -125,7 +122,7 @@ def get_optimizer(method, optlib, post_processing, minimize):
             optlib=optlib,
             minimize=minimize,
             parallel=True,
-            max_time=30,
+            max_time=60,
             max_repeats=128,
             progbar=True,
         )
@@ -135,7 +132,7 @@ def get_optimizer(method, optlib, post_processing, minimize):
             optlib=optlib,
             minimize=minimize,
             parallel=True,
-            max_time=30,
+            max_time=60,
             max_repeats=128,
             progbar=True,
             **{post_processing[0]: post_processing[1]},
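
For context on how these settings are consumed, here is a minimal sketch (not part of the diff) of driving one configuration from the lists above. It assumes tensorcircuit's custom-contractor interface tc.set_contractor("custom", optimizer=...), the loss_f and get_optimizer definitions from the example file, and a hypothetical shape for the variational parameters passed to loss_f.

    import itertools
    import time

    import tensorcircuit as tc

    # backend choice is an assumption; the example file sets its own
    K = tc.set_backend("tensorflow")

    # sweep one cotengra optimizer per combination of the lists above
    for method, optlib, post, minimize in itertools.product(
        methods_args, optlib_args, post_processing_args, minimize_args
    ):
        opt = get_optimizer(method, optlib, post, minimize)
        # register the cotengra optimizer as tensorcircuit's contraction path finder
        tc.set_contractor("custom", optimizer=opt, preprocessing=True)
        t0 = time.time()
        v = loss_f(K.ones([nlayers, 2 * n]), n, nlayers)  # hypothetical params shape
        print(method, optlib, post, minimize, K.numpy(v), time.time() - t0)

Each loop iteration then reflects both the path-search time (bounded by max_time / max_repeats above) and the quality of the resulting contraction path for the chosen objective.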
