Skip to content

Commit

Permalink
Merge pull request #77 from adtzlr/add-gvp-hvp-methods
Browse files Browse the repository at this point in the history
Add gradient- and hessian-vector-product methods
  • Loading branch information
adtzlr authored Jan 10, 2022
2 parents 1090bcc + 036193b commit b4f1fd3
Show file tree
Hide file tree
Showing 8 changed files with 145 additions and 15 deletions.
11 changes: 11 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,17 @@ P = Mat.gradient([defgrad])[0]
A = Mat.hessian([defgrad])[0]
```

In a similar way, gradient-vector-products and hessian-vector-products are accessible via the **gradient_vector_product** and **hessian_vector_product** methods, respectively.

```python
v = np.random.rand(3, 3, 5, 100) - 0.5
u = np.random.rand(3, 3, 5, 100) - 0.5

W = Mat.function([defgrad])[0]
dW = Mat.gradient_vector_product([defgrad], [v])[0]
DdW = Mat.hessian_vector_product([defgrad], [v], [u])[0]
```

## Template classes for hyperelasticity
matADi provides several template classes suitable for hyperelastic materials. Some isotropic hyperelastic material formulations are located in `matadi.models` (see list below). These strain energy functions have to be passed as the `fun` argument into an instance of `MaterialHyperelastic`. Usage is exactly the same as described above. To convert a hyperelastic material based on the deformation gradient into a mixed three-field formulation suitable for nearly-incompressible behavior (*displacements*, *pressure* and *volume ratio*) an instance of a `MaterialHyperelastic` class has to be passed to `ThreeFieldVariation`. For *plane strain* or *plane stress* use `MaterialHyperelasticPlaneStrain`, `MaterialHyperelasticPlaneStressIncompressible` or `MaterialHyperelasticPlaneStressLinearElastic` instead of `MaterialHyperelastic`. For plane strain *displacements*, *pressure* and *volume ratio* mixed-field formulations use `ThreeFieldVariationPlaneStrain`.

Expand Down
2 changes: 1 addition & 1 deletion matadi/__about__.py
Original file line number Diff line number Diff line change
@@ -1 +1 @@
__version__ = "0.0.27"
__version__ = "0.0.28"
82 changes: 71 additions & 11 deletions matadi/_material.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
import casadi as ca

from ._apply import apply
from . import Variable


class Function:
Expand Down Expand Up @@ -72,31 +73,53 @@ def __init__(self, x, fun, args=(), kwargs={}, compress=False):
super().__init__(x=x, fun=fun, args=args, kwargs=kwargs)

_h_diag = []
_hvp_diag = []

self._h = []
self._g = []

self._hvp = []
self._gvp = []

# generate vectors for gradient- and hessian-vector products
self.v = [Variable("v%d" % a, *x.shape) for a, x in enumerate(self.x)]
self.u = [Variable("u%d" % a, *x.shape) for a, x in enumerate(self.x)]

# alias
self.jacobian = self.gradient

# generate list of diagonal hessian entries and gradients
for y in self.x:
_hy, _gy = ca.hessian(self._f, y)
_h_diag.append(_hy)
self._g.append(_gy)

# generate upper-triangle of hessian
for i, g in enumerate(self._g):
for j, y in enumerate(self.x):
# (including vector-products)
for x, v, u in zip(self.x, self.v, self.u):
_h, _g = ca.hessian(self._f, x)
_h_diag.append(_h)
self._g.append(_g)

_gvp = ca.jtimes(self._f, x, v)
_hvp = ca.jtimes(_gvp, x, u)
self._gvp.append(_gvp)
_hvp_diag.append(_hvp)

# generate upper-triangle of hessian (-vector-products)
for i, (g, gvp) in enumerate(zip(self._g, self._gvp)):
for j, (x, u) in enumerate(zip(self.x, self.u)):
if j >= i:
if i != j:
self._h.append(ca.jacobian(g, y))
self._h.append(ca.jacobian(g, x))
self._hvp.append(ca.jtimes(gvp, x, u))
else:
self._h.append(_h_diag[i])
self._hvp.append(_hvp_diag[i])

# generate casADi function objects
self._gradient = ca.Function("g", self.x, self._g)
self._hessian = ca.Function("h", self.x, self._h)
self._gradient_vector_product = ca.Function(
"gvp", [*self.x, *self.v], self._gvp
)
self._hessian_vector_product = ca.Function(
"hvp", [*self.x, *self.v, *self.u], self._hvp
)

# generate indices
self._idx_hessian = []
Expand Down Expand Up @@ -135,22 +158,49 @@ def hessian(self, x, threads=cpu_count()):
threads=threads,
)

def gradient_vector_product(self, x, v, threads=cpu_count()):
    """Evaluate the gradient-vector-products for the given list of
    function arguments ``x`` and direction vectors ``v`` and return
    them as a list (one entry per variable)."""

    # the compiled casadi gvp-function expects the function arguments
    # followed by the direction vectors as a single flat input list
    inputs = [*x, *v]
    n_outputs = len(self._gvp)

    return apply(
        inputs,
        fun=self._gradient_vector_product,
        x_shape=self._idx_gradient,
        fun_shape=self._idx_function * n_outputs,
        threads=threads,
    )

def hessian_vector_product(self, x, v, u, threads=cpu_count()):
    """Evaluate the hessian-vector-products for the given list of
    function arguments ``x`` and two lists of direction vectors
    ``v`` and ``u``; return them as a list."""

    # the compiled casadi hvp-function expects the function arguments
    # followed by both sets of direction vectors as one flat list
    inputs = [*x, *v, *u]
    n_outputs = len(self._hvp)

    return apply(
        inputs,
        fun=self._hessian_vector_product,
        x_shape=self._idx_gradient,
        fun_shape=self._idx_function * n_outputs,
        threads=threads,
    )


class MaterialTensor(FunctionTensor):
def __init__(self, x, fun, args=(), kwargs={}, compress=False):

# init Function
super().__init__(x=x, fun=fun, args=args, kwargs=kwargs)

# generate gradients
self._g = [ca.jacobian(self._f, y) for y in self.x]
# generate vector for gradient-vector-product
self.v = [Variable("v%d" % a, *x.shape) for a, x in enumerate(self.x)]

# generate gradient and gradient-vector-product
self._g = [ca.jacobian(self._f, x) for x in self.x]
self._gvp = [ca.jtimes(self._f, x, v) for x, v in zip(self.x, self.v)]

# alias
self.jacobian = self.gradient

# generate casADi function objects
self._function = ca.Function("f", self.x, [self._f])
self._gradient = ca.Function("g", self.x, self._g)
self._gradient_vector_product = ca.Function(
"gvp", [*self.x, *self.v], self._gvp
)

# generate indices
self._idx_gradient = []
Expand All @@ -177,3 +227,13 @@ def gradient(self, x, threads=cpu_count()):
fun_shape=self._idx_gradient,
threads=threads,
)

def gradient_vector_product(self, x, threads=cpu_count()):
    """Evaluate the gradient-vector-products and return them as a list.

    NOTE(review): unlike ``Material``, this variant takes a single
    combined list — presumably the function arguments followed by the
    direction vectors, i.e. ``[*args, *vectors]`` — confirm with callers.
    """
    n_outputs = len(self._gvp)

    return apply(
        x,
        fun=self._gradient_vector_product,
        x_shape=self._idx_function,
        fun_shape=self._idx_function * n_outputs,
        threads=threads,
    )
8 changes: 8 additions & 0 deletions matadi/_templates.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,8 @@ def __init__(self, material):
self.function = self.W.function
self.gradient = self.W.gradient
self.hessian = self.W.hessian
self.gradient_vector_product = self.W.gradient_vector_product
self.hessian_vector_product = self.W.hessian_vector_product

def _fun(self, x):
F, p, J = x
Expand All @@ -34,6 +36,8 @@ def __init__(self, material):
self.function = self.W.function
self.gradient = self.W.gradient
self.hessian = self.W.hessian
self.gradient_vector_product = self.W.gradient_vector_product
self.hessian_vector_product = self.W.hessian_vector_product

def _fun(self, x):
F, p, J = x
Expand All @@ -54,6 +58,8 @@ def __init__(self, fun, **kwargs):
self.function = self.W.function
self.gradient = self.W.gradient
self.hessian = self.W.hessian
self.gradient_vector_product = self.W.gradient_vector_product
self.hessian_vector_product = self.W.hessian_vector_product

def _fun_wrapper(self, x, **kwargs):
return self.fun(x[0], **kwargs)
Expand All @@ -69,6 +75,8 @@ def __init__(self, fun, **kwargs):
self.function = self.W.function
self.gradient = self.W.gradient
self.hessian = self.W.hessian
self.gradient_vector_product = self.W.gradient_vector_product
self.hessian_vector_product = self.W.hessian_vector_product

def _fun_wrapper(self, x, **kwargs):
F = horzcat(vertcat(x[0], zeros(1, 2)), zeros(3, 1))
Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[project]
name = "matadi"
version = "0.0.27"
version = "0.0.28"
description = "Material Definition with Automatic Differentiation"
readme = "README.md"
requires-python = ">=3.6"
Expand Down
2 changes: 1 addition & 1 deletion setup.cfg
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[metadata]
name = matadi
version = 0.0.27
version = 0.0.28
author = Andreas Dutzler
author_email = [email protected]
description = Material Definition with Automatic Differentiation
Expand Down
5 changes: 5 additions & 0 deletions tests/test_eigvals_grad.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,7 @@ def test_cof():

# data
FF = np.diag(2.4 * np.ones(3)).reshape(3, 3, 1, 1)
dF = np.diag(0.2 * np.ones(3)).reshape(3, 3, 1, 1)

# fun
def g(x):
Expand All @@ -105,6 +106,10 @@ def g(x):
dW = W.gradient([FF])
DW = W.jacobian([FF])

dW_gvp = W.gradient_vector_product([FF, dF])

assert np.allclose(np.einsum("ij...,ij...->...", dW[0], dF), dW_gvp)

assert np.allclose(dW, DW)
assert np.allclose(dW[0][:, :, :, :, 0, 0], Eye4)

Expand Down
48 changes: 47 additions & 1 deletion tests/test_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -71,12 +71,19 @@ def test_models():

# data
np.random.seed(2345537)
FF = np.random.rand(3, 3, 5, 100)
dF = np.random.rand(3, 3, 5, 100) - 0.5
DF = np.random.rand(3, 3, 5, 100) - 0.5
FF = np.random.rand(3, 3, 5, 100) - 0.5
for a in range(3):
FF[a, a] += 1

pp = np.random.rand(5, 100)
dp = np.random.rand(5, 100)
Dp = np.random.rand(5, 100)

JJ = 1 + np.random.rand(5, 100) / 10
dJ = np.random.rand(5, 100) / 10
DJ = np.random.rand(5, 100) / 10

lib = library()

Expand All @@ -92,10 +99,19 @@ def test_models():
P = HM.gradient([FF], threads=1)
A = HM.hessian([FF])

dW = HM.gradient_vector_product([FF], [dF])
DW = HM.hessian_vector_product([FF], [dF], [DF])

assert len(W) == 1
assert len(P) == 1
assert len(A) == 1

dW_check = np.einsum("ij...,ij...->...", P[0], dF)
DW_check = np.einsum("ij...,ijkl...,kl...->...", dF, A[0], DF)

assert np.allclose(dW_check, dW[0])
assert np.allclose(DW_check, DW[0])

W = HM_mixed.function([FF, pp, JJ])
P = HM_mixed.gradient([FF, pp, JJ])
A = HM_mixed.hessian([FF, pp, JJ])
Expand All @@ -104,6 +120,36 @@ def test_models():
assert len(P) == 3
assert len(A) == 6

dW = HM_mixed.gradient_vector_product([FF, pp, JJ], [dF, dp, dJ])
DW = HM_mixed.hessian_vector_product([FF, pp, JJ], [dF, dp, dJ], [DF, Dp, DJ])

dWF = np.einsum("ij...,ij...->...", P[0], dF)
dWp = P[1] * dp
dWJ = P[2] * dJ

assert np.allclose(dWF, dW[0])
assert np.allclose(dWp, dW[1])
assert np.allclose(dWJ, dW[2])

dWdFdF = np.einsum("ij...,ijkl...,kl...->...", dF, A[0], DF)
dWdpdp = dp * A[3][0, 0, 0, 0] * Dp
dWdJdJ = dJ * A[5][0, 0, 0, 0] * DJ

assert np.allclose(dWdFdF, DW[0])
assert np.allclose(dWdpdp, DW[3])
assert np.allclose(dWdJdJ, DW[5])

dWdFdp = np.einsum("ij...,ij...,...->...", dF, A[1][:, :, 0, 0], Dp)
dWdFdJ = np.einsum("ij...,ij...,...->...", dF, A[2][:, :, 0, 0], DJ)
dWdpdJ = dp * A[4][0, 0, 0, 0] * DJ

assert np.allclose(dWdFdF, DW[0])
assert np.allclose(dWdFdp, DW[1])
assert np.allclose(dWdFdJ, DW[2])
assert np.allclose(dWdpdp, DW[3])
assert np.allclose(dWdpdJ, DW[4])
assert np.allclose(dWdJdJ, DW[5])

nh = matadi.MaterialHyperelastic(md.neo_hooke, **lib[md.neo_hooke])
mr = matadi.MaterialHyperelastic(md.mooney_rivlin, **lib[md.mooney_rivlin])

Expand Down

0 comments on commit b4f1fd3

Please sign in to comment.