
Commit

Merge pull request #1 from jejjohnson/fd-methods
Added Forward, Backward, and Central Difference Options.
ASEM000 authored Mar 20, 2023
2 parents 5ebbe3b + e817320 commit b60ec9a
Showing 5 changed files with 303 additions and 111 deletions.
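Quick orientation for reviewers: this change threads a `method` argument through every operator. A minimal sketch of the intended usage — assuming the argument also accepts `"forward"` and `"backward"` per the commit title; only `"central"` is visible in the diff below:

```python
import jax.numpy as jnp
import numpy.testing as npt
import finitediffx as fdx

# f(x) = x**2 on a uniform grid; the exact derivative is 2x.
dx = 0.01
x = jnp.arange(0.0, 1.0, dx)
f = x**2

# Assumed method names: "forward" and "backward" are inferred from the
# commit title; only "central" appears explicitly in this diff.
for method in ("forward", "backward", "central"):
    dfdx = fdx.difference(f, axis=0, step_size=dx, accuracy=6, method=method)
    npt.assert_allclose(dfdx, 2 * x, atol=1e-4)
```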
85 changes: 43 additions & 42 deletions FiniteDiffX Examples.ipynb
@@ -1,41 +1,25 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"authorship_tag": "ABX9TyPb3/ZgzpaTTXlbaGk5dBZL",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
"colab_type": "text",
"id": "view-in-github"
},
"source": [
"<a href=\"https://colab.research.google.com/github/ASEM000/FiniteDiffX/blob/main/FiniteDiffX%20Examples.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "code",
"source": [
"!pip install finitediffx --quiet"
],
"execution_count": 1,
"metadata": {
"id": "VseGGGblAX5R"
},
"execution_count": 3,
"outputs": []
"outputs": [],
"source": [
"!pip install finitediffx --quiet"
]
},
{
"cell_type": "code",
@@ -47,15 +31,7 @@
"id": "sAhEeGluAWxR",
"outputId": "08ab2c66-e1bc-4ed1-a9cf-cba3dcb94b68"
},
"outputs": [
{
"output_type": "stream",
"name": "stderr",
"text": [
"WARNING:jax._src.lib.xla_bridge:No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)\n"
]
}
],
"outputs": [],
"source": [
"\n",
"import jax\n",
@@ -83,37 +59,37 @@
"F = jnp.stack([F1, F2, F3], axis=0)\n",
"\n",
"# ∂F1/∂x : differentiate F1 with respect to x (i.e axis=0)\n",
"dF1dx = fdx.difference(F1, axis=0, step_size=dx, accuracy=6)\n",
"dF1dx = fdx.difference(F1, axis=0, step_size=dx, accuracy=6, method=\"central\")\n",
"dF1dx_exact = 2 * X\n",
"npt.assert_allclose(dF1dx, dF1dx_exact, atol=1e-7)\n",
"\n",
"# ∂F2/∂y : differentiate F2 with respect to y (i.e axis=1)\n",
"dF2dy = fdx.difference(F2, axis=1, step_size=dy, accuracy=6)\n",
"dF2dy = fdx.difference(F2, axis=1, step_size=dy, accuracy=6, method=\"central\")\n",
"dF2dy_exact = 3 * Y**2\n",
"npt.assert_allclose(dF2dy, dF2dy_exact, atol=1e-7)\n",
"\n",
"# ∇.F : the divergence of F\n",
"divF = fdx.divergence(F, step_size=(dx, dy, dz), keepdims=False, accuracy=6)\n",
"divF = fdx.divergence(F, step_size=(dx, dy, dz), keepdims=False, accuracy=6, method=\"central\")\n",
"divF_exact = 2 * X + 3 * Y**2\n",
"npt.assert_allclose(divF, divF_exact, atol=1e-7)\n",
"\n",
"# ∇F1 : the gradient of F1\n",
"gradF1 = fdx.gradient(F1, step_size=(dx, dy, dz), accuracy=6)\n",
"gradF1 = fdx.gradient(F1, step_size=(dx, dy, dz), accuracy=6, method=\"central\")\n",
"gradF1_exact = jnp.stack([2 * X, 3 * Y**2, 0 * X], axis=0)\n",
"npt.assert_allclose(gradF1, gradF1_exact, atol=1e-7)\n",
"\n",
"# ΔF1 : laplacian of F1\n",
"lapF1 = fdx.laplacian(F1, step_size=(dx, dy, dz), accuracy=6)\n",
"lapF1 = fdx.laplacian(F1, step_size=(dx, dy, dz), accuracy=6, method=\"central\")\n",
"lapF1_exact = 2 + 6 * Y\n",
"npt.assert_allclose(lapF1, lapF1_exact, atol=1e-7)\n",
"\n",
"# ∇xF : the curl of F\n",
"curlF = fdx.curl(F, step_size=(dx, dy, dz), accuracy=6)\n",
"curlF = fdx.curl(F, step_size=(dx, dy, dz), accuracy=6, method=\"central\")\n",
"curlF_exact = jnp.stack([F1 * 0, F1 * 0, 4 * X**3 - 3 * Y**2], axis=0)\n",
"npt.assert_allclose(curlF, curlF_exact, atol=1e-7)\n",
"\n",
"# Jacobian of F\n",
"JF = fdx.jacobian(F, accuracy=4, step_size=(dx, dy, dz))\n",
"JF = fdx.jacobian(F, accuracy=4, step_size=(dx, dy, dz), method=\"central\")\n",
"JF_exact = jnp.array(\n",
" [\n",
" [2 * X, 3 * Y**2, jnp.zeros_like(X)],\n",
@@ -124,7 +100,7 @@
"npt.assert_allclose(JF, JF_exact, atol=1e-7)\n",
"\n",
"# Hessian of F1\n",
"HF1 = fdx.hessian(F1, accuracy=4, step_size=(dx, dy, dz))\n",
"HF1 = fdx.hessian(F1, accuracy=4, step_size=(dx, dy, dz), method=\"central\")\n",
"HF1_exact = jnp.array(\n",
" [\n",
" [\n",
@@ -150,5 +126,30 @@
"\n"
]
}
- ]
- }
+ ],
+ "metadata": {
+ "colab": {
+ "authorship_tag": "ABX9TyPb3/ZgzpaTTXlbaGk5dBZL",
+ "include_colab_link": true,
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.0"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+ }
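For reference, the three `method` choices correspond to the standard one-sided and centered first-derivative stencils. Lowest-order forms are shown below; higher `accuracy` settings presumably use wider stencils of the same families:

$$
\begin{aligned}
\text{forward:}  \quad f'(x) &\approx \frac{f(x+h) - f(x)}{h},     &&\text{error } \mathcal{O}(h)\\
\text{backward:} \quad f'(x) &\approx \frac{f(x) - f(x-h)}{h},     &&\text{error } \mathcal{O}(h)\\
\text{central:}  \quad f'(x) &\approx \frac{f(x+h) - f(x-h)}{2h},  &&\text{error } \mathcal{O}(h^2)
\end{aligned}
$$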
28 changes: 14 additions & 14 deletions README.md
@@ -20,13 +20,13 @@
Differentiable finite difference tools in `jax`
Implements :

- - `difference(array, axis, accuracy, step_size, derivative)`
- - `gradient(array, accuracy, step_size)`
- - `jacobian(array, accuracy, step_size)`
- - `divergence(array, accuracy, step_size, keepdims)`
- - `hessian(array, accuracy, step_size)`
- - `laplacian(array, accuracy, step_size)`
- - `curl(array, step_size, keep_dims)`
+ - `difference(array, axis, accuracy, step_size, method, derivative)`
+ - `gradient(array, accuracy, method, step_size)`
+ - `jacobian(array, accuracy, method, step_size)`
+ - `divergence(array, accuracy, step_size, method, keepdims)`
+ - `hessian(array, accuracy, method, step_size)`
+ - `laplacian(array, accuracy, method, step_size)`
+ - `curl(array, step_size, method, keep_dims)`

## 🛠️ Installation<a id="Installation"></a>

@@ -69,7 +69,7 @@ F3 = jnp.zeros_like(F1)
F = jnp.stack([F1, F2, F3], axis=0)

# ∂F1/∂x : differentiate F1 with respect to x (i.e axis=0)
- dF1dx = fdx.difference(F1, axis=0, step_size=dx, accuracy=6)
+ dF1dx = fdx.difference(F1, axis=0, step_size=dx, accuracy=6, method="central")
dF1dx_exact = 2 * X
npt.assert_allclose(dF1dx, dF1dx_exact, atol=1e-7)

@@ -79,27 +79,27 @@ dF2dy_exact = 3 * Y**2
npt.assert_allclose(dF2dy, dF2dy_exact, atol=1e-7)

# ∇.F : the divergence of F
- divF = fdx.divergence(F, step_size=(dx, dy, dz), keepdims=False, accuracy=6)
+ divF = fdx.divergence(F, step_size=(dx, dy, dz), keepdims=False, accuracy=6, method="central")
divF_exact = 2 * X + 3 * Y**2
npt.assert_allclose(divF, divF_exact, atol=1e-7)

# ∇F1 : the gradient of F1
- gradF1 = fdx.gradient(F1, step_size=(dx, dy, dz), accuracy=6)
+ gradF1 = fdx.gradient(F1, step_size=(dx, dy, dz), accuracy=6, method="central")
gradF1_exact = jnp.stack([2 * X, 3 * Y**2, 0 * X], axis=0)
npt.assert_allclose(gradF1, gradF1_exact, atol=1e-7)

# ΔF1 : laplacian of F1
- lapF1 = fdx.laplacian(F1, step_size=(dx, dy, dz), accuracy=6)
+ lapF1 = fdx.laplacian(F1, step_size=(dx, dy, dz), accuracy=6, method="central")
lapF1_exact = 2 + 6 * Y
npt.assert_allclose(lapF1, lapF1_exact, atol=1e-7)

# ∇xF : the curl of F
- curlF = fdx.curl(F, step_size=(dx, dy, dz), accuracy=6)
+ curlF = fdx.curl(F, step_size=(dx, dy, dz), accuracy=6, method="central")
curlF_exact = jnp.stack([F1 * 0, F1 * 0, 4 * X**3 - 3 * Y**2], axis=0)
npt.assert_allclose(curlF, curlF_exact, atol=1e-7)

# Jacobian of F
- JF = fdx.jacobian(F, accuracy=4, step_size=(dx, dy, dz))
+ JF = fdx.jacobian(F, accuracy=4, step_size=(dx, dy, dz), method="central")
JF_exact = jnp.array(
[
[2 * X, 3 * Y**2, jnp.zeros_like(X)],
@@ -110,7 +110,7 @@ JF_exact = jnp.array(
npt.assert_allclose(JF, JF_exact, atol=1e-7)

# Hessian of F1
- HF1 = fdx.hessian(F1, accuracy=4, step_size=(dx, dy, dz))
+ HF1 = fdx.hessian(F1, accuracy=4, step_size=(dx, dy, dz), method="central")
HF1_exact = jnp.array(
[
[
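The examples keep `method="central"` throughout, likely because the centered stencil's truncation error shrinks quadratically in the step size while the one-sided stencils are only first-order at the same width. A standalone check of that gap (plain NumPy in float64; not part of this PR):

```python
import numpy as np

# Lowest-order stencils on f(x) = sin(x) at x = 1.0; exact derivative is cos(1).
f, x, h = np.sin, 1.0, 1e-3
exact = np.cos(x)

forward = (f(x + h) - f(x)) / h            # error ~ (h/2)|f''(x)|   ≈ 4.2e-4
central = (f(x + h) - f(x - h)) / (2 * h)  # error ~ (h²/6)|f'''(x)| ≈ 9.0e-8

print(abs(forward - exact), abs(central - exact))
```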
