test diff with poly layer
lkct committed Sep 25, 2024
1 parent 1238437 commit 520dfb4
Showing 4 changed files with 180 additions and 8 deletions.
91 changes: 91 additions & 0 deletions tests/backend/torch/test_compile_circuit_operators.py
@@ -1,4 +1,6 @@
import functools
import itertools
from typing import cast

import numpy as np
import pytest
@@ -8,6 +10,7 @@
import cirkit.symbolic.functional as SF
from cirkit.backend.torch.circuits import TorchCircuit
from cirkit.backend.torch.compiler import TorchCompiler
from cirkit.symbolic.layers import PolynomialLayer
from tests.floats import allclose, isclose
from tests.symbolic.test_utils import (
build_bivariate_monotonic_structured_cpt_pc,
@@ -96,3 +99,91 @@ def test_compile_product_integrate_pc_gaussian():
int_a, int_b = -np.inf, np.inf
ig, err = integrate.dblquad(df, int_a, int_b, int_a, int_b)
assert np.isclose(ig, torch.exp(z).item(), atol=1e-15)


# TODO: semiring="complex-lse-sum"?
@pytest.mark.parametrize(
"semiring,fold,optimize,num_products",
itertools.product(["sum-product"], [False, True], [False, True], [2, 3, 4]),
)
def test_compile_product_pc_polynomial(
semiring: str, fold: bool, optimize: bool, num_products: int
) -> None:
compiler = TorchCompiler(semiring=semiring, fold=fold, optimize=optimize)
scs, tcs = [], []
for i in range(num_products):
sci = build_multivariate_monotonic_structured_cpt_pc(
num_units=2 + i, input_layer="polynomial"
)
tci = compiler.compile(sci)
scs.append(sci)
tcs.append(tci)

sc = functools.reduce(SF.multiply, scs)
num_variables = sc.num_variables
degp1 = cast(PolynomialLayer, next(sc.inputs)).degree + 1
tc: TorchCircuit = compiler.compile(sc)

inputs = (
torch.tensor(0.0) # Get default float dtype.
.new_tensor( # degp1**D grid points are enough to determine the coeffs.
list(itertools.product(range(degp1), repeat=num_variables)) # type: ignore[misc]
)
.unsqueeze(dim=-2)
.requires_grad_()
) # shape (B, C=1, D=num_variables).

zs = torch.stack([tci(inputs) for tci in tcs], dim=0)
# Stacked shape: (num_prod, B, num_out=1, num_cls=1).
assert zs.shape == (num_products, inputs.shape[0], 1, 1)
zs = zs.squeeze() # shape (num_prod, B).

# Evaluate the compiled product circuit on the same inputs
z = tc(inputs)
assert z.shape == (inputs.shape[0], 1, 1) # shape (B, num_out=1, num_cls=1).
z = z.squeeze() # shape (B,).

assert allclose(compiler.semiring.prod(zs, dim=0), z)
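
A standalone sketch of the two identities exercised here, using plain NumPy univariate polynomials (independent of cirkit; coefficient vectors are low-degree-first): the product polynomial agrees pointwise with the product of values, and degrees add.

import numpy as np
from numpy.polynomial import polynomial as P

c1 = np.array([1.0, 2.0, 3.0])        # 1 + 2x + 3x^2 (degree 2)
c2 = np.array([0.5, -1.0, 0.0, 4.0])  # 0.5 - x + 4x^3 (degree 3)
c_prod = P.polymul(c1, c2)            # coefficients of the product polynomial

x = np.linspace(-1.0, 1.0, 7)
# Pointwise, evaluating the product equals multiplying the evaluations.
assert np.allclose(P.polyval(x, c_prod), P.polyval(x, c1) * P.polyval(x, c2))
# Degrees add: deg(c1 * c2) = deg(c1) + deg(c2).
assert len(c_prod) - 1 == (len(c1) - 1) + (len(c2) - 1)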


# TODO: semiring="complex-lse-sum"?
# TODO: test high-order?
@pytest.mark.parametrize(
"semiring,fold,optimize", itertools.product(["sum-product"], [False, True], [False, True])
)
def test_compile_differentiate_pc_polynomial(semiring: str, fold: bool, optimize: bool) -> None:
compiler = TorchCompiler(semiring=semiring, fold=fold, optimize=optimize)
sc = build_multivariate_monotonic_structured_cpt_pc(input_layer="polynomial")
num_variables = sc.num_variables

diff_sc = SF.differentiate(sc, order=1)
diff_tc: TorchCircuit = compiler.compile(diff_sc)
assert isinstance(diff_tc, TorchCircuit)
tc: TorchCircuit = compiler.get_compiled_circuit(sc)
assert isinstance(tc, TorchCircuit)

inputs = (
torch.tensor([[0.0] * num_variables, range(num_variables)]) # type: ignore[misc]
.unsqueeze(dim=-2)
.requires_grad_()
) # shape (B=2, C=1, D=num_variables).

with torch.enable_grad():
output = tc(inputs)
assert output.shape == (2, 1, 1) # shape (B=2, num_out=1, num_cls=1).
(grad_autodiff,) = torch.autograd.grad(
output, inputs, torch.ones_like(output)
) # shape (B=2, C=1, D=num_variables).

grad = diff_tc(inputs)
assert grad.shape == (2, num_variables + 1, 1) # shape (B=2, num_out=1*(D*C+1), num_cls=1).
# shape (B=2, num_out=D, num_cls=1) -> (B=2, C=1, D=num_variables).
grad = grad[:, :-1, :].movedim(1, 2)
# TODO: what if num_cls!=1?
if semiring == "sum-product":
assert allclose(grad, grad_autodiff)
elif semiring == "complex-lse-sum":
# TODO: is this correct?
assert allclose(torch.exp(grad), grad_autodiff)
else:
assert False
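
The check above compares a symbolically differentiated circuit against autograd; the same pattern on a toy function (a standalone sketch, not the cirkit API) looks like this:

import torch

x = torch.linspace(-1.0, 1.0, 5, dtype=torch.float64, requires_grad=True)
f = (x**3).sum()                        # f(x) = sum_i x_i^3
(g_autodiff,) = torch.autograd.grad(f, x)
g_analytic = 3.0 * x.detach() ** 2      # df/dx_i = 3 * x_i^2
assert torch.allclose(g_autodiff, g_analytic)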
10 changes: 7 additions & 3 deletions tests/conftest.py
@@ -7,13 +7,17 @@


@pytest.fixture(autouse=True)
def _setup_reproducible_global_state() -> None:
def _setup_global_state() -> None:
# Seed all RNGs.
seed = 42
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.set_grad_enabled(False)
torch.set_default_dtype(torch.float64) # type: ignore[no-untyped-call]
# Set deterministic algorithms.
torch.use_deterministic_algorithms(True, warn_only=True)
torch.backends.cudnn.benchmark = False
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8"
# Use high precision computation.
torch.set_default_dtype(torch.float64) # type: ignore[no-untyped-call]
# Disable autograd because we do not need it in most cases.
torch.set_grad_enabled(False)
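
A quick sanity check of what the reseeding part of this fixture guarantees (standalone sketch, default CPU generator assumed): equal seeds give bitwise-identical draws.

import torch

def draw(seed: int) -> torch.Tensor:
    torch.manual_seed(seed)
    return torch.randn(4, dtype=torch.float64)

assert torch.equal(draw(42), draw(42))  # same seed => identical tensors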
61 changes: 57 additions & 4 deletions tests/symbolic/test_circuit_operators.py
@@ -1,11 +1,17 @@
import itertools
from typing import Iterable, Tuple, TypeVar

import pytest

import cirkit.symbolic.functional as SF
from cirkit.symbolic.circuit import are_compatible
from cirkit.symbolic.layers import DenseLayer, HadamardLayer, LogPartitionLayer
from cirkit.symbolic.parameters import ConjugateParameter, KroneckerParameter, ReferenceParameter
from cirkit.symbolic.layers import DenseLayer, HadamardLayer, LogPartitionLayer, PolynomialLayer
from cirkit.symbolic.parameters import (
ConjugateParameter,
KroneckerParameter,
PolynomialDifferential,
ReferenceParameter,
)
from tests.symbolic.test_utils import build_multivariate_monotonic_structured_cpt_pc


@@ -31,7 +37,7 @@ def test_integrate_circuit(num_units: int, input_layer: str):

@pytest.mark.parametrize(
"num_units,input_layer",
itertools.product([1, 3], ["bernoulli", "gaussian"]),
itertools.product([1, 3], ["bernoulli", "gaussian", "polynomial"]),
)
def test_multiply_circuits(num_units: int, input_layer: str):
sc1 = build_multivariate_monotonic_structured_cpt_pc(
@@ -60,6 +66,13 @@ def test_multiply_circuits(num_units: int, input_layer: str):
assert all(isinstance(l, HadamardLayer) for l in prod_layers)
assert all(l.num_input_units == prod_num_units for l in dense_layers if sc.layer_outputs(l))
assert all(l.num_output_units == prod_num_units for l in dense_layers if sc.layer_outputs(l))
# Additionally check that polynomial degrees add under multiplication
if input_layer == "polynomial":
for in_1, in_2, in_prod in zip(sc1.inputs, sc2.inputs, sc.inputs):
assert isinstance(in_1, PolynomialLayer)
assert isinstance(in_2, PolynomialLayer)
assert isinstance(in_prod, PolynomialLayer)
assert in_prod.degree == in_1.degree + in_2.degree


@pytest.mark.parametrize(
@@ -94,7 +107,7 @@ def test_multiply_integrate_circuits(num_units: int, input_layer: str):

@pytest.mark.parametrize(
"num_units,input_layer",
itertools.product([1, 3], ["bernoulli", "gaussian"]),
itertools.product([1, 3], ["bernoulli", "gaussian", "polynomial"]),
)
def test_conjugate_circuit(num_units: int, input_layer: str):
sc1 = build_multivariate_monotonic_structured_cpt_pc(
@@ -112,3 +125,43 @@
assert all(isinstance(l, HadamardLayer) for l in prod_layers)
assert all(l.num_input_units == num_units for l in dense_layers if sc.layer_outputs(l))
assert all(l.num_output_units == num_units for l in dense_layers if sc.layer_outputs(l))


_T_co = TypeVar("_T_co", covariant=True)  # TODO: used by _batched; move them together


# TODO: this could be made public and moved to utils; it might be useful elsewhere.
# itertools.batched was introduced in Python 3.12; this mirrors it for older versions.
def _batched(iterable: Iterable[_T_co], n: int) -> Iterable[Tuple[_T_co, ...]]:
if n < 1:
raise ValueError("n must be at least one")
iterator = iter(iterable)
while batch := tuple(itertools.islice(iterator, n)):
yield batch
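
Expected behaviour, mirroring itertools.batched from Python 3.12 (the final batch may be shorter):

assert list(_batched("ABCDEFG", 3)) == [("A", "B", "C"), ("D", "E", "F"), ("G",)]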


@pytest.mark.parametrize("num_units", [1, 3])
def test_differentiate_circuit(num_units: int) -> None:
sc = build_multivariate_monotonic_structured_cpt_pc(
num_units=num_units, input_layer="polynomial"
)
diff_sc = SF.differentiate(sc)
assert diff_sc.is_smooth
assert diff_sc.is_decomposable
assert diff_sc.is_structured_decomposable
assert not diff_sc.is_omni_compatible
sc_inputs = list(sc.inputs)
diff_inputs = list(diff_sc.inputs)
sc_inner = list(sc.inner_layers)
diff_inner = list(diff_sc.inner_layers)
assert len(diff_inputs) == len(sc_inputs) * 2 # diff and self
assert all(
isinstance(dl.coeff.output, PolynomialDifferential)
and isinstance(sl.coeff.output, ReferenceParameter)
for dl, sl in _batched(diff_inputs, 2)
)
assert len(diff_inner) == sum(len(l.scope) for l in sc_inner) + len(sc_inner)
dense_layers = list(filter(lambda l: isinstance(l, DenseLayer), diff_sc.inner_layers))
assert dense_layers
assert all(isinstance(r, ReferenceParameter) for l in dense_layers for r in l.weight.inputs)
# TODO: should we keep more info about diff layer ordering? I.e., test the order w.r.t. each variable
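
For intuition on the differentiated leaves: assuming PolynomialDifferential maps a polynomial's coefficients to those of its derivative (as the name suggests), the degree drops by one and each coefficient is scaled by its exponent, e.g. with NumPy:

import numpy as np
from numpy.polynomial import polynomial as P

c = np.array([5.0, 0.0, 3.0])  # 5 + 3x^2
dc = P.polyder(c)              # [0., 6.], i.e. the derivative 6x
x = np.linspace(-2.0, 2.0, 9)
assert np.allclose(P.polyval(x, dc), 6.0 * x)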
26 changes: 25 additions & 1 deletion tests/symbolic/test_utils.py
@@ -10,7 +10,14 @@
NormalInitializer,
UniformInitializer,
)
from cirkit.symbolic.layers import CategoricalLayer, DenseLayer, GaussianLayer, HadamardLayer, Layer
from cirkit.symbolic.layers import (
CategoricalLayer,
DenseLayer,
GaussianLayer,
HadamardLayer,
Layer,
PolynomialLayer,
)
from cirkit.symbolic.parameters import (
ExpParameter,
LogSoftmaxParameter,
@@ -182,6 +189,23 @@ def build_multivariate_monotonic_structured_cpt_pc(
)
for vid in range(5)
}
elif input_layer == "polynomial":
if parameterize:
coeff_factory = None
else:
coeff_factory = lambda shape: Parameter.from_leaf(
TensorParameter(*shape, initializer=UniformInitializer())
)
input_layers = {
(vid,): PolynomialLayer(
Scope([vid]),
num_output_units=num_units,
num_channels=1,
degree=2, # TODO: currently hard-coded
coeff_factory=coeff_factory,
)
for vid in range(5)
}
else:
raise NotImplementedError()

