Add numpy_modules.py and Add Neural Network Solvers in numpy_backend.py and Add numpy's examples in neural_solvers.py…… #34

Merged on Dec 8, 2022 (25 commits)

Changes from all commits:
a750975  Update numpy_backend.py (heatingma, Nov 25, 2022)
82800a1  Update test_multi_graph_solvers.py (heatingma, Nov 25, 2022)
a7ab4cc  Merge pull request #1 from heatingma/local_branch (heatingma, Nov 25, 2022)
6a0d9b4  Update numpy_backend.py (heatingma, Nov 25, 2022)
374e95f  Merge pull request #2 from heatingma/local_branch (heatingma, Nov 25, 2022)
1afb7ac  Update numpy_backend.py (heatingma, Nov 28, 2022)
4bd15b8  Merge pull request #3 from heatingma/local_branch (heatingma, Nov 28, 2022)
55aec40  Update multi_graph_solvers.py (heatingma, Nov 28, 2022)
13dfaf3  Merge pull request #4 from heatingma/local_branch (heatingma, Nov 28, 2022)
21248f1  fix double blank lines between functions (rogerwwww, Nov 28, 2022)
edcf6e4  Update multi_graph_solvers.py (heatingma, Dec 7, 2022)
8cad8ce  Merge pull request #5 from heatingma/local_branch (heatingma, Dec 7, 2022)
ec51845  Merge branch 'main' into main (heatingma, Dec 7, 2022)
7039df3  Add numpy's doc in neural_solvers.py (heatingma, Dec 8, 2022)
bce7179  add numpy's doc in neural_solvers.py (heatingma, Dec 8, 2022)
b1b6cb2  Merge pull request #6 from heatingma/local_branch (heatingma, Dec 8, 2022)
e8460e2  add numpy_modules.py (heatingma, Dec 8, 2022)
82ffdbd  Update numpy_backend.py (heatingma, Dec 8, 2022)
3a8b785  change download and add get_md5 (heatingma, Dec 8, 2022)
966359d  add 'numpy' (heatingma, Dec 8, 2022)
cc86156  delete 'pdb' (heatingma, Dec 8, 2022)
df42a21  Update requirements.txt (heatingma, Dec 8, 2022)
45b8725  add wget (heatingma, Dec 8, 2022)
2b6f1a6  update downloading logic (rogerwwww, Dec 8, 2022)
6ab73d7  Update neural_solvers.py (heatingma, Dec 8, 2022)
1 change: 1 addition & 0 deletions pygmtools/multi_graph_solvers.py
@@ -655,6 +655,7 @@ def gamgm(A, W,
.. dropdown:: Numpy Example

::

>>> import numpy as np
>>> import pygmtools as pygm
>>> import itertools
192 changes: 192 additions & 0 deletions pygmtools/neural_solvers.py
@@ -67,6 +67,53 @@ def pca_gm(feat1, feat2, A1, A2, n1=None, n2=None,
.. note::
This function also supports non-batched input, by ignoring all batch dimensions in the input tensors.

.. dropdown:: Numpy Example

::

>>> import numpy as np
>>> import pygmtools as pygm
>>> pygm.BACKEND = 'numpy'
>>> np.random.seed(1)

# Generate a batch of isomorphic graphs
>>> batch_size = 10
>>> X_gt = np.zeros((batch_size, 4, 4))
>>> X_gt[:, np.arange(0, 4, dtype='i4'), np.random.permutation(4)] = 1
>>> A1 = 1. * (np.random.rand(batch_size, 4, 4) > 0.5)
>>> for i in np.arange(4): # discard self-loop edges
... for j in np.arange(batch_size):
... A1[j][i][i] = 0
>>> A2 = np.matmul(np.matmul(X_gt.swapaxes(1, 2), A1), X_gt)
>>> feat1 = np.random.rand(batch_size, 4, 1024) - 0.5
>>> feat2 = np.matmul(X_gt.swapaxes(1, 2), feat1)
>>> n1 = n2 = np.array([4] * batch_size)

# Match by PCA-GM (load pretrained model)
>>> X, net = pygm.pca_gm(feat1, feat2, A1, A2, n1, n2, return_network=True)
Downloading to ~/.cache/pygmtools/pca_gm_voc_numpy.npy...
>>> (pygm.hungarian(X) * X_gt).sum() / X_gt.sum() # accuracy
1.0

# Pass the net object to avoid rebuilding the model again
>>> X = pygm.pca_gm(feat1, feat2, A1, A2, n1, n2, network=net)
>>> (pygm.hungarian(X) * X_gt).sum() / X_gt.sum() # accuracy
1.0

# You may also load other pretrained weights
>>> X, net = pygm.pca_gm(feat1, feat2, A1, A2, n1, n2, return_network=True, pretrain='willow')
Downloading to ~/.cache/pygmtools/pca_gm_willow_numpy.npy...
>>> (pygm.hungarian(X) * X_gt).sum() / X_gt.sum() # accuracy
1.0

# You may configure your own model and integrate it into a deep learning pipeline. For example:
>>> net = pygm.utils.get_network(pygm.pca_gm, in_channel=1024, hidden_channel=2048, out_channel=512, num_layers=3, pretrain=False)
# feat1/feat2 may be outputs of other neural networks
>>> X = pygm.pca_gm(feat1, feat2, A1, A2, n1, n2, network=net)
>>> (pygm.hungarian(X) * X_gt).sum() / X_gt.sum() # accuracy
1.0

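The accuracy expression used throughout these examples can be unpacked step by step; an equivalent sketch, reusing X and X_gt from above (the intermediate names are illustrative, the computation is identical):

::

>>> X_discrete = pygm.hungarian(X)           # project the soft matching onto a permutation matrix
>>> num_correct = (X_discrete * X_gt).sum()  # count matches that agree with the ground truth
>>> acc = num_correct / X_gt.sum()           # normalize by the number of ground-truth matches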

.. dropdown:: PyTorch Example

::
@@ -274,6 +321,53 @@ def ipca_gm(feat1, feat2, A1, A2, n1=None, n2=None,
.. note::
This function also supports non-batched input, by ignoring all batch dimensions in the input tensors.

.. dropdown:: Numpy Example

::

>>> import numpy as np
>>> import pygmtools as pygm
>>> pygm.BACKEND = 'numpy'
>>> np.random.seed(1)

# Generate a batch of isomorphic graphs
>>> batch_size = 10
>>> X_gt = np.zeros((batch_size, 4, 4))
>>> X_gt[:, np.arange(0, 4, dtype='i4'), np.random.permutation(4)] = 1
>>> A1 = 1. * (np.random.rand(batch_size, 4, 4) > 0.5)
>>> for i in np.arange(4): # discard self-loop edges
... for j in np.arange(batch_size):
... A1[j][i][i] = 0
>>> A2 = np.matmul(np.matmul(X_gt.swapaxes(1, 2), A1), X_gt)
>>> feat1 = np.random.rand(batch_size, 4, 1024) - 0.5
>>> feat2 = np.matmul(X_gt.swapaxes(1, 2), feat1)
>>> n1 = n2 = np.array([4] * batch_size)

# Match by IPCA-GM (load pretrained model)
>>> X, net = pygm.ipca_gm(feat1, feat2, A1, A2, n1, n2, return_network=True)
Downloading to ~/.cache/pygmtools/ipca_gm_voc_numpy.npy...
>>> (pygm.hungarian(X) * X_gt).sum() / X_gt.sum() # accuracy
1.0

# Pass the net object to avoid rebuilding the model again
>>> X = pygm.ipca_gm(feat1, feat2, A1, A2, n1, n2, network=net)
>>> (pygm.hungarian(X) * X_gt).sum() / X_gt.sum() # accuracy
1.0

# You may also load other pretrained weights
>>> X, net = pygm.ipca_gm(feat1, feat2, A1, A2, n1, n2, return_network=True, pretrain='willow')
Downloading to ~/.cache/pygmtools/ipca_gm_willow_numpy.npy...
>>> (pygm.hungarian(X) * X_gt).sum() / X_gt.sum() # accuracy
1.0

# You may configure your own model and integrate it into a deep learning pipeline. For example:
>>> net = pygm.utils.get_network(pygm.ipca_gm, in_channel=1024, hidden_channel=2048, out_channel=512, num_layers=3, cross_iter=10, pretrain=False)
# feat1/feat2 may be outputs of other neural networks
>>> X = pygm.ipca_gm(feat1, feat2, A1, A2, n1, n2, network=net)
>>> (pygm.hungarian(X) * X_gt).sum() / X_gt.sum() # accuracy
1.0

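As the note above says, non-batched input is also supported; a minimal sketch, assuming the batched arrays from the example can simply be indexed away (X_single is an illustrative name):

::

>>> X_single = pygm.ipca_gm(feat1[0], feat2[0], A1[0], A2[0], network=net)  # one graph pair, no batch dim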

.. dropdown:: PyTorch Example

::
@@ -489,6 +583,55 @@ def cie(feat_node1, feat_node2, A1, A2, feat_edge1, feat_edge2, n1=None, n2=None
.. note::
This function also supports non-batched input, by ignoring all batch dimensions in the input tensors.

.. dropdown:: Numpy Example

::

>>> import numpy as np
>>> import pygmtools as pygm
>>> pygm.BACKEND = 'numpy'
>>> np.random.seed(1)

# Generate a batch of isomorphic graphs
>>> batch_size = 10
>>> X_gt = np.zeros((batch_size, 4, 4))
>>> X_gt[:, np.arange(0, 4, dtype='i4'), np.random.permutation(4)] = 1
>>> A1 = 1. * (np.random.rand(batch_size, 4, 4) > 0.5)
>>> for i in np.arange(4): # discard self-loop edges
... for j in np.arange(batch_size):
... A1[j][i][i] = 0
>>> e_feat1 = np.expand_dims(np.random.rand(batch_size, 4, 4) * A1, axis=-1)  # shape: (10, 4, 4, 1)
>>> A2 = np.matmul(np.matmul(X_gt.swapaxes(1, 2), A1), X_gt)
>>> e_feat2 = np.expand_dims(np.matmul(np.matmul(X_gt.swapaxes(1, 2), np.squeeze(e_feat1, axis=-1)), X_gt), axis=-1)
>>> feat1 = np.random.rand(batch_size, 4, 1024) - 0.5
>>> feat2 = np.matmul(X_gt.swapaxes(1, 2), feat1)
>>> n1 = n2 = np.array([4] * batch_size)

# Match by CIE (load pretrained model)
>>> X, net = pygm.cie(feat1, feat2, A1, A2, e_feat1, e_feat2, n1, n2, return_network=True)
Downloading to ~/.cache/pygmtools/cie_voc_numpy.npy...
>>> (pygm.hungarian(X) * X_gt).sum() / X_gt.sum() # accuracy
1.0

# Pass the net object to avoid rebuilding the model again
>>> X = pygm.cie(feat1, feat2, A1, A2, e_feat1, e_feat2, n1, n2, network=net)
>>> (pygm.hungarian(X) * X_gt).sum() / X_gt.sum() # accuracy
1.0

# You may also load other pretrained weights
>>> X, net = pygm.cie(feat1, feat2, A1, A2, e_feat1, e_feat2, n1, n2, return_network=True, pretrain='willow')
Downloading to ~/.cache/pygmtools/cie_willow_numpy.npy...
>>> (pygm.hungarian(X) * X_gt).sum() / X_gt.sum() # accuracy
1.0

# You may configure your own model and integrate it into a deep learning pipeline. For example:
>>> net = pygm.utils.get_network(pygm.cie, in_node_channel=1024, in_edge_channel=1, hidden_channel=2048, out_channel=512, num_layers=3, pretrain=False)
# feat1/feat2/e_feat1/e_feat2 may be outputs of other neural networks
>>> X = pygm.cie(feat1, feat2, A1, A2, e_feat1, e_feat2, n1, n2, network=net)
>>> (pygm.hungarian(X) * X_gt).sum() / X_gt.sum() # accuracy
1.0

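The dense one-liner that builds e_feat2 above can be unpacked for readability; an equivalent sketch (e1 and e2 are illustrative names, the result is the same array):

::

>>> e1 = np.squeeze(e_feat1, axis=-1)                         # (10, 4, 4): drop the channel dim
>>> e2 = np.matmul(np.matmul(X_gt.swapaxes(1, 2), e1), X_gt)  # permute rows and columns by the ground truth
>>> np.allclose(np.expand_dims(e2, axis=-1), e_feat2)         # identical to the one-liner above
True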

.. dropdown:: PyTorch Example

::
@@ -710,6 +853,55 @@ def ngm(K, n1=None, n2=None, n1max=None, n2max=None, x0=None,
.. note::
This function also supports non-batched input, by ignoring all batch dimensions in the input tensors.

.. dropdown:: Numpy Example

::

>>> import numpy as np
>>> import pygmtools as pygm
>>> pygm.BACKEND = 'numpy'
>>> np.random.seed(1)

# Generate a batch of isomorphic graphs
>>> batch_size = 10
>>> X_gt = np.zeros((batch_size, 4, 4))
>>> X_gt[:, np.arange(0, 4, dtype='i4'), np.random.permutation(4)] = 1
>>> A1 = np.random.rand(batch_size, 4, 4)
>>> A2 = np.matmul(np.matmul(X_gt.swapaxes(1, 2), A1), X_gt)
>>> n1 = n2 = np.array([4] * batch_size)

# Build affinity matrix
>>> conn1, edge1, ne1 = pygm.utils.dense_to_sparse(A1)
>>> conn2, edge2, ne2 = pygm.utils.dense_to_sparse(A2)
>>> import functools
>>> gaussian_aff = functools.partial(pygm.utils.gaussian_aff_fn, sigma=1.) # set affinity function
>>> K = pygm.utils.build_aff_mat(None, edge1, conn1, None, edge2, conn2, n1, None, n2, None, edge_aff_fn=gaussian_aff)

# Solve by NGM
>>> X, net = pygm.ngm(K, n1, n2, return_network=True)
Downloading to ~/.cache/pygmtools/ngm_voc_numpy.npy...
>>> (pygm.hungarian(X) * X_gt).sum() / X_gt.sum() # accuracy
1.0

# Pass the net object to avoid rebuilding the model again
>>> X = pygm.ngm(K, n1, n2, network=net)
>>> (pygm.hungarian(X) * X_gt).sum() / X_gt.sum() # accuracy
1.0

# You may also load other pretrained weights
>>> X, net = pygm.ngm(K, n1, n2, return_network=True, pretrain='willow')
Downloading to ~/.cache/pygmtools/ngm_willow_numpy.npy...
>>> (pygm.hungarian(X) * X_gt).sum() / X_gt.sum() # accuracy
1.0

# You may configure your own model and integrate it into a deep learning pipeline. For example:
>>> net = pygm.utils.get_network(pygm.ngm, gnn_channels=(32, 64, 128, 64, 32), sk_emb=8, pretrain=False)
# K may be the output of other neural networks (here K was constructed from node/edge features by pygm.utils.build_aff_mat)
>>> X = pygm.ngm(K, n1, n2, network=net)
>>> (pygm.hungarian(X) * X_gt).sum() / X_gt.sum() # accuracy
1.0

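The affinity matrix above is built from edge affinities only (None is passed in the node-feature slots). A sketch that also feeds node features (node_feat1/node_feat2 are made-up arrays; the node affinity is left at build_aff_mat's default):

::

>>> node_feat1 = np.random.rand(batch_size, 4, 10)           # hypothetical node features
>>> node_feat2 = np.matmul(X_gt.swapaxes(1, 2), node_feat1)  # permuted consistently with X_gt
>>> K2 = pygm.utils.build_aff_mat(node_feat1, edge1, conn1, node_feat2, edge2, conn2, n1, None, n2, None, edge_aff_fn=gaussian_aff)
>>> X = pygm.ngm(K2, n1, n2, network=net)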

.. dropdown:: PyTorch Example

::