From aa06920e544df81186c8652b2cd9067f5542cff3 Mon Sep 17 00:00:00 2001 From: Ahmed Kadhim Date: Mon, 4 Nov 2024 10:55:03 +0000 Subject: [PATCH 01/29] add recom sys --- .gitignore | 3 + .../Applications/RecommendationSystems.py | 60 +++++++++++++++++++ examples/MNISTConvolutionDemo.py | 13 ---- examples/MNISTVanillaDemo.py | 15 ----- examples/NoisyXORDemo.py | 21 +------ examples/SequenceClassificationDemo.py | 14 ----- 6 files changed, 65 insertions(+), 61 deletions(-) create mode 100644 .gitignore create mode 100644 examples/Applications/RecommendationSystems.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..b4f6c429 --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +build/ +GraphTsetlinMachine.egg-info/ +/dist/ \ No newline at end of file diff --git a/examples/Applications/RecommendationSystems.py b/examples/Applications/RecommendationSystems.py new file mode 100644 index 00000000..56b73b2e --- /dev/null +++ b/examples/Applications/RecommendationSystems.py @@ -0,0 +1,60 @@ +from GraphTsetlinMachine.graphs import Graphs +import numpy as np +from scipy.sparse import csr_matrix +from GraphTsetlinMachine.tm import MultiClassGraphTsetlinMachine +from time import time +import argparse +import random +import pandas as pd +import kagglehub + +def default_args(**kwargs): + parser = argparse.ArgumentParser() + parser.add_argument("--epochs", default=10, type=int) + parser.add_argument("--number-of-clauses", default=10, type=int) + parser.add_argument("--T", default=100, type=int) + parser.add_argument("--s", default=1.0, type=float) + parser.add_argument("--number-of-state-bits", default=8, type=int) + parser.add_argument("--depth", default=2, type=int) + parser.add_argument("--hypervector-size", default=32, type=int) + parser.add_argument("--hypervector-bits", default=2, type=int) + parser.add_argument("--message-size", default=256, type=int) + parser.add_argument("--message-bits", default=2, type=int) + parser.add_argument('--double-hashing', 
dest='double_hashing', default=False, action='store_true') + parser.add_argument("--noise", default=0.01, type=float) + parser.add_argument("--number-of-examples", default=10000, type=int) + parser.add_argument("--max-included-literals", default=4, type=int) + + args = parser.parse_args() + for key, value in kwargs.items(): + if key in args.__dict__: + setattr(args, key, value) + return args + +args = default_args() + +print("Creating training data") +path = kagglehub.dataset_download("arhamrumi/amazon-product-reviews") +print("Path to dataset files:", path) +data_file = path + "/Reviews.csv" # Adjust this path if necessary +data = pd.read_csv(data_file) +print("Data preview:", data.head()) + +number_of_nodes = 3 + +symbols = [] +users = data['user_id'].unique() +items = data['product_id'].unique() +categories = data['category'].unique() + +# Initialize Graphs with symbols for GTM +num_graphs = len(items) +symbols = ["I" + str(i) for i in items] + ["C" + str(c) for c in categories] + ["U" + str(u) for u in users] + +graphs_train = Graphs( + X_train.shape[0], + symbols=symbols, + hypervector_size=args.hypervector_size, + hypervector_bits=args.hypervector_bits, + double_hashing = args.double_hashing +) \ No newline at end of file diff --git a/examples/MNISTConvolutionDemo.py b/examples/MNISTConvolutionDemo.py index 8fe75473..a9ee5838 100644 --- a/examples/MNISTConvolutionDemo.py +++ b/examples/MNISTConvolutionDemo.py @@ -61,18 +61,13 @@ def default_args(**kwargs): hypervector_bits=args.hypervector_bits, double_hashing = args.double_hashing ) - for graph_id in range(X_train.shape[0]): graphs_train.set_number_of_graph_nodes(graph_id, number_of_nodes) - graphs_train.prepare_node_configuration() - for graph_id in range(X_train.shape[0]): for node_id in range(graphs_train.number_of_graph_nodes[graph_id]): graphs_train.add_graph_node(graph_id, node_id, 0) - graphs_train.prepare_edge_configuration() - for graph_id in range(X_train.shape[0]): if graph_id % 1000 == 0: 
print(graph_id, X_train.shape[0]) @@ -88,23 +83,17 @@ def default_args(**kwargs): graphs_train.add_graph_node_property(graph_id, node_id, "C:%d" % (q)) graphs_train.add_graph_node_property(graph_id, node_id, "R:%d" % (r)) - graphs_train.encode() - print("Training data produced") graphs_test = Graphs(X_test.shape[0], init_with=graphs_train) for graph_id in range(X_test.shape[0]): graphs_test.set_number_of_graph_nodes(graph_id, number_of_nodes) - graphs_test.prepare_node_configuration() - for graph_id in range(X_test.shape[0]): for node_id in range(graphs_test.number_of_graph_nodes[graph_id]): graphs_test.add_graph_node(graph_id, node_id, 0) - graphs_test.prepare_edge_configuration() - for graph_id in range(X_test.shape[0]): if graph_id % 1000 == 0: print(graph_id, X_test.shape[0]) @@ -120,9 +109,7 @@ def default_args(**kwargs): graphs_test.add_graph_node_property(graph_id, node_id, "C:%d" % (q)) graphs_test.add_graph_node_property(graph_id, node_id, "R:%d" % (r)) - graphs_test.encode() - print("Testing data produced") tm = MultiClassGraphTsetlinMachine( diff --git a/examples/MNISTVanillaDemo.py b/examples/MNISTVanillaDemo.py index 8bcb453c..02b95e2a 100644 --- a/examples/MNISTVanillaDemo.py +++ b/examples/MNISTVanillaDemo.py @@ -4,7 +4,6 @@ from GraphTsetlinMachine.tm import MultiClassGraphTsetlinMachine from time import time import argparse -from skimage.util import view_as_windows from keras.datasets import mnist from numba import jit @@ -53,51 +52,37 @@ def default_args(**kwargs): hypervector_bits=args.hypervector_bits, double_hashing = args.double_hashing ) - for graph_id in range(X_train.shape[0]): graphs_train.set_number_of_graph_nodes(graph_id, number_of_nodes) - graphs_train.prepare_node_configuration() - for graph_id in range(X_train.shape[0]): number_of_outgoing_edges = 0 graphs_train.add_graph_node(graph_id, 'Image Node', number_of_outgoing_edges) - graphs_train.prepare_edge_configuration() - for graph_id in range(X_train.shape[0]): if graph_id % 1000 == 
0: print(graph_id, X_train.shape[0]) for k in X_train[graph_id].nonzero()[0]: graphs_train.add_graph_node_property(graph_id, 'Image Node', "W%d,%d" % (k // 28, k % 28)) - graphs_train.encode() - print("Training data produced") graphs_test = Graphs(X_test.shape[0], init_with=graphs_train) - for graph_id in range(X_test.shape[0]): graphs_test.set_number_of_graph_nodes(graph_id, number_of_nodes) - graphs_test.prepare_node_configuration() - for graph_id in range(X_test.shape[0]): number_of_outgoing_edges = 0 graphs_test.add_graph_node(graph_id, 'Image Node', number_of_outgoing_edges) - graphs_test.prepare_edge_configuration() - for graph_id in range(X_test.shape[0]): if graph_id % 1000 == 0: print(graph_id, X_test.shape[0]) for k in X_test[graph_id].nonzero()[0]: graphs_test.add_graph_node_property(graph_id, 'Image Node', "W%d,%d" % (k // 28, k % 28)) - graphs_test.encode() - print("Testing data produced") tm = MultiClassGraphTsetlinMachine( diff --git a/examples/NoisyXORDemo.py b/examples/NoisyXORDemo.py index 83a4bbde..3069207d 100644 --- a/examples/NoisyXORDemo.py +++ b/examples/NoisyXORDemo.py @@ -34,31 +34,24 @@ def default_args(**kwargs): print("Creating training data") # Create train data - graphs_train = Graphs( args.number_of_examples, symbols=['A', 'B'], hypervector_size=args.hypervector_size, hypervector_bits=args.hypervector_bits, ) - for graph_id in range(args.number_of_examples): graphs_train.set_number_of_graph_nodes(graph_id, 2) - graphs_train.prepare_node_configuration() - for graph_id in range(args.number_of_examples): number_of_outgoing_edges = 1 graphs_train.add_graph_node(graph_id, 'Node 1', number_of_outgoing_edges) graphs_train.add_graph_node(graph_id, 'Node 2', number_of_outgoing_edges) - -graphs_train.prepare_edge_configuration() - +graphs_train.prepare_edge_configuration() for graph_id in range(args.number_of_examples): edge_type = "Plain" graphs_train.add_graph_node_edge(graph_id, 'Node 1', 'Node 2', edge_type)
graphs_train.add_graph_node_edge(graph_id, 'Node 2', 'Node 1', edge_type) - Y_train = np.empty(args.number_of_examples, dtype=np.uint32) for graph_id in range(args.number_of_examples): x1 = random.choice(['A', 'B']) @@ -74,32 +67,23 @@ def default_args(**kwargs): if np.random.rand() <= args.noise: Y_train[graph_id] = 1 - Y_train[graph_id] - graphs_train.encode() -# Create test data - +# Create test data print("Creating testing data") - graphs_test = Graphs(args.number_of_examples, init_with=graphs_train) - for graph_id in range(args.number_of_examples): graphs_test.set_number_of_graph_nodes(graph_id, 2) - graphs_test.prepare_node_configuration() - for graph_id in range(args.number_of_examples): number_of_outgoing_edges = 1 graphs_test.add_graph_node(graph_id, 'Node 1', number_of_outgoing_edges) graphs_test.add_graph_node(graph_id, 'Node 2', number_of_outgoing_edges) - graphs_test.prepare_edge_configuration() - for graph_id in range(args.number_of_examples): edge_type = "Plain" graphs_test.add_graph_node_edge(graph_id, 'Node 1', 'Node 2', edge_type) graphs_test.add_graph_node_edge(graph_id, 'Node 2', 'Node 1', edge_type) - Y_test = np.empty(args.number_of_examples, dtype=np.uint32) for graph_id in range(args.number_of_examples): x1 = random.choice(['A', 'B']) @@ -112,7 +96,6 @@ def default_args(**kwargs): Y_test[graph_id] = 0 else: Y_test[graph_id] = 1 - graphs_test.encode() tm = MultiClassGraphTsetlinMachine( diff --git a/examples/SequenceClassificationDemo.py b/examples/SequenceClassificationDemo.py index 7a2362cb..c5b13214 100644 --- a/examples/SequenceClassificationDemo.py +++ b/examples/SequenceClassificationDemo.py @@ -35,7 +35,6 @@ def default_args(**kwargs): print("Creating training data") # Create train data - graphs_train = Graphs( args.number_of_examples, symbols=['A'], @@ -43,19 +42,14 @@ def default_args(**kwargs): hypervector_bits=args.hypervector_bits, double_hashing = args.double_hashing ) - for graph_id in range(args.number_of_examples): 
graphs_train.set_number_of_graph_nodes(graph_id, np.random.randint(args.number_of_classes, args.max_sequence_length+1)) - graphs_train.prepare_node_configuration() - for graph_id in range(args.number_of_examples): for node_id in range(graphs_train.number_of_graph_nodes[graph_id]): number_of_edges = 2 if node_id > 0 and node_id < graphs_train.number_of_graph_nodes[graph_id]-1 else 1 graphs_train.add_graph_node(graph_id, node_id, number_of_edges) - graphs_train.prepare_edge_configuration() - Y_train = np.empty(args.number_of_examples, dtype=np.uint32) for graph_id in range(args.number_of_examples): for node_id in range(graphs_train.number_of_graph_nodes[graph_id]): @@ -76,26 +70,19 @@ def default_args(**kwargs): if np.random.rand() <= args.noise: Y_train[graph_id] = np.random.choice(np.setdiff1d(np.arange(args.number_of_classes), [Y_train[graph_id]])) - graphs_train.encode() # Create test data - print("Creating testing data") - graphs_test = Graphs(args.number_of_examples, init_with=graphs_train) for graph_id in range(args.number_of_examples): graphs_test.set_number_of_graph_nodes(graph_id, np.random.randint(args.number_of_classes, args.max_sequence_length+1)) - graphs_test.prepare_node_configuration() - for graph_id in range(args.number_of_examples): for node_id in range(graphs_test.number_of_graph_nodes[graph_id]): number_of_edges = 2 if node_id > 0 and node_id < graphs_test.number_of_graph_nodes[graph_id]-1 else 1 graphs_test.add_graph_node(graph_id, node_id, number_of_edges) - graphs_test.prepare_edge_configuration() - Y_test = np.empty(args.number_of_examples, dtype=np.uint32) for graph_id in range(args.number_of_examples): for node_id in range(graphs_test.number_of_graph_nodes[graph_id]): @@ -113,7 +100,6 @@ def default_args(**kwargs): node_id = np.random.randint(Y_test[graph_id], graphs_test.number_of_graph_nodes[graph_id]) for node_pos in range(Y_test[graph_id] + 1): graphs_test.add_graph_node_property(graph_id, node_id - node_pos, 'A') - graphs_test.encode() 
tm = MultiClassGraphTsetlinMachine( From 6280bfbc95ab1f3a2ce80ccde68b16851293dbb6 Mon Sep 17 00:00:00 2001 From: Ahmed Kadhim Date: Mon, 4 Nov 2024 10:58:20 +0000 Subject: [PATCH 02/29] rename --- examples/{Applications => applications}/RecommendationSystems.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename examples/{Applications => applications}/RecommendationSystems.py (100%) diff --git a/examples/Applications/RecommendationSystems.py b/examples/applications/RecommendationSystems.py similarity index 100% rename from examples/Applications/RecommendationSystems.py rename to examples/applications/RecommendationSystems.py From 771edcf2af1bd1b4bb97606adcddbda472cdae0c Mon Sep 17 00:00:00 2001 From: Ahmed Kadhim Date: Wed, 6 Nov 2024 08:49:04 +0000 Subject: [PATCH 03/29] complete recom sys --- examples/MNISTVanillaDemo.py | 31 ++-- examples/NoisyXORMNISTDemo.py | 16 -- .../applications/RecommendationSystems.py | 165 ++++++++++++++++-- examples/applications/test.ipynb | 101 +++++++++++ 4 files changed, 264 insertions(+), 49 deletions(-) create mode 100644 examples/applications/test.ipynb diff --git a/examples/MNISTVanillaDemo.py b/examples/MNISTVanillaDemo.py index 02b95e2a..4428343f 100644 --- a/examples/MNISTVanillaDemo.py +++ b/examples/MNISTVanillaDemo.py @@ -60,9 +60,8 @@ def default_args(**kwargs): graphs_train.add_graph_node(graph_id, 'Image Node', number_of_outgoing_edges) graphs_train.prepare_edge_configuration() for graph_id in range(X_train.shape[0]): - if graph_id % 1000 == 0: - print(graph_id, X_train.shape[0]) - + # if graph_id % 1000 == 0: + # print(graph_id, X_train.shape[0]) for k in X_train[graph_id].nonzero()[0]: graphs_train.add_graph_node_property(graph_id, 'Image Node', "W%d,%d" % (k // 28, k % 28)) graphs_train.encode() @@ -110,16 +109,16 @@ def default_args(**kwargs): print("%d %.2f %.2f %.2f %.2f" % (i, result_train, result_test, stop_training-start_training, stop_testing-start_testing)) -weights = tm.get_state()[1].reshape(2, -1) 
-for i in range(tm.number_of_clauses): - print("Clause #%d Weights:(%d %d)" % (i, weights[0,i], weights[1,i]), end=' ') - l = [] - for k in range(args.hypervector_size * 2): - if tm.ta_action(0, i, k): - if k < args.hypervector_size: - l.append("x%d" % (k)) - else: - l.append("NOT x%d" % (k - args.hypervector_size)) - print(" AND ".join(l)) - -print(graphs_train.hypervectors) \ No newline at end of file +# weights = tm.get_state()[1].reshape(2, -1) +# for i in range(tm.number_of_clauses): +# print("Clause #%d Weights:(%d %d)" % (i, weights[0,i], weights[1,i]), end=' ') +# l = [] +# for k in range(args.hypervector_size * 2): +# if tm.ta_action(0, i, k): +# if k < args.hypervector_size: +# l.append("x%d" % (k)) +# else: +# l.append("NOT x%d" % (k - args.hypervector_size)) +# print(" AND ".join(l)) + +# print(graphs_train.hypervectors) \ No newline at end of file diff --git a/examples/NoisyXORMNISTDemo.py b/examples/NoisyXORMNISTDemo.py index ff1b3151..5da47877 100644 --- a/examples/NoisyXORMNISTDemo.py +++ b/examples/NoisyXORMNISTDemo.py @@ -54,24 +54,18 @@ def default_args(**kwargs): hypervector_size=args.hypervector_size, hypervector_bits=args.hypervector_bits, ) - for graph_id in range(args.number_of_examples): graphs_train.set_number_of_graph_nodes(graph_id, 2) - graphs_train.prepare_node_configuration() - for graph_id in range(args.number_of_examples): number_of_outgoing_edges = 1 graphs_train.add_graph_node(graph_id, 'Node 1', number_of_outgoing_edges) graphs_train.add_graph_node(graph_id, 'Node 2', number_of_outgoing_edges) - graphs_train.prepare_edge_configuration() - for graph_id in range(args.number_of_examples): edge_type = "Plain" graphs_train.add_graph_node_edge(graph_id, 'Node 1', 'Node 2', edge_type) graphs_train.add_graph_node_edge(graph_id, 'Node 2', 'Node 1', edge_type) - Y_train = np.empty(args.number_of_examples, dtype=np.uint32) for graph_id in range(args.number_of_examples): x1 = random.choice([0, 1]) @@ -91,32 +85,23 @@ def 
default_args(**kwargs): if np.random.rand() <= args.noise: Y_train[graph_id] = 1 - Y_train[graph_id] - graphs_train.encode() # Create test data - print("Creating testing data") - graphs_test = Graphs(args.number_of_examples, init_with=graphs_train) - for graph_id in range(args.number_of_examples): graphs_test.set_number_of_graph_nodes(graph_id, 2) - graphs_test.prepare_node_configuration() - for graph_id in range(args.number_of_examples): number_of_outgoing_edges = 1 graphs_test.add_graph_node(graph_id, 'Node 1', number_of_outgoing_edges) graphs_test.add_graph_node(graph_id, 'Node 2', number_of_outgoing_edges) - graphs_test.prepare_edge_configuration() - for graph_id in range(args.number_of_examples): edge_type = "Plain" graphs_test.add_graph_node_edge(graph_id, 'Node 1', 'Node 2', edge_type) graphs_test.add_graph_node_edge(graph_id, 'Node 2', 'Node 1', edge_type) - Y_test = np.empty(args.number_of_examples, dtype=np.uint32) for graph_id in range(args.number_of_examples): x1 = random.choice([0, 1]) @@ -133,7 +118,6 @@ def default_args(**kwargs): Y_test[graph_id] = 0 else: Y_test[graph_id] = 1 - graphs_test.encode() tm = MultiClassGraphTsetlinMachine( diff --git a/examples/applications/RecommendationSystems.py b/examples/applications/RecommendationSystems.py index 56b73b2e..8901911c 100644 --- a/examples/applications/RecommendationSystems.py +++ b/examples/applications/RecommendationSystems.py @@ -1,25 +1,24 @@ from GraphTsetlinMachine.graphs import Graphs -import numpy as np -from scipy.sparse import csr_matrix from GraphTsetlinMachine.tm import MultiClassGraphTsetlinMachine from time import time import argparse -import random import pandas as pd import kagglehub +from sklearn.model_selection import train_test_split +from sklearn.preprocessing import LabelEncoder def default_args(**kwargs): parser = argparse.ArgumentParser() - parser.add_argument("--epochs", default=10, type=int) - parser.add_argument("--number-of-clauses", default=10, type=int) + 
parser.add_argument("--epochs", default=250, type=int) + parser.add_argument("--number-of-clauses", default=60, type=int) parser.add_argument("--T", default=100, type=int) - parser.add_argument("--s", default=1.0, type=float) + parser.add_argument("--s", default=10.0, type=float) parser.add_argument("--number-of-state-bits", default=8, type=int) parser.add_argument("--depth", default=2, type=int) - parser.add_argument("--hypervector-size", default=32, type=int) - parser.add_argument("--hypervector-bits", default=2, type=int) + parser.add_argument("--hypervector-size", default=1024, type=int) + parser.add_argument("--hypervector-bits", default=8, type=int) parser.add_argument("--message-size", default=256, type=int) - parser.add_argument("--message-bits", default=2, type=int) + parser.add_argument("--message-bits", default=8, type=int) parser.add_argument('--double-hashing', dest='double_hashing', default=False, action='store_true') parser.add_argument("--noise", default=0.01, type=float) parser.add_argument("--number-of-examples", default=10000, type=int) @@ -34,27 +33,159 @@ def default_args(**kwargs): args = default_args() print("Creating training data") -path = kagglehub.dataset_download("arhamrumi/amazon-product-reviews") +path = kagglehub.dataset_download("karkavelrajaj/amazon-sales-dataset") print("Path to dataset files:", path) -data_file = path + "/Reviews.csv" # Adjust this path if necessary +data_file = path + "/amazon.csv" data = pd.read_csv(data_file) -print("Data preview:", data.head()) +# print("Data preview:", data.head()) +data = data[['product_id', 'category', 'user_id', 'rating']] + +le_user = LabelEncoder() +le_item = LabelEncoder() +le_category = LabelEncoder() +le_rating = LabelEncoder() -number_of_nodes = 3 +data['user_id'] = le_user.fit_transform(data['user_id']) +data['product_id'] = le_item.fit_transform(data['product_id']) +data['category'] = le_category.fit_transform(data['category']) +data['rating'] = 
le_rating.fit_transform(data['rating']) + +x = data[['user_id', 'product_id', 'category']].values +y = data['rating'].values + +X_train, X_test, Y_train, Y_test = train_test_split(x, y, test_size=0.2, random_state=42) + +print("X_train shape:", X_train.shape) +print("y_train shape:", Y_train.shape) +print("X_test shape:", X_test.shape) +print("y_test shape:", Y_test.shape) -symbols = [] users = data['user_id'].unique() items = data['product_id'].unique() categories = data['category'].unique() # Initialize Graphs with symbols for GTM -num_graphs = len(items) -symbols = ["I" + str(i) for i in items] + ["C" + str(c) for c in categories] + ["U" + str(u) for u in users] +number_of_nodes = 3 +symbols = [] +symbols = ["U_" + str(u) for u in users] + ["I_" + str(i) for i in items] + ["C_" + str(c) for c in categories] +# Train data graphs_train = Graphs( X_train.shape[0], symbols=symbols, hypervector_size=args.hypervector_size, hypervector_bits=args.hypervector_bits, double_hashing = args.double_hashing -) \ No newline at end of file +) +for graph_id in range(X_train.shape[0]): + graphs_train.set_number_of_graph_nodes(graph_id, number_of_nodes) +graphs_train.prepare_node_configuration() +for graph_id in range(X_train.shape[0]): + for node_id in range(graphs_train.number_of_graph_nodes[graph_id]): + number_of_edges = 2 if node_id > 0 and node_id < graphs_train.number_of_graph_nodes[graph_id]-1 else 1 + if node_id == 0: + graphs_train.add_graph_node(graph_id, "User", number_of_edges) + elif node_id == 1: + graphs_train.add_graph_node(graph_id, "Item", number_of_edges) + else: + graphs_train.add_graph_node(graph_id, "Category", number_of_edges) +graphs_train.prepare_edge_configuration() +for graph_id in range(X_train.shape[0]): + for node_id in range(graphs_train.number_of_graph_nodes[graph_id]): + if node_id == 0: + graphs_train.add_graph_node_edge(graph_id, "User", "Item", "UserItem") + + if node_id == 1: + graphs_train.add_graph_node_edge(graph_id, "Item", "Category", 
"ItemCategory") + graphs_train.add_graph_node_edge(graph_id, "Item", "User", "ItemUser") + + if node_id == 2: + graphs_train.add_graph_node_edge(graph_id, "Category", "Item", "CatrgoryItem") + + graphs_train.add_graph_node_property(graph_id, "User", "U_" + str(X_train[graph_id][0])) + graphs_train.add_graph_node_property(graph_id, "Item", "I_" + str(X_train[graph_id][1])) + graphs_train.add_graph_node_property(graph_id, "Category", "C_" + str(X_train[graph_id][2])) +graphs_train.encode() +print("Training data produced") + +# Test data +graphs_test = Graphs(X_test.shape[0], init_with=graphs_train) +for graph_id in range(X_test.shape[0]): + graphs_test.set_number_of_graph_nodes(graph_id, number_of_nodes) +graphs_test.prepare_node_configuration() +for graph_id in range(X_test.shape[0]): + for node_id in range(graphs_test.number_of_graph_nodes[graph_id]): + number_of_edges = 2 if node_id > 0 and node_id < graphs_test.number_of_graph_nodes[graph_id]-1 else 1 + if node_id == 0: + graphs_test.add_graph_node(graph_id, "User", number_of_edges) + elif node_id == 1: + graphs_test.add_graph_node(graph_id, "Item", number_of_edges) + else: + graphs_test.add_graph_node(graph_id, "Category", number_of_edges) +graphs_test.prepare_edge_configuration() +for graph_id in range(X_test.shape[0]): + for node_id in range(graphs_test.number_of_graph_nodes[graph_id]): + if node_id == 0: + graphs_test.add_graph_node_edge(graph_id, "User", "Item", "UserItem") + + if node_id == 1: + graphs_test.add_graph_node_edge(graph_id, "Item", "Category", "ItemCategory") + graphs_test.add_graph_node_edge(graph_id, "Item", "User", "ItemUser") + + if node_id == 2: + graphs_test.add_graph_node_edge(graph_id, "Category", "Item", "CatrgoryItem") + + graphs_test.add_graph_node_property(graph_id, "User", "U_" + str(X_test[graph_id][0])) + graphs_test.add_graph_node_property(graph_id, "Item", "I_" + str(X_test[graph_id][1])) + graphs_test.add_graph_node_property(graph_id, "Category", "C_" + 
str(X_test[graph_id][2])) +graphs_test.encode() +print("Testing data produced") + +tm = MultiClassGraphTsetlinMachine( + args.number_of_clauses, + args.T, + args.s, + number_of_state_bits = args.number_of_state_bits, + depth=args.depth, + message_size=args.message_size, + message_bits=args.message_bits, + max_included_literals=args.max_included_literals, + double_hashing = args.double_hashing +) + +for i in range(args.epochs): + start_training = time() + tm.fit(graphs_train, Y_train, epochs=1, incremental=True) + stop_training = time() + + start_testing = time() + result_test = 100*(tm.predict(graphs_test) == Y_test).mean() + stop_testing = time() + + result_train = 100*(tm.predict(graphs_train) == Y_train).mean() + + print("%d %.2f %.2f %.2f %.2f" % (i, result_train, result_test, stop_training-start_training, stop_testing-start_testing)) + +# weights = tm.get_state()[1].reshape(2, -1) +# for i in range(tm.number_of_clauses): +# print("Clause #%d W:(%d %d)" % (i, weights[0,i], weights[1,i]), end=' ') +# l = [] +# for k in range(args.hypervector_size * 2): +# if tm.ta_action(0, i, k): +# if k < args.hypervector_size: +# l.append("x%d" % (k)) +# else: +# l.append("NOT x%d" % (k - args.hypervector_size)) + +# for k in range(args.message_size * 2): +# if tm.ta_action(1, i, k): +# if k < args.message_size: +# l.append("c%d" % (k)) +# else: +# l.append("NOT c%d" % (k - args.message_size)) + +# print(" AND ".join(l)) + +# print(graphs_test.hypervectors) +# print(tm.hypervectors) +# print(graphs_test.edge_type_id) \ No newline at end of file diff --git a/examples/applications/test.ipynb b/examples/applications/test.ipynb new file mode 100644 index 00000000..44e02947 --- /dev/null +++ b/examples/applications/test.ipynb @@ -0,0 +1,101 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Creating training data\n", + "Path to dataset files: 
/root/.cache/kagglehub/datasets/karkavelrajaj/amazon-sales-dataset/versions/1\n", + "Electronics|HomeTheater,TV&Video|Accessories|RemoteControls\n", + "X_train shape: (1172, 3)\n", + "y_train shape: (1172,)\n", + "X_test shape: (293, 3)\n", + "y_test shape: (293,)\n", + "111\n", + "Electronics|HomeTheater,TV&Video|Accessories|RemoteControls\n" + ] + } + ], + "source": [ + "from GraphTsetlinMachine.graphs import Graphs\n", + "import numpy as np\n", + "from scipy.sparse import csr_matrix\n", + "from GraphTsetlinMachine.tm import MultiClassGraphTsetlinMachine\n", + "from time import time\n", + "import argparse\n", + "import random\n", + "import pandas as pd\n", + "import kagglehub\n", + "from sklearn.model_selection import train_test_split\n", + "from sklearn.preprocessing import LabelEncoder\n", + "\n", + "\n", + "print(\"Creating training data\")\n", + "path = kagglehub.dataset_download(\"karkavelrajaj/amazon-sales-dataset\")\n", + "print(\"Path to dataset files:\", path)\n", + "data_file = path + \"/amazon.csv\" # Adjust this path if necessary\n", + "data = pd.read_csv(data_file)\n", + "# print(\"Data preview:\", data.head())\n", + "data = data[['product_id', 'category', 'user_id', 'rating']]\n", + "print(data['category'][100])\n", + " \n", + "# Step 2: Encode user_id, product_id, and category with LabelEncoder\n", + "# This converts string identifiers into unique integer values\n", + "le_user = LabelEncoder()\n", + "le_item = LabelEncoder()\n", + "le_category = LabelEncoder()\n", + "\n", + "data['user_id'] = le_user.fit_transform(data['user_id'])\n", + "data['product_id'] = le_item.fit_transform(data['product_id'])\n", + "data['category'] = le_category.fit_transform(data['category'])\n", + "\n", + "# Step 3: Prepare X (features) and y (labels)\n", + "x = data[['user_id', 'product_id', 'category']].values # Features: [user, item, category]\n", + "y = data['rating'].values # Labels: rating\n", + "\n", + "# Step 4: Split the data into training and test sets\n", + 
"X_train, X_test, Y_train, Y_test = train_test_split(x, y, test_size=0.2, random_state=42)\n", + "\n", + "# Display the shapes to verify the split\n", + "print(\"X_train shape:\", X_train.shape)\n", + "print(\"y_train shape:\", Y_train.shape)\n", + "print(\"X_test shape:\", X_test.shape)\n", + "print(\"y_test shape:\", Y_test.shape)\n", + "\n", + "users = data['user_id'].unique()\n", + "items = data['product_id'].unique()\n", + "categories = data['category'].unique()\n", + "\n", + "print(categories[100])\n", + "original_user_id = le_category.inverse_transform([data['category'][100]])[0]\n", + "print(original_user_id)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From ec3fc8725952d91e73fe17f0ad6a3628afa6ccd8 Mon Sep 17 00:00:00 2001 From: Ahmed Khalid Date: Wed, 4 Dec 2024 11:29:05 +0000 Subject: [PATCH 04/29] rename --- .devcontainer/devcontainer.json | 4 ++-- .devcontainer/docker-compose.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index e500cf24..b264ff4a 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,7 +1,7 @@ { - "name": "TM Graph Devcontainer", + "name": "TM Graph Recomm", "dockerComposeFile": "docker-compose.yml", - "service": "tm-graph-development", + "service": "tm-graph-recomm", "workspaceFolder": "/app", "forwardPorts": [], "postCreateCommand": "echo 'Devcontainer is ready'", diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml index 183c3acd..0dccd188 100644 --- a/.devcontainer/docker-compose.yml +++ 
b/.devcontainer/docker-compose.yml @@ -1,5 +1,5 @@ services: - tm-graph-development: + tm-graph-recomm: build: context: ../ dockerfile: .devcontainer/Dockerfile From 08693ab145312d82fe5e99bb04bb82a2e9a35194 Mon Sep 17 00:00:00 2001 From: Ahmed Khalid Date: Wed, 4 Dec 2024 13:22:41 +0000 Subject: [PATCH 05/29] tunning --- examples/applications/RecommendationSystems.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/examples/applications/RecommendationSystems.py b/examples/applications/RecommendationSystems.py index 8901911c..016b154f 100644 --- a/examples/applications/RecommendationSystems.py +++ b/examples/applications/RecommendationSystems.py @@ -10,18 +10,18 @@ def default_args(**kwargs): parser = argparse.ArgumentParser() parser.add_argument("--epochs", default=250, type=int) - parser.add_argument("--number-of-clauses", default=60, type=int) - parser.add_argument("--T", default=100, type=int) + parser.add_argument("--number-of-clauses", default=1000, type=int) + parser.add_argument("--T", default=1000, type=int) parser.add_argument("--s", default=10.0, type=float) parser.add_argument("--number-of-state-bits", default=8, type=int) - parser.add_argument("--depth", default=2, type=int) - parser.add_argument("--hypervector-size", default=1024, type=int) - parser.add_argument("--hypervector-bits", default=8, type=int) - parser.add_argument("--message-size", default=256, type=int) - parser.add_argument("--message-bits", default=8, type=int) + parser.add_argument("--depth", default=3, type=int) + parser.add_argument("--hypervector-size", default=16384, type=int) + parser.add_argument("--hypervector-bits", default=328, type=int) + parser.add_argument("--message-size", default=1024, type=int) + parser.add_argument("--message-bits", default=32, type=int) parser.add_argument('--double-hashing', dest='double_hashing', default=False, action='store_true') parser.add_argument("--noise", default=0.01, type=float) - 
parser.add_argument("--number-of-examples", default=10000, type=int) + parser.add_argument("--number-of-examples", default=1000, type=int) parser.add_argument("--max-included-literals", default=4, type=int) args = parser.parse_args() @@ -68,7 +68,7 @@ def default_args(**kwargs): number_of_nodes = 3 symbols = [] symbols = ["U_" + str(u) for u in users] + ["I_" + str(i) for i in items] + ["C_" + str(c) for c in categories] - +print(len(symbols)) # Train data graphs_train = Graphs( X_train.shape[0], From 9c4be1f888844ae37879a6fc97ff68561d4f62d2 Mon Sep 17 00:00:00 2001 From: Ahmed Khalid Date: Mon, 16 Dec 2024 12:19:18 +0000 Subject: [PATCH 06/29] update --- examples/applications/RecommendationSystems.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/applications/RecommendationSystems.py b/examples/applications/RecommendationSystems.py index 016b154f..4cb0751d 100644 --- a/examples/applications/RecommendationSystems.py +++ b/examples/applications/RecommendationSystems.py @@ -16,7 +16,7 @@ def default_args(**kwargs): parser.add_argument("--number-of-state-bits", default=8, type=int) parser.add_argument("--depth", default=3, type=int) parser.add_argument("--hypervector-size", default=16384, type=int) - parser.add_argument("--hypervector-bits", default=328, type=int) + parser.add_argument("--hypervector-bits", default=496, type=int) parser.add_argument("--message-size", default=1024, type=int) parser.add_argument("--message-bits", default=32, type=int) parser.add_argument('--double-hashing', dest='double_hashing', default=False, action='store_true') From daf8d5ad1f8319beafbe7d4c654bd1db02695a2c Mon Sep 17 00:00:00 2001 From: Ahmed Khalid Date: Tue, 17 Dec 2024 14:39:07 +0000 Subject: [PATCH 07/29] update --- .../applications/RecommendationSystems.py | 50 ++++++++++++++----- 1 file changed, 38 insertions(+), 12 deletions(-) diff --git a/examples/applications/RecommendationSystems.py b/examples/applications/RecommendationSystems.py index 
4cb0751d..7e2cdeef 100644 --- a/examples/applications/RecommendationSystems.py +++ b/examples/applications/RecommendationSystems.py @@ -3,6 +3,7 @@ from time import time import argparse import pandas as pd +import numpy as np import kagglehub from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder @@ -15,13 +16,13 @@ def default_args(**kwargs): parser.add_argument("--s", default=10.0, type=float) parser.add_argument("--number-of-state-bits", default=8, type=int) parser.add_argument("--depth", default=3, type=int) - parser.add_argument("--hypervector-size", default=16384, type=int) - parser.add_argument("--hypervector-bits", default=496, type=int) - parser.add_argument("--message-size", default=1024, type=int) - parser.add_argument("--message-bits", default=32, type=int) + parser.add_argument("--hypervector-size", default=1024, type=int) + parser.add_argument("--hypervector-bits", default=10, type=int) + parser.add_argument("--message-size", default=512, type=int) + parser.add_argument("--message-bits", default=10, type=int) parser.add_argument('--double-hashing', dest='double_hashing', default=False, action='store_true') parser.add_argument("--noise", default=0.01, type=float) - parser.add_argument("--number-of-examples", default=1000, type=int) + parser.add_argument("--number-of-examples", default=500, type=int) parser.add_argument("--max-included-literals", default=4, type=int) args = parser.parse_args() @@ -32,13 +33,38 @@ def default_args(**kwargs): args = default_args() -print("Creating training data") -path = kagglehub.dataset_download("karkavelrajaj/amazon-sales-dataset") -print("Path to dataset files:", path) -data_file = path + "/amazon.csv" -data = pd.read_csv(data_file) -# print("Data preview:", data.head()) -data = data[['product_id', 'category', 'user_id', 'rating']] +# print("Creating training data") +# path = kagglehub.dataset_download("karkavelrajaj/amazon-sales-dataset") +# print("Path to dataset files:", 
path) +# data_file = path + "/amazon.csv" +# data = pd.read_csv(data_file) +# # print("Data preview:", data.head()) +# data = data[['product_id', 'category', 'user_id', 'rating']] + +############################# artificial dataset ######################## +# Set random seed for reproducibility +np.random.seed(42) +# Define the size of the artificial dataset +num_users = 10 # Number of unique users +num_items = 50 # Number of unique items +num_categories = 10 # Number of unique categories +num_interactions = 10000 # Number of user-item interactions +# Generate random ratings (e.g., between 1 and 5) +ratings = np.random.choice(range(1, 3), num_interactions) +# Generate random user-item interactions +user_ids = np.random.choice(range(num_users), num_interactions) +item_ids = np.random.choice(range(num_items), num_interactions) +categories = np.random.choice(range(num_categories), num_interactions) +# Combine into a DataFrame +data = pd.DataFrame({ + 'user_id': user_ids, + 'product_id': item_ids, + 'category': categories, + 'rating': ratings +}) +print("Artificial Dataset Preview:") +print(data.head()) +######################################################################## le_user = LabelEncoder() le_item = LabelEncoder() From 9dacba5364e8abc3f9c746399f8c5185d4410cad Mon Sep 17 00:00:00 2001 From: Ahmed Khalid Date: Wed, 18 Dec 2024 10:25:12 +0000 Subject: [PATCH 08/29] update --- examples/applications/RecommendationSystems.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/applications/RecommendationSystems.py b/examples/applications/RecommendationSystems.py index 7e2cdeef..a453d42d 100644 --- a/examples/applications/RecommendationSystems.py +++ b/examples/applications/RecommendationSystems.py @@ -19,7 +19,7 @@ def default_args(**kwargs): parser.add_argument("--hypervector-size", default=1024, type=int) parser.add_argument("--hypervector-bits", default=10, type=int) parser.add_argument("--message-size", default=512, type=int) - 
parser.add_argument("--message-bits", default=10, type=int) + parser.add_argument("--message-bits", default=2, type=int) parser.add_argument('--double-hashing', dest='double_hashing', default=False, action='store_true') parser.add_argument("--noise", default=0.01, type=float) parser.add_argument("--number-of-examples", default=500, type=int) @@ -48,7 +48,7 @@ def default_args(**kwargs): num_users = 10 # Number of unique users num_items = 50 # Number of unique items num_categories = 10 # Number of unique categories -num_interactions = 10000 # Number of user-item interactions +num_interactions = 100000 # Number of user-item interactions # Generate random ratings (e.g., between 1 and 5) ratings = np.random.choice(range(1, 3), num_interactions) # Generate random user-item interactions From 3dd2b7c9f2d116aa7308bb1677449e7c3a798a5d Mon Sep 17 00:00:00 2001 From: Ahmed Khalid Date: Wed, 18 Dec 2024 10:35:51 +0000 Subject: [PATCH 09/29] run on gpu 6 --- .devcontainer/docker-compose.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml index 0dccd188..46271d06 100644 --- a/.devcontainer/docker-compose.yml +++ b/.devcontainer/docker-compose.yml @@ -9,4 +9,5 @@ services: devices: - driver: nvidia capabilities: [gpu] - count: 1 # Assign number of GPUs or use 'all' to assign all available GPUs \ No newline at end of file + # count: 1 # Assign number of GPUs or use 'all' to assign all available GPUs + device_ids: ["6"] \ No newline at end of file From e9bdcd6a605e95756ad34caad8cabc811b563b35 Mon Sep 17 00:00:00 2001 From: Ahmed Khalid Date: Wed, 18 Dec 2024 10:44:45 +0000 Subject: [PATCH 10/29] add requirments --- requirments.txt | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 requirments.txt diff --git a/requirments.txt b/requirments.txt new file mode 100644 index 00000000..12b86c03 --- /dev/null +++ b/requirments.txt @@ -0,0 +1,7 @@ +numpy +numba +pycuda +scipy +pandas 
+kagglehub +scikit-learn \ No newline at end of file From da31b30562feb78fd0ed42869c144c3f0dc4a2d6 Mon Sep 17 00:00:00 2001 From: Ahmed Khalid Date: Wed, 18 Dec 2024 13:27:36 +0000 Subject: [PATCH 11/29] update --- .../applications/RecommendationSystems.py | 166 ++++++++++++++---- 1 file changed, 130 insertions(+), 36 deletions(-) diff --git a/examples/applications/RecommendationSystems.py b/examples/applications/RecommendationSystems.py index a453d42d..eea88e73 100644 --- a/examples/applications/RecommendationSystems.py +++ b/examples/applications/RecommendationSystems.py @@ -11,18 +11,17 @@ def default_args(**kwargs): parser = argparse.ArgumentParser() parser.add_argument("--epochs", default=250, type=int) - parser.add_argument("--number-of-clauses", default=1000, type=int) - parser.add_argument("--T", default=1000, type=int) + parser.add_argument("--number-of-clauses", default=10000, type=int) + parser.add_argument("--T", default=10000, type=int) parser.add_argument("--s", default=10.0, type=float) parser.add_argument("--number-of-state-bits", default=8, type=int) - parser.add_argument("--depth", default=3, type=int) - parser.add_argument("--hypervector-size", default=1024, type=int) - parser.add_argument("--hypervector-bits", default=10, type=int) - parser.add_argument("--message-size", default=512, type=int) - parser.add_argument("--message-bits", default=2, type=int) + parser.add_argument("--depth", default=1, type=int) + parser.add_argument("--hypervector-size", default=4096, type=int) + parser.add_argument("--hypervector-bits", default=256, type=int) + parser.add_argument("--message-size", default=4096, type=int) + parser.add_argument("--message-bits", default=256, type=int) parser.add_argument('--double-hashing', dest='double_hashing', default=False, action='store_true') parser.add_argument("--noise", default=0.01, type=float) - parser.add_argument("--number-of-examples", default=500, type=int) parser.add_argument("--max-included-literals", default=4, 
type=int) args = parser.parse_args() @@ -33,38 +32,133 @@ def default_args(**kwargs): args = default_args() -# print("Creating training data") -# path = kagglehub.dataset_download("karkavelrajaj/amazon-sales-dataset") -# print("Path to dataset files:", path) -# data_file = path + "/amazon.csv" -# data = pd.read_csv(data_file) -# # print("Data preview:", data.head()) -# data = data[['product_id', 'category', 'user_id', 'rating']] +############################# real dataset ######################## + +print("Creating training data") +path = kagglehub.dataset_download("karkavelrajaj/amazon-sales-dataset") +print("Path to dataset files:", path) +data_file = path + "/amazon.csv" +data = pd.read_csv(data_file) +# print("Data preview:", data.head()) +data = data[['product_id', 'category', 'user_id', 'rating']] ############################# artificial dataset ######################## + # Set random seed for reproducibility -np.random.seed(42) -# Define the size of the artificial dataset -num_users = 10 # Number of unique users -num_items = 50 # Number of unique items -num_categories = 10 # Number of unique categories -num_interactions = 100000 # Number of user-item interactions -# Generate random ratings (e.g., between 1 and 5) -ratings = np.random.choice(range(1, 3), num_interactions) -# Generate random user-item interactions -user_ids = np.random.choice(range(num_users), num_interactions) -item_ids = np.random.choice(range(num_items), num_interactions) -categories = np.random.choice(range(num_categories), num_interactions) -# Combine into a DataFrame -data = pd.DataFrame({ - 'user_id': user_ids, - 'product_id': item_ids, - 'category': categories, - 'rating': ratings -}) -print("Artificial Dataset Preview:") -print(data.head()) +# np.random.seed(42) + +########################## ver 1 ############################ + +# num_users = 5 # Number of unique users +# num_items =10 # Number of unique items +# num_categories = 5 # Number of unique categories +# num_interactions = 
1000 # Number of user-item interactions +# # Generate random ratings (e.g., between 1 and 5) +# ratings = np.random.choice(range(1, 3), num_interactions) +# # Generate random user-item interactions +# user_ids = np.random.choice(range(num_users), num_interactions) +# item_ids = np.random.choice(range(num_items), num_interactions) +# categories = np.random.choice(range(num_categories), num_interactions) + +# data = pd.DataFrame({ +# 'user_id': user_ids, +# 'product_id': item_ids, +# 'category': categories, +# 'rating': ratings +# }) +# print("Artificial Dataset Preview:") + +########################## ver 2 ############################ + +# Parameters +# num_users = 100 # Number of unique users +# num_items = 50 # Number of unique items +# num_categories = 50 # Number of unique categories +# num_interactions = 1000 # Number of user-item interactions +# noise_ratio = 0.01 # Percentage of noisy interactions + +# # Generate user preferences: each user prefers 1-3 random categories +# user_preferences = { +# user: np.random.choice(range(num_categories), size=np.random.randint(1, 4), replace=False) +# for user in range(num_users) +# } + +# # Assign each item to a category +# item_categories = {item: np.random.choice(range(num_categories)) for item in range(num_items)} + +# # Generate interactions +# user_ids = np.random.choice(range(num_users), num_interactions) +# item_ids = np.random.choice(range(num_items), num_interactions) + +# # Generate ratings based on the pattern +# ratings = [] +# for user, item in zip(user_ids, item_ids): +# item_category = item_categories[item] +# if item_category in user_preferences[user]: +# ratings.append(np.random.choice([3, 4])) # High rating for preferred categories +# else: +# ratings.append(np.random.choice([1, 2])) # Low rating otherwise + +# # Introduce noise +# num_noisy = int(noise_ratio * num_interactions) +# noisy_indices = np.random.choice(range(num_interactions), num_noisy, replace=False) +# for idx in noisy_indices: +# 
ratings[idx] = np.random.choice(range(1, 6)) # Replace with random rating + +# # Combine into a DataFrame +# data = pd.DataFrame({ +# 'user_id': user_ids, +# 'product_id': item_ids, +# 'category': [item_categories[item] for item in item_ids], +# 'rating': ratings +# }) +# print("Artificial Dataset Preview:") + +########################### ver 3 ############################## + +# Parameters +# num_users = 100 # Number of unique users +# num_items = 50 # Number of unique items +# num_categories = 5 # Number of unique categories +# num_interactions = 10000 # Number of user-item interactions +# noise_ratio = 0.01 # Percentage of noisy interactions + +# # Step 1: Define deterministic user preferences +# user_preferences = {user: user % num_categories for user in range(num_users)} + +# # Step 2: Assign items to categories in a cyclic pattern +# item_categories = {item: item % num_categories for item in range(num_items)} + +# # Step 3: Generate deterministic interactions +# user_ids = np.arange(num_interactions) % num_users # Cycle through users +# item_ids = np.arange(num_interactions) % num_items # Cycle through items + +# # Step 4: Generate ratings based on the pattern +# ratings = [] +# for user, item in zip(user_ids, item_ids): +# preferred_category = user_preferences[user] +# item_category = item_categories[item] +# if item_category == preferred_category: +# ratings.append(5) # High rating for preferred category +# else: +# ratings.append(1) # Low rating otherwise + +# # Step 5: Introduce noise +# num_noisy = int(noise_ratio * num_interactions) +# noisy_indices = np.random.choice(range(num_interactions), num_noisy, replace=False) +# for idx in noisy_indices: +# ratings[idx] = np.random.choice(range(1, 6)) # Replace with random rating + +# # Step 6: Create a DataFrame +# data = pd.DataFrame({ +# 'user_id': user_ids, +# 'product_id': item_ids, +# 'category': [item_categories[item] for item in item_ids], +# 'rating': ratings +# }) + 
######################################################################## +print(data.head()) le_user = LabelEncoder() le_item = LabelEncoder() From fababa59963ca02253cdb8ff9a1f9127d228c607 Mon Sep 17 00:00:00 2001 From: Ahmed Khalid Date: Wed, 18 Dec 2024 13:47:42 +0000 Subject: [PATCH 12/29] expanded ds --- .../applications/RecommendationSystems.py | 34 +++++++++++++++++-- 1 file changed, 31 insertions(+), 3 deletions(-) diff --git a/examples/applications/RecommendationSystems.py b/examples/applications/RecommendationSystems.py index eea88e73..db7000fb 100644 --- a/examples/applications/RecommendationSystems.py +++ b/examples/applications/RecommendationSystems.py @@ -38,10 +38,38 @@ def default_args(**kwargs): path = kagglehub.dataset_download("karkavelrajaj/amazon-sales-dataset") print("Path to dataset files:", path) data_file = path + "/amazon.csv" -data = pd.read_csv(data_file) +org_data = pd.read_csv(data_file) # print("Data preview:", data.head()) -data = data[['product_id', 'category', 'user_id', 'rating']] - +org_data = org_data[['product_id', 'category', 'user_id', 'rating']] +#################################### expanded +org_data['rating'] = pd.to_numeric(org_data['rating'], errors='coerce') # Coerce invalid values to NaN +org_data.dropna(subset=['rating'], inplace=True) # Drop rows with NaN ratings +org_data['rating'] = org_data['rating'].astype(int) +# Expand the dataset 10 times +data = pd.concat([org_data] * 10, ignore_index=True) + +# Shuffle the expanded dataset +data = data.sample(frac=1, random_state=42).reset_index(drop=True) + +# Add noise +# Define the noise ratio +noise_ratio = 0.1 # 10% noise + +# Select rows to apply noise +num_noisy_rows = int(noise_ratio * len(data)) +noisy_indices = np.random.choice(data.index, size=num_noisy_rows, replace=False) + +# Add noise to ratings +data.loc[noisy_indices, 'rating'] = np.random.choice(range(1, 6), size=num_noisy_rows) + +# Add noise to categories +unique_categories = data['category'].unique() 
+data.loc[noisy_indices, 'category'] = np.random.choice(unique_categories, size=num_noisy_rows) + +# Print a preview of the noisy and expanded dataset +print("Original data shape:", org_data.shape) +print("Expanded data shape:", data.shape) +print("Data preview:\n", data.head()) ############################# artificial dataset ######################## # Set random seed for reproducibility From 82305ab67649e458064ee4bfb5d732da250b433b Mon Sep 17 00:00:00 2001 From: Ahmed Khalid Date: Thu, 19 Dec 2024 14:57:03 +0000 Subject: [PATCH 13/29] update --- .../applications/RecommendationSystems.py | 2 +- examples/applications/test.ipynb | 226 +++++++++++++++--- 2 files changed, 199 insertions(+), 29 deletions(-) diff --git a/examples/applications/RecommendationSystems.py b/examples/applications/RecommendationSystems.py index db7000fb..4a1daa46 100644 --- a/examples/applications/RecommendationSystems.py +++ b/examples/applications/RecommendationSystems.py @@ -22,7 +22,7 @@ def default_args(**kwargs): parser.add_argument("--message-bits", default=256, type=int) parser.add_argument('--double-hashing', dest='double_hashing', default=False, action='store_true') parser.add_argument("--noise", default=0.01, type=float) - parser.add_argument("--max-included-literals", default=4, type=int) + parser.add_argument("--max-included-literals", default=10, type=int) args = parser.parse_args() for key, value in kwargs.items(): diff --git a/examples/applications/test.ipynb b/examples/applications/test.ipynb index 44e02947..7d389f1b 100644 --- a/examples/applications/test.ipynb +++ b/examples/applications/test.ipynb @@ -2,66 +2,138 @@ "cells": [ { "cell_type": "code", - "execution_count": 19, + "execution_count": 1, "metadata": {}, "outputs": [ { - "name": "stdout", + "name": "stderr", "output_type": "stream", "text": [ - "Creating training data\n", - "Path to dataset files: /root/.cache/kagglehub/datasets/karkavelrajaj/amazon-sales-dataset/versions/1\n", - 
"Electronics|HomeTheater,TV&Video|Accessories|RemoteControls\n", - "X_train shape: (1172, 3)\n", - "y_train shape: (1172,)\n", - "X_test shape: (293, 3)\n", - "y_test shape: (293,)\n", - "111\n", - "Electronics|HomeTheater,TV&Video|Accessories|RemoteControls\n" + "/usr/local/lib/python3.10/dist-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n", + "usage: ipykernel_launcher.py [-h] [--epochs EPOCHS]\n", + " [--number-of-clauses NUMBER_OF_CLAUSES] [--T T]\n", + " [--s S]\n", + " [--number-of-state-bits NUMBER_OF_STATE_BITS]\n", + " [--depth DEPTH]\n", + " [--hypervector-size HYPERVECTOR_SIZE]\n", + " [--hypervector-bits HYPERVECTOR_BITS]\n", + " [--message-size MESSAGE_SIZE]\n", + " [--message-bits MESSAGE_BITS] [--double-hashing]\n", + " [--noise NOISE]\n", + " [--max-included-literals MAX_INCLUDED_LITERALS]\n", + "ipykernel_launcher.py: error: unrecognized arguments: --f=/root/.local/share/jupyter/runtime/kernel-v3a1695e0e67c01cd0a818bc897e0f886c634ee3d4.json\n" + ] + }, + { + "ename": "SystemExit", + "evalue": "2", + "output_type": "error", + "traceback": [ + "An exception has occurred, use %tb to see the full traceback.\n", + "\u001b[0;31mSystemExit\u001b[0m\u001b[0;31m:\u001b[0m 2\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/root/.local/lib/python3.10/site-packages/IPython/core/interactiveshell.py:3585: UserWarning: To exit: use 'exit', 'quit', or Ctrl-D.\n", + " warn(\"To exit: use 'exit', 'quit', or Ctrl-D.\", stacklevel=1)\n" ] } ], "source": [ "from GraphTsetlinMachine.graphs import Graphs\n", - "import numpy as np\n", - "from scipy.sparse import csr_matrix\n", "from GraphTsetlinMachine.tm import MultiClassGraphTsetlinMachine\n", "from time import time\n", "import argparse\n", - "import random\n", "import pandas as pd\n", + "import numpy as np\n", "import 
kagglehub\n", "from sklearn.model_selection import train_test_split\n", "from sklearn.preprocessing import LabelEncoder\n", "\n", + "def default_args(**kwargs):\n", + " parser = argparse.ArgumentParser()\n", + " parser.add_argument(\"--epochs\", default=250, type=int)\n", + " parser.add_argument(\"--number-of-clauses\", default=10000, type=int)\n", + " parser.add_argument(\"--T\", default=10000, type=int)\n", + " parser.add_argument(\"--s\", default=10.0, type=float)\n", + " parser.add_argument(\"--number-of-state-bits\", default=8, type=int)\n", + " parser.add_argument(\"--depth\", default=1, type=int)\n", + " parser.add_argument(\"--hypervector-size\", default=4096, type=int)\n", + " parser.add_argument(\"--hypervector-bits\", default=256, type=int)\n", + " parser.add_argument(\"--message-size\", default=4096, type=int)\n", + " parser.add_argument(\"--message-bits\", default=256, type=int)\n", + " parser.add_argument('--double-hashing', dest='double_hashing', default=False, action='store_true')\n", + " parser.add_argument(\"--noise\", default=0.01, type=float)\n", + " parser.add_argument(\"--max-included-literals\", default=10, type=int)\n", + "\n", + " args = parser.parse_args()\n", + " for key, value in kwargs.items():\n", + " if key in args.__dict__:\n", + " setattr(args, key, value)\n", + " return args\n", + "\n", + "args = default_args()\n", + "\n", + "############################# real dataset ########################\n", "\n", "print(\"Creating training data\")\n", "path = kagglehub.dataset_download(\"karkavelrajaj/amazon-sales-dataset\")\n", "print(\"Path to dataset files:\", path)\n", - "data_file = path + \"/amazon.csv\" # Adjust this path if necessary\n", - "data = pd.read_csv(data_file)\n", + "data_file = path + \"/amazon.csv\" \n", + "org_data = pd.read_csv(data_file)\n", "# print(\"Data preview:\", data.head())\n", - "data = data[['product_id', 'category', 'user_id', 'rating']]\n", - "print(data['category'][100])\n", + "org_data = 
org_data[['product_id', 'category', 'user_id', 'rating']]\n", + "#################################### expanded \n", + "org_data['rating'] = pd.to_numeric(org_data['rating'], errors='coerce') # Coerce invalid values to NaN\n", + "org_data.dropna(subset=['rating'], inplace=True) # Drop rows with NaN ratings\n", + "org_data['rating'] = org_data['rating'].astype(int)\n", + "# Expand the dataset 10 times\n", + "data = pd.concat([org_data] * 10, ignore_index=True)\n", + "\n", + "# Shuffle the expanded dataset\n", + "data = data.sample(frac=1, random_state=42).reset_index(drop=True)\n", + "\n", + "# Add noise\n", + "# Define the noise ratio\n", + "noise_ratio = 0.1 # 10% noise\n", + "\n", + "# Select rows to apply noise\n", + "num_noisy_rows = int(noise_ratio * len(data))\n", + "noisy_indices = np.random.choice(data.index, size=num_noisy_rows, replace=False)\n", + "\n", + "# Add noise to ratings\n", + "data.loc[noisy_indices, 'rating'] = np.random.choice(range(1, 6), size=num_noisy_rows)\n", + "\n", + "# Add noise to categories\n", + "unique_categories = data['category'].unique()\n", + "data.loc[noisy_indices, 'category'] = np.random.choice(unique_categories, size=num_noisy_rows)\n", + "\n", + "# Print a preview of the noisy and expanded dataset\n", + "print(\"Original data shape:\", org_data.shape)\n", + "print(\"Expanded data shape:\", data.shape)\n", + "print(\"Data preview:\\n\", data.head())\n", + "\n", + "print(data.head())\n", " \n", - "# Step 2: Encode user_id, product_id, and category with LabelEncoder\n", - "# This converts string identifiers into unique integer values\n", "le_user = LabelEncoder()\n", "le_item = LabelEncoder()\n", "le_category = LabelEncoder()\n", + "le_rating = LabelEncoder() \n", "\n", "data['user_id'] = le_user.fit_transform(data['user_id'])\n", "data['product_id'] = le_item.fit_transform(data['product_id'])\n", "data['category'] = le_category.fit_transform(data['category'])\n", + "data['rating'] = le_rating.fit_transform(data['rating'])\n", 
"\n", - "# Step 3: Prepare X (features) and y (labels)\n", - "x = data[['user_id', 'product_id', 'category']].values # Features: [user, item, category]\n", - "y = data['rating'].values # Labels: rating\n", + "x = data[['user_id', 'product_id', 'category']].values \n", + "y = data['rating'].values \n", "\n", - "# Step 4: Split the data into training and test sets\n", "X_train, X_test, Y_train, Y_test = train_test_split(x, y, test_size=0.2, random_state=42)\n", "\n", - "# Display the shapes to verify the split\n", "print(\"X_train shape:\", X_train.shape)\n", "print(\"y_train shape:\", Y_train.shape)\n", "print(\"X_test shape:\", X_test.shape)\n", @@ -71,9 +143,107 @@ "items = data['product_id'].unique()\n", "categories = data['category'].unique()\n", "\n", - "print(categories[100])\n", - "original_user_id = le_category.inverse_transform([data['category'][100]])[0]\n", - "print(original_user_id)" + "# Initialize Graphs with symbols for GTM\n", + "number_of_nodes = 3\n", + "symbols = []\n", + "symbols = [\"U_\" + str(u) for u in users] + [\"I_\" + str(i) for i in items] + [\"C_\" + str(c) for c in categories] \n", + "print(len(symbols))\n", + "# Train data\n", + "graphs_train = Graphs(\n", + " X_train.shape[0],\n", + " symbols=symbols,\n", + " hypervector_size=args.hypervector_size,\n", + " hypervector_bits=args.hypervector_bits,\n", + " double_hashing = args.double_hashing\n", + ")\n", + "for graph_id in range(X_train.shape[0]):\n", + " graphs_train.set_number_of_graph_nodes(graph_id, number_of_nodes)\n", + "graphs_train.prepare_node_configuration()\n", + "for graph_id in range(X_train.shape[0]):\n", + " for node_id in range(graphs_train.number_of_graph_nodes[graph_id]):\n", + " number_of_edges = 2 if node_id > 0 and node_id < graphs_train.number_of_graph_nodes[graph_id]-1 else 1\n", + " if node_id == 0:\n", + " graphs_train.add_graph_node(graph_id, \"User\", number_of_edges)\n", + " elif node_id == 1:\n", + " graphs_train.add_graph_node(graph_id, \"Item\", 
number_of_edges)\n", + " else:\n", + " graphs_train.add_graph_node(graph_id, \"Category\", number_of_edges)\n", + "graphs_train.prepare_edge_configuration()\n", + "for graph_id in range(X_train.shape[0]):\n", + " for node_id in range(graphs_train.number_of_graph_nodes[graph_id]):\n", + " if node_id == 0:\n", + " graphs_train.add_graph_node_edge(graph_id, \"User\", \"Item\", \"UserItem\")\n", + " \n", + " if node_id == 1:\n", + " graphs_train.add_graph_node_edge(graph_id, \"Item\", \"Category\", \"ItemCategory\")\n", + " graphs_train.add_graph_node_edge(graph_id, \"Item\", \"User\", \"ItemUser\")\n", + " \n", + " if node_id == 2:\n", + " graphs_train.add_graph_node_edge(graph_id, \"Category\", \"Item\", \"CatrgoryItem\")\n", + "\n", + " graphs_train.add_graph_node_property(graph_id, \"User\", \"U_\" + str(X_train[graph_id][0]))\n", + " graphs_train.add_graph_node_property(graph_id, \"Item\", \"I_\" + str(X_train[graph_id][1]))\n", + " graphs_train.add_graph_node_property(graph_id, \"Category\", \"C_\" + str(X_train[graph_id][2]))\n", + "graphs_train.encode()\n", + "print(\"Training data produced\")\n", + "\n", + "# Test data\n", + "graphs_test = Graphs(X_test.shape[0], init_with=graphs_train)\n", + "for graph_id in range(X_test.shape[0]):\n", + " graphs_test.set_number_of_graph_nodes(graph_id, number_of_nodes)\n", + "graphs_test.prepare_node_configuration()\n", + "for graph_id in range(X_test.shape[0]):\n", + " for node_id in range(graphs_test.number_of_graph_nodes[graph_id]):\n", + " number_of_edges = 2 if node_id > 0 and node_id < graphs_test.number_of_graph_nodes[graph_id]-1 else 1\n", + " if node_id == 0:\n", + " graphs_test.add_graph_node(graph_id, \"User\", number_of_edges)\n", + " elif node_id == 1:\n", + " graphs_test.add_graph_node(graph_id, \"Item\", number_of_edges)\n", + " else:\n", + " graphs_test.add_graph_node(graph_id, \"Category\", number_of_edges)\n", + "graphs_test.prepare_edge_configuration()\n", + "for graph_id in range(X_test.shape[0]):\n", + " 
for node_id in range(graphs_test.number_of_graph_nodes[graph_id]):\n", + " if node_id == 0:\n", + " graphs_test.add_graph_node_edge(graph_id, \"User\", \"Item\", \"UserItem\")\n", + " \n", + " if node_id == 1:\n", + " graphs_test.add_graph_node_edge(graph_id, \"Item\", \"Category\", \"ItemCategory\")\n", + " graphs_test.add_graph_node_edge(graph_id, \"Item\", \"User\", \"ItemUser\")\n", + " \n", + " if node_id == 2:\n", + " graphs_test.add_graph_node_edge(graph_id, \"Category\", \"Item\", \"CatrgoryItem\")\n", + "\n", + " graphs_test.add_graph_node_property(graph_id, \"User\", \"U_\" + str(X_test[graph_id][0]))\n", + " graphs_test.add_graph_node_property(graph_id, \"Item\", \"I_\" + str(X_test[graph_id][1]))\n", + " graphs_test.add_graph_node_property(graph_id, \"Category\", \"C_\" + str(X_test[graph_id][2]))\n", + "graphs_test.encode()\n", + "print(\"Testing data produced\")\n", + "\n", + "tm = MultiClassGraphTsetlinMachine(\n", + " args.number_of_clauses,\n", + " args.T,\n", + " args.s,\n", + " number_of_state_bits = args.number_of_state_bits,\n", + " depth=args.depth,\n", + " message_size=args.message_size,\n", + " message_bits=args.message_bits,\n", + " max_included_literals=args.max_included_literals,\n", + " double_hashing = args.double_hashing\n", + ")\n", + "\n", + "for i in range(args.epochs):\n", + " start_training = time()\n", + " tm.fit(graphs_train, Y_train, epochs=1, incremental=True)\n", + " stop_training = time()\n", + "\n", + " start_testing = time()\n", + " result_test = 100*(tm.predict(graphs_test) == Y_test).mean()\n", + " stop_testing = time()\n", + "\n", + " result_train = 100*(tm.predict(graphs_train) == Y_train).mean()\n", + "\n", + " print(\"%d %.2f %.2f %.2f %.2f\" % (i, result_train, result_test, stop_training-start_training, stop_testing-start_testing))" ] } ], From 218a96f11d24076ac1076c4d153b280b778700a0 Mon Sep 17 00:00:00 2001 From: Ahmed Khalid Date: Fri, 20 Dec 2024 10:16:33 +0000 Subject: [PATCH 14/29] before add example no --- 
.../applications/RecommendationSystems.py | 47 ++++++++++--------- examples/applications/test.ipynb | 2 +- 2 files changed, 25 insertions(+), 24 deletions(-) diff --git a/examples/applications/RecommendationSystems.py b/examples/applications/RecommendationSystems.py index 4a1daa46..2b57ecc4 100644 --- a/examples/applications/RecommendationSystems.py +++ b/examples/applications/RecommendationSystems.py @@ -10,19 +10,20 @@ def default_args(**kwargs): parser = argparse.ArgumentParser() - parser.add_argument("--epochs", default=250, type=int) + parser.add_argument("--epochs", default=10, type=int) parser.add_argument("--number-of-clauses", default=10000, type=int) parser.add_argument("--T", default=10000, type=int) parser.add_argument("--s", default=10.0, type=float) parser.add_argument("--number-of-state-bits", default=8, type=int) - parser.add_argument("--depth", default=1, type=int) + parser.add_argument("--depth", default=3, type=int) parser.add_argument("--hypervector-size", default=4096, type=int) parser.add_argument("--hypervector-bits", default=256, type=int) - parser.add_argument("--message-size", default=4096, type=int) - parser.add_argument("--message-bits", default=256, type=int) + parser.add_argument("--message-size", default=256, type=int) + parser.add_argument("--message-bits", default=2, type=int) parser.add_argument('--double-hashing', dest='double_hashing', default=False, action='store_true') parser.add_argument("--noise", default=0.01, type=float) parser.add_argument("--max-included-literals", default=10, type=int) + parser.add_argument("--number-of-examples", default=1000, type=int) args = parser.parse_args() for key, value in kwargs.items(): @@ -314,25 +315,25 @@ def default_args(**kwargs): print("%d %.2f %.2f %.2f %.2f" % (i, result_train, result_test, stop_training-start_training, stop_testing-start_testing)) -# weights = tm.get_state()[1].reshape(2, -1) -# for i in range(tm.number_of_clauses): -# print("Clause #%d W:(%d %d)" % (i, weights[0,i], 
weights[1,i]), end=' ') -# l = [] -# for k in range(args.hypervector_size * 2): -# if tm.ta_action(0, i, k): -# if k < args.hypervector_size: -# l.append("x%d" % (k)) -# else: -# l.append("NOT x%d" % (k - args.hypervector_size)) - -# for k in range(args.message_size * 2): -# if tm.ta_action(1, i, k): -# if k < args.message_size: -# l.append("c%d" % (k)) -# else: -# l.append("NOT c%d" % (k - args.message_size)) - -# print(" AND ".join(l)) +weights = tm.get_state()[1].reshape(2, -1) +for i in range(tm.number_of_clauses): + print("Clause #%d W:(%d %d)" % (i, weights[0,i], weights[1,i]), end=' ') + l = [] + for k in range(args.hypervector_size * 2): + if tm.ta_action(0, i, k): + if k < args.hypervector_size: + l.append("x%d" % (k)) + else: + l.append("NOT x%d" % (k - args.hypervector_size)) + + for k in range(args.message_size * 2): + if tm.ta_action(1, i, k): + if k < args.message_size: + l.append("c%d" % (k)) + else: + l.append("NOT c%d" % (k - args.message_size)) + + print(" AND ".join(l)) # print(graphs_test.hypervectors) # print(tm.hypervectors) diff --git a/examples/applications/test.ipynb b/examples/applications/test.ipynb index 7d389f1b..1465bf14 100644 --- a/examples/applications/test.ipynb +++ b/examples/applications/test.ipynb @@ -22,7 +22,7 @@ " [--message-bits MESSAGE_BITS] [--double-hashing]\n", " [--noise NOISE]\n", " [--max-included-literals MAX_INCLUDED_LITERALS]\n", - "ipykernel_launcher.py: error: unrecognized arguments: --f=/root/.local/share/jupyter/runtime/kernel-v3a1695e0e67c01cd0a818bc897e0f886c634ee3d4.json\n" + "ipykernel_launcher.py: error: unrecognized arguments: --f=/root/.local/share/jupyter/runtime/kernel-v306f6e67794e909fd94dbef768cafee2e613728cc.json\n" ] }, { From 799493fd1d3241fb0a6e5271bbaeaecb8c9271cb Mon Sep 17 00:00:00 2001 From: Ahmed Khalid Date: Fri, 20 Dec 2024 11:49:12 +0000 Subject: [PATCH 15/29] orgnizing files --- .../applications/RecommendationSystems.py | 340 ------------------ .../prepare_dataset.cpython-310.pyc | Bin 0 
-> 1415 bytes .../recommendation/main_products.py | 178 +++++++++ .../recommendation/prepare_dataset.py | 145 ++++++++ 4 files changed, 323 insertions(+), 340 deletions(-) delete mode 100644 examples/applications/RecommendationSystems.py create mode 100644 examples/applications/recommendation/__pycache__/prepare_dataset.cpython-310.pyc create mode 100644 examples/applications/recommendation/main_products.py create mode 100644 examples/applications/recommendation/prepare_dataset.py diff --git a/examples/applications/RecommendationSystems.py b/examples/applications/RecommendationSystems.py deleted file mode 100644 index 2b57ecc4..00000000 --- a/examples/applications/RecommendationSystems.py +++ /dev/null @@ -1,340 +0,0 @@ -from GraphTsetlinMachine.graphs import Graphs -from GraphTsetlinMachine.tm import MultiClassGraphTsetlinMachine -from time import time -import argparse -import pandas as pd -import numpy as np -import kagglehub -from sklearn.model_selection import train_test_split -from sklearn.preprocessing import LabelEncoder - -def default_args(**kwargs): - parser = argparse.ArgumentParser() - parser.add_argument("--epochs", default=10, type=int) - parser.add_argument("--number-of-clauses", default=10000, type=int) - parser.add_argument("--T", default=10000, type=int) - parser.add_argument("--s", default=10.0, type=float) - parser.add_argument("--number-of-state-bits", default=8, type=int) - parser.add_argument("--depth", default=3, type=int) - parser.add_argument("--hypervector-size", default=4096, type=int) - parser.add_argument("--hypervector-bits", default=256, type=int) - parser.add_argument("--message-size", default=256, type=int) - parser.add_argument("--message-bits", default=2, type=int) - parser.add_argument('--double-hashing', dest='double_hashing', default=False, action='store_true') - parser.add_argument("--noise", default=0.01, type=float) - parser.add_argument("--max-included-literals", default=10, type=int) - 
parser.add_argument("--number-of-examples", default=1000, type=int) - - args = parser.parse_args() - for key, value in kwargs.items(): - if key in args.__dict__: - setattr(args, key, value) - return args - -args = default_args() - -############################# real dataset ######################## - -print("Creating training data") -path = kagglehub.dataset_download("karkavelrajaj/amazon-sales-dataset") -print("Path to dataset files:", path) -data_file = path + "/amazon.csv" -org_data = pd.read_csv(data_file) -# print("Data preview:", data.head()) -org_data = org_data[['product_id', 'category', 'user_id', 'rating']] -#################################### expanded -org_data['rating'] = pd.to_numeric(org_data['rating'], errors='coerce') # Coerce invalid values to NaN -org_data.dropna(subset=['rating'], inplace=True) # Drop rows with NaN ratings -org_data['rating'] = org_data['rating'].astype(int) -# Expand the dataset 10 times -data = pd.concat([org_data] * 10, ignore_index=True) - -# Shuffle the expanded dataset -data = data.sample(frac=1, random_state=42).reset_index(drop=True) - -# Add noise -# Define the noise ratio -noise_ratio = 0.1 # 10% noise - -# Select rows to apply noise -num_noisy_rows = int(noise_ratio * len(data)) -noisy_indices = np.random.choice(data.index, size=num_noisy_rows, replace=False) - -# Add noise to ratings -data.loc[noisy_indices, 'rating'] = np.random.choice(range(1, 6), size=num_noisy_rows) - -# Add noise to categories -unique_categories = data['category'].unique() -data.loc[noisy_indices, 'category'] = np.random.choice(unique_categories, size=num_noisy_rows) - -# Print a preview of the noisy and expanded dataset -print("Original data shape:", org_data.shape) -print("Expanded data shape:", data.shape) -print("Data preview:\n", data.head()) -############################# artificial dataset ######################## - -# Set random seed for reproducibility -# np.random.seed(42) - -########################## ver 1 
############################ - -# num_users = 5 # Number of unique users -# num_items =10 # Number of unique items -# num_categories = 5 # Number of unique categories -# num_interactions = 1000 # Number of user-item interactions -# # Generate random ratings (e.g., between 1 and 5) -# ratings = np.random.choice(range(1, 3), num_interactions) -# # Generate random user-item interactions -# user_ids = np.random.choice(range(num_users), num_interactions) -# item_ids = np.random.choice(range(num_items), num_interactions) -# categories = np.random.choice(range(num_categories), num_interactions) - -# data = pd.DataFrame({ -# 'user_id': user_ids, -# 'product_id': item_ids, -# 'category': categories, -# 'rating': ratings -# }) -# print("Artificial Dataset Preview:") - -########################## ver 2 ############################ - -# Parameters -# num_users = 100 # Number of unique users -# num_items = 50 # Number of unique items -# num_categories = 50 # Number of unique categories -# num_interactions = 1000 # Number of user-item interactions -# noise_ratio = 0.01 # Percentage of noisy interactions - -# # Generate user preferences: each user prefers 1-3 random categories -# user_preferences = { -# user: np.random.choice(range(num_categories), size=np.random.randint(1, 4), replace=False) -# for user in range(num_users) -# } - -# # Assign each item to a category -# item_categories = {item: np.random.choice(range(num_categories)) for item in range(num_items)} - -# # Generate interactions -# user_ids = np.random.choice(range(num_users), num_interactions) -# item_ids = np.random.choice(range(num_items), num_interactions) - -# # Generate ratings based on the pattern -# ratings = [] -# for user, item in zip(user_ids, item_ids): -# item_category = item_categories[item] -# if item_category in user_preferences[user]: -# ratings.append(np.random.choice([3, 4])) # High rating for preferred categories -# else: -# ratings.append(np.random.choice([1, 2])) # Low rating otherwise - -# # 
Introduce noise -# num_noisy = int(noise_ratio * num_interactions) -# noisy_indices = np.random.choice(range(num_interactions), num_noisy, replace=False) -# for idx in noisy_indices: -# ratings[idx] = np.random.choice(range(1, 6)) # Replace with random rating - -# # Combine into a DataFrame -# data = pd.DataFrame({ -# 'user_id': user_ids, -# 'product_id': item_ids, -# 'category': [item_categories[item] for item in item_ids], -# 'rating': ratings -# }) -# print("Artificial Dataset Preview:") - -########################### ver 3 ############################## - -# Parameters -# num_users = 100 # Number of unique users -# num_items = 50 # Number of unique items -# num_categories = 5 # Number of unique categories -# num_interactions = 10000 # Number of user-item interactions -# noise_ratio = 0.01 # Percentage of noisy interactions - -# # Step 1: Define deterministic user preferences -# user_preferences = {user: user % num_categories for user in range(num_users)} - -# # Step 2: Assign items to categories in a cyclic pattern -# item_categories = {item: item % num_categories for item in range(num_items)} - -# # Step 3: Generate deterministic interactions -# user_ids = np.arange(num_interactions) % num_users # Cycle through users -# item_ids = np.arange(num_interactions) % num_items # Cycle through items - -# # Step 4: Generate ratings based on the pattern -# ratings = [] -# for user, item in zip(user_ids, item_ids): -# preferred_category = user_preferences[user] -# item_category = item_categories[item] -# if item_category == preferred_category: -# ratings.append(5) # High rating for preferred category -# else: -# ratings.append(1) # Low rating otherwise - -# # Step 5: Introduce noise -# num_noisy = int(noise_ratio * num_interactions) -# noisy_indices = np.random.choice(range(num_interactions), num_noisy, replace=False) -# for idx in noisy_indices: -# ratings[idx] = np.random.choice(range(1, 6)) # Replace with random rating - -# # Step 6: Create a DataFrame -# data = 
pd.DataFrame({ -# 'user_id': user_ids, -# 'product_id': item_ids, -# 'category': [item_categories[item] for item in item_ids], -# 'rating': ratings -# }) - -######################################################################## -print(data.head()) - -le_user = LabelEncoder() -le_item = LabelEncoder() -le_category = LabelEncoder() -le_rating = LabelEncoder() - -data['user_id'] = le_user.fit_transform(data['user_id']) -data['product_id'] = le_item.fit_transform(data['product_id']) -data['category'] = le_category.fit_transform(data['category']) -data['rating'] = le_rating.fit_transform(data['rating']) - -x = data[['user_id', 'product_id', 'category']].values -y = data['rating'].values - -X_train, X_test, Y_train, Y_test = train_test_split(x, y, test_size=0.2, random_state=42) - -print("X_train shape:", X_train.shape) -print("y_train shape:", Y_train.shape) -print("X_test shape:", X_test.shape) -print("y_test shape:", Y_test.shape) - -users = data['user_id'].unique() -items = data['product_id'].unique() -categories = data['category'].unique() - -# Initialize Graphs with symbols for GTM -number_of_nodes = 3 -symbols = [] -symbols = ["U_" + str(u) for u in users] + ["I_" + str(i) for i in items] + ["C_" + str(c) for c in categories] -print(len(symbols)) -# Train data -graphs_train = Graphs( - X_train.shape[0], - symbols=symbols, - hypervector_size=args.hypervector_size, - hypervector_bits=args.hypervector_bits, - double_hashing = args.double_hashing -) -for graph_id in range(X_train.shape[0]): - graphs_train.set_number_of_graph_nodes(graph_id, number_of_nodes) -graphs_train.prepare_node_configuration() -for graph_id in range(X_train.shape[0]): - for node_id in range(graphs_train.number_of_graph_nodes[graph_id]): - number_of_edges = 2 if node_id > 0 and node_id < graphs_train.number_of_graph_nodes[graph_id]-1 else 1 - if node_id == 0: - graphs_train.add_graph_node(graph_id, "User", number_of_edges) - elif node_id == 1: - graphs_train.add_graph_node(graph_id, "Item", 
number_of_edges) - else: - graphs_train.add_graph_node(graph_id, "Category", number_of_edges) -graphs_train.prepare_edge_configuration() -for graph_id in range(X_train.shape[0]): - for node_id in range(graphs_train.number_of_graph_nodes[graph_id]): - if node_id == 0: - graphs_train.add_graph_node_edge(graph_id, "User", "Item", "UserItem") - - if node_id == 1: - graphs_train.add_graph_node_edge(graph_id, "Item", "Category", "ItemCategory") - graphs_train.add_graph_node_edge(graph_id, "Item", "User", "ItemUser") - - if node_id == 2: - graphs_train.add_graph_node_edge(graph_id, "Category", "Item", "CatrgoryItem") - - graphs_train.add_graph_node_property(graph_id, "User", "U_" + str(X_train[graph_id][0])) - graphs_train.add_graph_node_property(graph_id, "Item", "I_" + str(X_train[graph_id][1])) - graphs_train.add_graph_node_property(graph_id, "Category", "C_" + str(X_train[graph_id][2])) -graphs_train.encode() -print("Training data produced") - -# Test data -graphs_test = Graphs(X_test.shape[0], init_with=graphs_train) -for graph_id in range(X_test.shape[0]): - graphs_test.set_number_of_graph_nodes(graph_id, number_of_nodes) -graphs_test.prepare_node_configuration() -for graph_id in range(X_test.shape[0]): - for node_id in range(graphs_test.number_of_graph_nodes[graph_id]): - number_of_edges = 2 if node_id > 0 and node_id < graphs_test.number_of_graph_nodes[graph_id]-1 else 1 - if node_id == 0: - graphs_test.add_graph_node(graph_id, "User", number_of_edges) - elif node_id == 1: - graphs_test.add_graph_node(graph_id, "Item", number_of_edges) - else: - graphs_test.add_graph_node(graph_id, "Category", number_of_edges) -graphs_test.prepare_edge_configuration() -for graph_id in range(X_test.shape[0]): - for node_id in range(graphs_test.number_of_graph_nodes[graph_id]): - if node_id == 0: - graphs_test.add_graph_node_edge(graph_id, "User", "Item", "UserItem") - - if node_id == 1: - graphs_test.add_graph_node_edge(graph_id, "Item", "Category", "ItemCategory") - 
graphs_test.add_graph_node_edge(graph_id, "Item", "User", "ItemUser") - - if node_id == 2: - graphs_test.add_graph_node_edge(graph_id, "Category", "Item", "CatrgoryItem") - - graphs_test.add_graph_node_property(graph_id, "User", "U_" + str(X_test[graph_id][0])) - graphs_test.add_graph_node_property(graph_id, "Item", "I_" + str(X_test[graph_id][1])) - graphs_test.add_graph_node_property(graph_id, "Category", "C_" + str(X_test[graph_id][2])) -graphs_test.encode() -print("Testing data produced") - -tm = MultiClassGraphTsetlinMachine( - args.number_of_clauses, - args.T, - args.s, - number_of_state_bits = args.number_of_state_bits, - depth=args.depth, - message_size=args.message_size, - message_bits=args.message_bits, - max_included_literals=args.max_included_literals, - double_hashing = args.double_hashing -) - -for i in range(args.epochs): - start_training = time() - tm.fit(graphs_train, Y_train, epochs=1, incremental=True) - stop_training = time() - - start_testing = time() - result_test = 100*(tm.predict(graphs_test) == Y_test).mean() - stop_testing = time() - - result_train = 100*(tm.predict(graphs_train) == Y_train).mean() - - print("%d %.2f %.2f %.2f %.2f" % (i, result_train, result_test, stop_training-start_training, stop_testing-start_testing)) - -weights = tm.get_state()[1].reshape(2, -1) -for i in range(tm.number_of_clauses): - print("Clause #%d W:(%d %d)" % (i, weights[0,i], weights[1,i]), end=' ') - l = [] - for k in range(args.hypervector_size * 2): - if tm.ta_action(0, i, k): - if k < args.hypervector_size: - l.append("x%d" % (k)) - else: - l.append("NOT x%d" % (k - args.hypervector_size)) - - for k in range(args.message_size * 2): - if tm.ta_action(1, i, k): - if k < args.message_size: - l.append("c%d" % (k)) - else: - l.append("NOT c%d" % (k - args.message_size)) - - print(" AND ".join(l)) - -# print(graphs_test.hypervectors) -# print(tm.hypervectors) -# print(graphs_test.edge_type_id) \ No newline at end of file diff --git 
a/examples/applications/recommendation/__pycache__/prepare_dataset.cpython-310.pyc b/examples/applications/recommendation/__pycache__/prepare_dataset.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..334918248b0038345364fc10337498871cad89c2 GIT binary patch literal 1415 zcmZ8hOK&SR7_~i*q)D2lZBzsjC}Kqr_jbjmssQQ2T^1yekh&T9j(w+-PR4`n^bsYi zvds^GRm+ZFfcPukvg)1{E6z75QZKf~kI%>7^LWheZjYh;@uMy-Q^x+H&GRO(`3j%= z41;2dSL~d(vp5&+9G%BXsOV>Qo~T$Qm{Y~SXVdf%H`rND<9{u}g1%Sy+-nRT8t*t~ zp5F^49{8Fh(Td#(&Q?st@3A%a(S5w)I1@NaR$`X4r=nmg{f)oRV5@VsJbGod^krR4 zyp?553njf=etaqIrM%LWl`}azmUFo@^$RB}?OqVTX}>)BPWp?9H|-4wCT~j|pDg=N z@Rzx}T8_W9Wl`3$YEct+Asc-%jYF@oMlEuml`3>|>2+c3ZRjkVwj@ez>(w+3Nn^{} zhn-7V6qUYMya}Tx2Qp=@>&i$KL<1!VL}f4;lGeyH3ULDm!w$hS@-1|YEi&5v&g>rY z_|NIFY?@rM|IVX>TSfV3u%AD*IW%i2av(4<1 zA|BRNr`sR^C`1mQEh!DTEG7keQYh~t{^WYTlv9hKqe*J~opI^QvoGX_)bWqM#FV$ko z?T45wJ+kHao0|qYw0h3iGdfsI9Vy*Pe!$1#NW{@djQKb3mq@kWxOSYm;R= qq%?t7&8__e>unwh`OGpIu&w#-=^X!wN`FD6c0__Mo)@|p^!^25{fROF literal 0 HcmV?d00001 diff --git a/examples/applications/recommendation/main_products.py b/examples/applications/recommendation/main_products.py new file mode 100644 index 00000000..03d5b641 --- /dev/null +++ b/examples/applications/recommendation/main_products.py @@ -0,0 +1,178 @@ +from GraphTsetlinMachine.graphs import Graphs +from GraphTsetlinMachine.tm import MultiClassGraphTsetlinMachine +from time import time +import argparse +import pandas as pd +import numpy as np +from sklearn.model_selection import train_test_split +from sklearn.preprocessing import LabelEncoder +import prepare_dataset + +def default_args(**kwargs): + parser = argparse.ArgumentParser() + parser.add_argument("--epochs", default=10, type=int) + parser.add_argument("--number-of-clauses", default=1000, type=int) + parser.add_argument("--T", default=10000, type=int) + parser.add_argument("--s", default=10.0, type=float) + parser.add_argument("--number-of-state-bits", default=8, type=int) + 
parser.add_argument("--depth", default=3, type=int) + parser.add_argument("--hypervector-size", default=4096, type=int) + parser.add_argument("--hypervector-bits", default=256, type=int) + parser.add_argument("--message-size", default=256, type=int) + parser.add_argument("--message-bits", default=2, type=int) + parser.add_argument('--double-hashing', dest='double_hashing', default=False, action='store_true') + parser.add_argument("--noise", default=0.01, type=float) + parser.add_argument("--max-included-literals", default=10, type=int) + + args = parser.parse_args() + for key, value in kwargs.items(): + if key in args.__dict__: + setattr(args, key, value) + return args +args = default_args() + +data = prepare_dataset.aug_amazon_products() +print(data.head()) +le_user = LabelEncoder() +le_item = LabelEncoder() +le_category = LabelEncoder() +le_rating = LabelEncoder() +data['user_id'] = le_user.fit_transform(data['user_id']) +data['product_id'] = le_item.fit_transform(data['product_id']) +data['category'] = le_category.fit_transform(data['category']) +data['rating'] = le_rating.fit_transform(data['rating']) +x = data[['user_id', 'product_id', 'category']].values +y = data['rating'].values +X_train, X_test, Y_train, Y_test = train_test_split(x, y, test_size=0.2, random_state=42) +print("X_train shape:", X_train.shape) +print("y_train shape:", Y_train.shape) +print("X_test shape:", X_test.shape) +print("y_test shape:", Y_test.shape) +users = data['user_id'].unique() +items = data['product_id'].unique() +categories = data['category'].unique() +# Initialize Graphs with symbols for GTM +number_of_nodes = 3 +symbols = [] +symbols = ["U_" + str(u) for u in users] + ["I_" + str(i) for i in items] + ["C_" + str(c) for c in categories] +print("Symbols: ",len(symbols)) + +# Train data +graphs_train = Graphs( + X_train.shape[0], + symbols=symbols, + hypervector_size=args.hypervector_size, + hypervector_bits=args.hypervector_bits, + double_hashing = args.double_hashing +) +for 
graph_id in range(X_train.shape[0]): + graphs_train.set_number_of_graph_nodes(graph_id, number_of_nodes) +graphs_train.prepare_node_configuration() +for graph_id in range(X_train.shape[0]): + for node_id in range(graphs_train.number_of_graph_nodes[graph_id]): + number_of_edges = 2 if node_id > 0 and node_id < graphs_train.number_of_graph_nodes[graph_id]-1 else 1 + if node_id == 0: + graphs_train.add_graph_node(graph_id, "User", number_of_edges) + elif node_id == 1: + graphs_train.add_graph_node(graph_id, "Item", number_of_edges) + else: + graphs_train.add_graph_node(graph_id, "Category", number_of_edges) +graphs_train.prepare_edge_configuration() +for graph_id in range(X_train.shape[0]): + for node_id in range(graphs_train.number_of_graph_nodes[graph_id]): + if node_id == 0: + graphs_train.add_graph_node_edge(graph_id, "User", "Item", "UserItem") + + if node_id == 1: + graphs_train.add_graph_node_edge(graph_id, "Item", "Category", "ItemCategory") + graphs_train.add_graph_node_edge(graph_id, "Item", "User", "ItemUser") + + if node_id == 2: + graphs_train.add_graph_node_edge(graph_id, "Category", "Item", "CatrgoryItem") + + graphs_train.add_graph_node_property(graph_id, "User", "U_" + str(X_train[graph_id][0])) + graphs_train.add_graph_node_property(graph_id, "Item", "I_" + str(X_train[graph_id][1])) + graphs_train.add_graph_node_property(graph_id, "Category", "C_" + str(X_train[graph_id][2])) +graphs_train.encode() +print("Training data produced") + +# Test data +graphs_test = Graphs(X_test.shape[0], init_with=graphs_train) +for graph_id in range(X_test.shape[0]): + graphs_test.set_number_of_graph_nodes(graph_id, number_of_nodes) +graphs_test.prepare_node_configuration() +for graph_id in range(X_test.shape[0]): + for node_id in range(graphs_test.number_of_graph_nodes[graph_id]): + number_of_edges = 2 if node_id > 0 and node_id < graphs_test.number_of_graph_nodes[graph_id]-1 else 1 + if node_id == 0: + graphs_test.add_graph_node(graph_id, "User", number_of_edges) + 
elif node_id == 1: + graphs_test.add_graph_node(graph_id, "Item", number_of_edges) + else: + graphs_test.add_graph_node(graph_id, "Category", number_of_edges) +graphs_test.prepare_edge_configuration() +for graph_id in range(X_test.shape[0]): + for node_id in range(graphs_test.number_of_graph_nodes[graph_id]): + if node_id == 0: + graphs_test.add_graph_node_edge(graph_id, "User", "Item", "UserItem") + + if node_id == 1: + graphs_test.add_graph_node_edge(graph_id, "Item", "Category", "ItemCategory") + graphs_test.add_graph_node_edge(graph_id, "Item", "User", "ItemUser") + + if node_id == 2: + graphs_test.add_graph_node_edge(graph_id, "Category", "Item", "CatrgoryItem") + + graphs_test.add_graph_node_property(graph_id, "User", "U_" + str(X_test[graph_id][0])) + graphs_test.add_graph_node_property(graph_id, "Item", "I_" + str(X_test[graph_id][1])) + graphs_test.add_graph_node_property(graph_id, "Category", "C_" + str(X_test[graph_id][2])) +graphs_test.encode() +print("Testing data produced") + +tm = MultiClassGraphTsetlinMachine( + args.number_of_clauses, + args.T, + args.s, + number_of_state_bits = args.number_of_state_bits, + depth=args.depth, + message_size=args.message_size, + message_bits=args.message_bits, + max_included_literals=args.max_included_literals, + double_hashing = args.double_hashing +) + +for i in range(args.epochs): + start_training = time() + tm.fit(graphs_train, Y_train, epochs=1, incremental=True) + stop_training = time() + + start_testing = time() + result_test = 100*(tm.predict(graphs_test) == Y_test).mean() + stop_testing = time() + + result_train = 100*(tm.predict(graphs_train) == Y_train).mean() + print("%d %.2f %.2f %.2f %.2f" % (i, result_train, result_test, stop_training-start_training, stop_testing-start_testing)) + +# weights = tm.get_state()[1].reshape(2, -1) +# for i in range(tm.number_of_clauses): +# print("Clause #%d W:(%d %d)" % (i, weights[0,i], weights[1,i]), end=' ') +# l = [] +# for k in range(args.hypervector_size * 2): +# if 
tm.ta_action(0, i, k): +# if k < args.hypervector_size: +# l.append("x%d" % (k)) +# else: +# l.append("NOT x%d" % (k - args.hypervector_size)) + +# for k in range(args.message_size * 2): +# if tm.ta_action(1, i, k): +# if k < args.message_size: +# l.append("c%d" % (k)) +# else: +# l.append("NOT c%d" % (k - args.message_size)) + +# print(" AND ".join(l)) + +# print(graphs_test.hypervectors) +# print(tm.hypervectors) +# print(graphs_test.edge_type_id) \ No newline at end of file diff --git a/examples/applications/recommendation/prepare_dataset.py b/examples/applications/recommendation/prepare_dataset.py new file mode 100644 index 00000000..582b569d --- /dev/null +++ b/examples/applications/recommendation/prepare_dataset.py @@ -0,0 +1,145 @@ +import pandas as pd +import kagglehub +import numpy as np + +np.random.seed(42) + +def amazon_products(): + print("Creating training data") + path = kagglehub.dataset_download("karkavelrajaj/amazon-sales-dataset") + print("Path to dataset files:", path) + data_file = path + "/amazon.csv" + org_data = pd.read_csv(data_file) + print("Original data shape:", org_data.shape) + return org_data[['product_id', 'category', 'user_id', 'rating']] + +def aug_amazon_products(): + org_data = amazon_products() + org_data['rating'] = pd.to_numeric(org_data['rating'], errors='coerce') # Coerce invalid values to NaN + org_data.dropna(subset=['rating'], inplace=True) # Drop rows with NaN ratings + org_data['rating'] = org_data['rating'].astype(int) + # Expand the dataset 10 times + data = pd.concat([org_data] * 10, ignore_index=True) + # Shuffle the expanded dataset + data = data.sample(frac=1, random_state=42).reset_index(drop=True) + # Add noise + # Define the noise ratio + noise_ratio = 0.1 # 10% noise + # Select rows to apply noise + num_noisy_rows = int(noise_ratio * len(data)) + noisy_indices = np.random.choice(data.index, size=num_noisy_rows, replace=False) + # Add noise to ratings + data.loc[noisy_indices, 'rating'] = 
np.random.choice(range(1, 6), size=num_noisy_rows) + # Add noise to categories + unique_categories = data['category'].unique() + data.loc[noisy_indices, 'category'] = np.random.choice(unique_categories, size=num_noisy_rows) + # Print a preview of the noisy and expanded dataset + print("Expanded data shape:", data.shape) + print("Data preview:\n", data.head()) + return data + +def artificial(): + num_users = 5 # Number of unique users + num_items =10 # Number of unique items + num_categories = 5 # Number of unique categories + num_interactions = 1000 # Number of user-item interactions + # Generate random ratings (e.g., between 1 and 5) + ratings = np.random.choice(range(1, 3), num_interactions) + # Generate random user-item interactions + user_ids = np.random.choice(range(num_users), num_interactions) + item_ids = np.random.choice(range(num_items), num_interactions) + categories = np.random.choice(range(num_categories), num_interactions) + + data = pd.DataFrame({ + 'user_id': user_ids, + 'product_id': item_ids, + 'category': categories, + 'rating': ratings + }) + return data + +def artificial_with_user_pref(): + num_users = 100 # Number of unique users + num_items = 50 # Number of unique items + num_categories = 50 # Number of unique categories + num_interactions = 1000 # Number of user-item interactions + noise_ratio = 0.01 # Percentage of noisy interactions + + # Generate user preferences: each user prefers 1-3 random categories + user_preferences = { + user: np.random.choice(range(num_categories), size=np.random.randint(1, 4), replace=False) + for user in range(num_users) + } + + # Assign each item to a category + item_categories = {item: np.random.choice(range(num_categories)) for item in range(num_items)} + + # Generate interactions + user_ids = np.random.choice(range(num_users), num_interactions) + item_ids = np.random.choice(range(num_items), num_interactions) + + # Generate ratings based on the pattern + ratings = [] + for user, item in zip(user_ids, 
item_ids): + item_category = item_categories[item] + if item_category in user_preferences[user]: + ratings.append(np.random.choice([3, 4])) # High rating for preferred categories + else: + ratings.append(np.random.choice([1, 2])) # Low rating otherwise + + # Introduce noise + num_noisy = int(noise_ratio * num_interactions) + noisy_indices = np.random.choice(range(num_interactions), num_noisy, replace=False) + for idx in noisy_indices: + ratings[idx] = np.random.choice(range(1, 6)) # Replace with random rating + + # Combine into a DataFrame + data = pd.DataFrame({ + 'user_id': user_ids, + 'product_id': item_ids, + 'category': [item_categories[item] for item in item_ids], + 'rating': ratings + }) + return data + +def artificial_pattered(): + num_users = 100 # Number of unique users + num_items = 50 # Number of unique items + num_categories = 5 # Number of unique categories + num_interactions = 10000 # Number of user-item interactions + noise_ratio = 0.01 # Percentage of noisy interactions + + # Step 1: Define deterministic user preferences + user_preferences = {user: user % num_categories for user in range(num_users)} + + # Step 2: Assign items to categories in a cyclic pattern + item_categories = {item: item % num_categories for item in range(num_items)} + + # Step 3: Generate deterministic interactions + user_ids = np.arange(num_interactions) % num_users # Cycle through users + item_ids = np.arange(num_interactions) % num_items # Cycle through items + + # Step 4: Generate ratings based on the pattern + ratings = [] + for user, item in zip(user_ids, item_ids): + preferred_category = user_preferences[user] + item_category = item_categories[item] + if item_category == preferred_category: + ratings.append(5) # High rating for preferred category + else: + ratings.append(1) # Low rating otherwise + + # Step 5: Introduce noise + num_noisy = int(noise_ratio * num_interactions) + noisy_indices = np.random.choice(range(num_interactions), num_noisy, replace=False) + for idx 
in noisy_indices: + ratings[idx] = np.random.choice(range(1, 6)) # Replace with random rating + + # Step 6: Create a DataFrame + data = pd.DataFrame({ + 'user_id': user_ids, + 'product_id': item_ids, + 'category': [item_categories[item] for item in item_ids], + 'rating': ratings + }) + return data \ No newline at end of file From 801b7e399fd3050920f2b1d988dad94ac6925c88 Mon Sep 17 00:00:00 2001 From: Ahmed Khalid Date: Fri, 20 Dec 2024 13:27:26 +0000 Subject: [PATCH 16/29] update --- .../__pycache__/prepare_dataset.cpython-310.pyc | Bin 0 -> 4044 bytes .../main.py} | 13 +++++++++---- .../prepare_dataset.py | 8 +++++--- .../{ => products_recommendation}/test.ipynb | 0 .../__pycache__/prepare_dataset.cpython-310.pyc | Bin 1415 -> 0 bytes 5 files changed, 14 insertions(+), 7 deletions(-) create mode 100644 examples/applications/products_recommendation/__pycache__/prepare_dataset.cpython-310.pyc rename examples/applications/{recommendation/main_products.py => products_recommendation/main.py} (94%) rename examples/applications/{recommendation => products_recommendation}/prepare_dataset.py (97%) rename examples/applications/{ => products_recommendation}/test.ipynb (100%) delete mode 100644 examples/applications/recommendation/__pycache__/prepare_dataset.cpython-310.pyc diff --git a/examples/applications/products_recommendation/__pycache__/prepare_dataset.cpython-310.pyc b/examples/applications/products_recommendation/__pycache__/prepare_dataset.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6947f000940c5e0a9189502c8ece11503498ecf GIT binary patch literal 4044 zcma)9&2uA16`$_;&}j5w*%rzgieLg%5LLFRP!zB+yMf(xQhay=36Pj9Vxn%#qm^g0 zJ>y+ljJSBKk{nzGl`99En=ko;`bKd8U*SekNq(*tXIX!zm)Q@;%PV-&yAZ+>Y+!9M^UJrm`L(wk!G->T>N^%6D9nX zgfGhY^+g~m_$>?ehSjc~VJ2&{OY;0HN`_e!_m(piMlm%+n1!Pk9){{+ z_*f29*bn<_;chre;*~TU$aIAaQkjifuZP*ra+a78P%Ljp=)O6sOw3oi>EluJzKVKL z91e`ta=H@^<;}LEgP}^qUN`GRLi^n?lf6V8=+a&)6{$RBylQ*e9jYkKbopV}>kZ`2 z-n+UnaiAlTC-EQ&h2}#{f6{PoC{c9? 
zp9T1mtm3QYp}Le=GIa5n%>E0F%<5B4kvC-5KD3Zor|%-CdyV9Y@}pmaeLc5DP1KLIm|G3$f0k*8`f{+K=?NK#Kb&&m_M<`?qs}s$!eag zvIVh#QHutRg!skHvu`1CMf;$=ZX#7zwwd;%Qi)36Y;*0Ud+%bWbt#I6gRm>ls3>7l zje2o{OhmDe`)7=lTszw;?CPotW0CB3(hSgosUuV}{Kp@fAHDjw(bL--r?5+-k<=w5 z(@aU3XfyQ{{3xomQS;vZ5OYXz3Ajzo;~}AwX2%>-3X@e;#uy!BNhjXhl`86Lk5-IB z?S*M}FqGOR6eGMC_(DVmak`=;0xJU0_CUt4*h!+U)UF9iyI84*!Gi=d?#0ogJ*mi5 zRe@-Gia>_o0BK!`lPHxPDx5^u;YNoP2OX6>Np-DgV^U0>>iJ@-&ZO`nnVNbiM1$sB z4EK7SXJAbwlRghoWgcs>23ufF-r|nk;7zv7tNe?s${nV@2a`)Mr_CwhF);rl8nZAL zd<PF$z06IfY%$k=!Dc zq@h%j#_L&QB#%;e$nRPYzVQwp`{VNaR$gAR#z7wBPVO&Rk1h3fUOFxk?|_wp9?n8y z&PTUr^c7K^>a$XA=iG2y#c&j@OMt0NSgx8^v0`mpGcluAREX`ss-?WPZav!1{Nplt z_MU~98)E*cpL-NpUJ?riV)eXkSgnAVz#g8NRQ&)g^<{`IG1d8pAV%O>#FMT20q#25CF{ULwEv5r%qmf*Ufs5aZ~3`#EmO0Yuh#~GN9uYOxQ z1g=x2ZezqOe%8Sc8IHT*tqt^~K7?m+zWj;z$!KK?uFjJv+cD_InJ2eb*TGNWAicFZ z9sijq2~j3W^3p_EIgj#q66FaiqB0&Kv}w6r1aivNPcdFQR1@mwXinW%n|S>ho|Ja= zc}v}-?$=0|(kAT&f?g)gS0O&*BW)x3`0pk}k22wn8E}XD(x1(K7mJyyyAP97rsqs? z&eWypG#nbN52Cc#O-Yl|`c3Qe?c>Er^)g1agPFZB)UyVy;U8^}BAiILy5NSQsOk+F zsdXAQr+!F^TO?i~@f`@=Fw8=UlS;-o57dvT?+%F{k)S;)w+(j91xca;_@cMg^ReMD3sX@ag`_98REi)nux$^nG4S3jp6NC z!unD{SOraug9&j}@=6{&w z&;fe!*P%8cr#i@~pOue;3v#N9xfyb57;>7+=Rg|s#rdbG#?`ANz7G*KUPS9khN6Sg z)J2Qzs4hxWyE$s3WzE!VQ64{`8LueK6nawxEo=1R3^H&{#{nqC=c18wSJdxm6;o_R z+`neg$D|KQ5C?3iUsCgb7QcFfOrDwf%?llYO#K?-Pt^Dg#QzKCnOd$~Rm-SgbJbIC z(X4NiFtu!mj}C~Upc_{ewA!NnYioF+sB~=b`F-y|{gy_&LxSp9Js>f?Q2dTwO#v@n zk)}R+Qv%Fv>9*{aUAOC#vh_{rRjU)wMHOa2$CE1v?NWTI;T literal 0 HcmV?d00001 diff --git a/examples/applications/recommendation/main_products.py b/examples/applications/products_recommendation/main.py similarity index 94% rename from examples/applications/recommendation/main_products.py rename to examples/applications/products_recommendation/main.py index 03d5b641..e045607a 100644 --- a/examples/applications/recommendation/main_products.py +++ b/examples/applications/products_recommendation/main.py @@ -10,19 +10,19 @@ def default_args(**kwargs): parser = 
argparse.ArgumentParser() - parser.add_argument("--epochs", default=10, type=int) + parser.add_argument("--epochs", default=100, type=int) parser.add_argument("--number-of-clauses", default=1000, type=int) parser.add_argument("--T", default=10000, type=int) parser.add_argument("--s", default=10.0, type=float) parser.add_argument("--number-of-state-bits", default=8, type=int) - parser.add_argument("--depth", default=3, type=int) + parser.add_argument("--depth", default=1, type=int) parser.add_argument("--hypervector-size", default=4096, type=int) parser.add_argument("--hypervector-bits", default=256, type=int) parser.add_argument("--message-size", default=256, type=int) parser.add_argument("--message-bits", default=2, type=int) parser.add_argument('--double-hashing', dest='double_hashing', default=False, action='store_true') parser.add_argument("--noise", default=0.01, type=float) - parser.add_argument("--max-included-literals", default=10, type=int) + parser.add_argument("--max-included-literals", default=3, type=int) args = parser.parse_args() for key, value in kwargs.items(): @@ -30,9 +30,14 @@ def default_args(**kwargs): setattr(args, key, value) return args args = default_args() +np.random.seed(42) +# data = prepare_dataset.amazon_products() data = prepare_dataset.aug_amazon_products() -print(data.head()) +# data = prepare_dataset.artificial() +# data = prepare_dataset.artificial_with_user_pref() +# data = prepare_dataset.artificial_pattered() +# print(data.head()) le_user = LabelEncoder() le_item = LabelEncoder() le_category = LabelEncoder() diff --git a/examples/applications/recommendation/prepare_dataset.py b/examples/applications/products_recommendation/prepare_dataset.py similarity index 97% rename from examples/applications/recommendation/prepare_dataset.py rename to examples/applications/products_recommendation/prepare_dataset.py index 582b569d..20162f01 100644 --- a/examples/applications/recommendation/prepare_dataset.py +++ 
b/examples/applications/products_recommendation/prepare_dataset.py @@ -2,7 +2,6 @@ import kagglehub import numpy as np -np.random.seed(42) def amazon_products(): print("Creating training data") @@ -14,6 +13,7 @@ def amazon_products(): return org_data[['product_id', 'category', 'user_id', 'rating']] def aug_amazon_products(): + np.random.seed(42) org_data = amazon_products() org_data['rating'] = pd.to_numeric(org_data['rating'], errors='coerce') # Coerce invalid values to NaN org_data.dropna(subset=['rating'], inplace=True) # Drop rows with NaN ratings @@ -24,7 +24,7 @@ def aug_amazon_products(): data = data.sample(frac=1, random_state=42).reset_index(drop=True) # Add noise # Define the noise ratio - noise_ratio = 0.1 # 10% noise + noise_ratio = 0.01 # 10% noise # Select rows to apply noise num_noisy_rows = int(noise_ratio * len(data)) noisy_indices = np.random.choice(data.index, size=num_noisy_rows, replace=False) @@ -35,10 +35,10 @@ def aug_amazon_products(): data.loc[noisy_indices, 'category'] = np.random.choice(unique_categories, size=num_noisy_rows) # Print a preview of the noisy and expanded dataset print("Expanded data shape:", data.shape) - print("Data preview:\n", data.head()) return data def artificial(): + np.random.seed(42) num_users = 5 # Number of unique users num_items =10 # Number of unique items num_categories = 5 # Number of unique categories @@ -59,6 +59,7 @@ def artificial(): return data def artificial_with_user_pref(): + np.random.seed(42) num_users = 100 # Number of unique users num_items = 50 # Number of unique items num_categories = 50 # Number of unique categories @@ -103,6 +104,7 @@ def artificial_with_user_pref(): return data def artificial_pattered(): + np.random.seed(42) num_users = 100 # Number of unique users num_items = 50 # Number of unique items num_categories = 5 # Number of unique categories diff --git a/examples/applications/test.ipynb b/examples/applications/products_recommendation/test.ipynb similarity index 100% rename from 
examples/applications/test.ipynb rename to examples/applications/products_recommendation/test.ipynb diff --git a/examples/applications/recommendation/__pycache__/prepare_dataset.cpython-310.pyc b/examples/applications/recommendation/__pycache__/prepare_dataset.cpython-310.pyc deleted file mode 100644 index 334918248b0038345364fc10337498871cad89c2..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1415 zcmZ8hOK&SR7_~i*q)D2lZBzsjC}Kqr_jbjmssQQ2T^1yekh&T9j(w+-PR4`n^bsYi zvds^GRm+ZFfcPukvg)1{E6z75QZKf~kI%>7^LWheZjYh;@uMy-Q^x+H&GRO(`3j%= z41;2dSL~d(vp5&+9G%BXsOV>Qo~T$Qm{Y~SXVdf%H`rND<9{u}g1%Sy+-nRT8t*t~ zp5F^49{8Fh(Td#(&Q?st@3A%a(S5w)I1@NaR$`X4r=nmg{f)oRV5@VsJbGod^krR4 zyp?553njf=etaqIrM%LWl`}azmUFo@^$RB}?OqVTX}>)BPWp?9H|-4wCT~j|pDg=N z@Rzx}T8_W9Wl`3$YEct+Asc-%jYF@oMlEuml`3>|>2+c3ZRjkVwj@ez>(w+3Nn^{} zhn-7V6qUYMya}Tx2Qp=@>&i$KL<1!VL}f4;lGeyH3ULDm!w$hS@-1|YEi&5v&g>rY z_|NIFY?@rM|IVX>TSfV3u%AD*IW%i2av(4<1 zA|BRNr`sR^C`1mQEh!DTEG7keQYh~t{^WYTlv9hKqe*J~opI^QvoGX_)bWqM#FV$ko z?T45wJ+kHao0|qYw0h3iGdfsI9Vy*Pe!$1#NW{@djQKb3mq@kWxOSYm;R= qq%?t7&8__e>unwh`OGpIu&w#-=^X!wN`FD6c0__Mo)@|p^!^25{fROF From fdfb81fb27cab73377e095984cdd5bede8002401 Mon Sep 17 00:00:00 2001 From: Ahmed Khalid Date: Tue, 24 Dec 2024 11:30:06 +0000 Subject: [PATCH 17/29] add TMClassifier --- .../prepare_dataset.cpython-310.pyc | Bin 4044 -> 4044 bytes .../products_recommendation/baseline.py | 114 ++++++++++++++++++ 2 files changed, 114 insertions(+) create mode 100644 examples/applications/products_recommendation/baseline.py diff --git a/examples/applications/products_recommendation/__pycache__/prepare_dataset.cpython-310.pyc b/examples/applications/products_recommendation/__pycache__/prepare_dataset.cpython-310.pyc index d6947f000940c5e0a9189502c8ece11503498ecf..7742db56bf6d6b0f6731787e01a26b4ff39baead 100644 GIT binary patch delta 19 ZcmX>je@31wpO=@50SKatH*y`~2LLh>1kL~e delta 19 ZcmX>je@31wpO=@50SF@UH*y`~2LLhb1jhgX diff --git 
a/examples/applications/products_recommendation/baseline.py b/examples/applications/products_recommendation/baseline.py new file mode 100644 index 00000000..f1d37271 --- /dev/null +++ b/examples/applications/products_recommendation/baseline.py @@ -0,0 +1,114 @@ +import logging +import argparse +import numpy as np +from tmu.models.classification.vanilla_classifier import TMClassifier +from tmu.tools import BenchmarkTimer +from tmu.util.cuda_profiler import CudaProfiler +from sklearn.model_selection import train_test_split +from sklearn.preprocessing import LabelEncoder +import prepare_dataset +from tmu.data import MNIST +from sklearn.preprocessing import OneHotEncoder +import pandas as pd + +_LOGGER = logging.getLogger(__name__) + +def metrics(args): + return dict( + accuracy=[], + train_time=[], + test_time=[], + args=vars(args) + ) + +def prepare_data(): + # Step 1: Load and encode dataset + data = prepare_dataset.aug_amazon_products() + le_user = LabelEncoder() + le_item = LabelEncoder() + le_category = LabelEncoder() + le_rating = LabelEncoder() + data['user_id'] = le_user.fit_transform(data['user_id']) + data['product_id'] = le_item.fit_transform(data['product_id']) + data['category'] = le_category.fit_transform(data['category']) + data['rating'] = le_rating.fit_transform(data['rating']) + + x = data[['user_id', 'product_id', 'category']].values + y = data['rating'].values + # Step 3: One-hot encode features + encoder = OneHotEncoder(sparse_output=False, dtype=np.uint32) + x_binary = encoder.fit_transform(x) + + # Verify feature dimensions + print(f"Number of features after one-hot encoding: {x_binary.shape[1]}") + + x_train, x_test, y_train, y_test = train_test_split(x_binary, y, test_size=0.2, random_state=42) + + y_train = y_train.astype(np.uint32) + y_test = y_test.astype(np.uint32) + + print("x_train shape:", x_train.shape, "dtype:", x_train.dtype) + print("y_train shape:", y_train.shape, "dtype:", y_train.dtype) + print("x_test shape:", x_test.shape, 
"dtype:", x_test.dtype) + print("y_test shape:", y_test.shape, "dtype:", y_test.dtype) + + return x_train, x_test, y_train, y_test + +def main(args): + experiment_results = metrics(args) + X_train, X_test, Y_train, Y_test = prepare_data() + + tm = TMClassifier( + number_of_clauses=args.num_clauses, + T=args.T, + s=args.s, + max_included_literals=args.max_included_literals, + platform=args.platform, + weighted_clauses=args.weighted_clauses + ) + _LOGGER.info(f"Running {TMClassifier} for {args.epochs}") + for epoch in range(args.epochs): + benchmark_total = BenchmarkTimer(logger=None, text="Epoch Time") + with benchmark_total: + benchmark1 = BenchmarkTimer(logger=None, text="Training Time") + with benchmark1: + res = tm.fit( + X_train, + Y_train, + ) + + experiment_results["train_time"].append(benchmark1.elapsed()) + benchmark2 = BenchmarkTimer(logger=None, text="Testing Time") + with benchmark2: + result = 100 * (tm.predict(X_test) == Y_test).mean() + experiment_results["accuracy"].append(result) + experiment_results["test_time"].append(benchmark2.elapsed()) + + _LOGGER.info(f"Epoch: {epoch + 1}, Accuracy: {result:.2f}, Training Time: {benchmark1.elapsed():.2f}s, " + f"Testing Time: {benchmark2.elapsed():.2f}s") + + if args.platform == "CUDA": + CudaProfiler().print_timings(benchmark=benchmark_total) + + return experiment_results + + +def default_args(**kwargs): + parser = argparse.ArgumentParser() + parser.add_argument("--num_clauses", default=2000, type=int) + parser.add_argument("--T", default=5000, type=int) + parser.add_argument("--s", default=10.0, type=float) + parser.add_argument("--max_included_literals", default=32, type=int) + parser.add_argument("--platform", default="CPU_sparse", type=str, choices=["CPU", "CPU_sparse", "CUDA"]) + parser.add_argument("--weighted_clauses", default=True, type=bool) + parser.add_argument("--epochs", default=60, type=int) + args = parser.parse_args() + for key, value in kwargs.items(): + if key in args.__dict__: + 
setattr(args, key, value) + return args + + +if __name__ == "__main__": + results = main(default_args()) + _LOGGER.info(results) \ No newline at end of file From 3168dc7c889fafd0adada91d73b99dd983d64a5b Mon Sep 17 00:00:00 2001 From: Ahmed Khalid Date: Tue, 24 Dec 2024 17:12:57 +0000 Subject: [PATCH 18/29] update --- .../prepare_dataset.cpython-310.pyc | Bin 4044 -> 5371 bytes .../products_recommendation/baseline.py | 49 ++------------- .../products_recommendation/main.py | 59 ++++++------------ .../prepare_dataset.py | 44 +++++++++++-- 4 files changed, 64 insertions(+), 88 deletions(-) diff --git a/examples/applications/products_recommendation/__pycache__/prepare_dataset.cpython-310.pyc b/examples/applications/products_recommendation/__pycache__/prepare_dataset.cpython-310.pyc index 7742db56bf6d6b0f6731787e01a26b4ff39baead..a91b323f497e840523e521b9b11aa792ba20e8ba 100644 GIT binary patch literal 5371 zcma)A-ESOM6`y-&c4qd|>yK;_3dIykS*kcy5eTIwDKs$!iu(z)re&Gz9j|Azp4r?P zr}i?7P#vl5gC!6G3B<#uZ{dOegMZ<^A&|f?yzo{b%I}<6+w0VU?rP4QIrn4kJs-b& z&Ls7E#lr99zx8|3XDsWl^f3Fg@$f9J^c@goDKW4%1mER`J)W;rp z+x~{nyaT?xQ3hX9?p2=!UyQ@&lWZ#J z($J$HZH%txaHVS?88){sgq4YjRPvq}lf=%geJQNmQqFm6EHZoI5}a z(YKM?TZaja6_#VoxawV#!Yu_LfyuT;^83ZhKB?O6h~1nPGF8G zR^jd;=@gxvO&xSiO$V_`wmWGC|3X(s>14QeAI)hr3XKOt@=T=sw$P8^rimXd-Pj#s zXsAv(?UIr4kUWzn#T;R%*rK|IF{Y9wop@(E)KS;C^rAR0Zjfes!_e5|!Pr#{-@-=N z+e}r5*iW%;V-La@J+_jl8=4aD#gy=-9tIB*%(xRrw|7EarZtjhAu_6Q5~X2>A|=uD zv<8QK+cdC@4z=xdbnIIKUF?nj{i>W%(po_wkW1%36G|ghL)9XCo(lmeiX^@(5 zMN2frf>@F**|e9$vaHEZiJEkTzJd;2T*p6JuZN5M`sZL~{aVQ}e0dCy9ZTgD{x-2Q zhkTiQ>z*w9jbs2f!4mc=mH+}-;_{r-2cF9kFDp&lyp+4S2fy_G-~a~w+=rj}>$-(i z98#?5M~M4}4GzbNbqL0~?`T@C=63Q0_Q6p^Qiu<^^+q4+f&Y=`Z# zDU<#Q$kKw1vT!>sta74r)1+STix8g5fCGPI%u9_wML}>h7I>JN%E?guHF_`M*Nn|7 zfzG1Ms2c@?FQFw}2B`^W`d^X@aseiDq!h*9|F`4n=h5kux#l3C{gW!vrx8qNn#+D) zz+6(5#`e2nQz%z?cm0gcA90<4!(pjEmBLIUeZQE?Df;CS#w|=q5;7%8+@3Xt@yK-h z^1Aib<8R8bKQ6y#<>iakxRO_LC-*N}cP#xz?oA5AJAk4}52qkOLl|j8;`ubNiSVPE=pDZtrIPq)aP&*TSCZ;P-Qv 
zc9wf;fnBVTH{fE6Qx{Xv;UT-oDmZ-(;h-B{T|-MsKT}cr(L+YvI=wx2I`kf6~r_dS;%mq4r81K`UbU;#r1U}FA{kb#MEbZw^tw;A0+zg)NDs;_XLPw zo7ZV1I~8l-;zM1AOq0?08Y=9IlAL-HsaY5GX7WfO05o|SaF>u|) z@KeCmgy0ysS`Py*2f$SVaJf_9@|1rDaFwUh8Q?-HO+a9^IIEdH2|PUrSdSEdRlwA^ zG6k+`Ud=1Fq<)#eN#QgCP(8zZrQm14sm8%ia7y4*0|?asPW8Ob;M8C^Kr1Se`V^cR z0H=Odo>Y#(iK>$maB4C*&E<0djrjtc^mm{y{aqr@fJ9Ar*4YSELNmt&9h)4x9>s2s z*l1ZZF)No|y3rB#Y?F$JO$ zRBEsU1rbZA_^?@GPbD@a4ojS=#DN6pu1Zt^_mP26wagr+i<@+=y6b1Afc68@wykX} z*VH$otb^h?PB#<1ZQMJ-U?)tGF_Kb^HwgK#WQGvcR3Vw389-G$6j`4UyGHEI_#PXf zj+!COG}XzwoxR^+#vh-*1+EL?mQc)C7>;DfMd#9d9b+qH-$We10%_oo0G;M0O_R%p35)k7}r#5ae zhw3a3?b*MTO|7v3pwYE{m^djqQx; zXgtxnCd_V^H3cWb7HUvrkzaXQZ;^!5qq$^1E=l!>M);f~ zAEX70R?uWhD7%Jx#fHwOw+3OLTyPG+8)Jh+Gbb@3l#oIaP3#HH+ zxZ>qHO5dbM!Z(h?LP4~enGNxw!h!9t;+;E#@G4T3qOSJwv52xB`unm{_Rst0{7e3m Ie!b@Z7rInPIHVq@1toDl+x+w=mn^r}&EZBpw$AR5#zPz(Mh2w9Ck z5|Zz8&O&XEwOOdwj24q(l1WYK9RYiZwOGxRqQa#i&5q#I2jfvHwDx&xH=)qPbc+!s zRb)i7I-<>6yk<+vTG|~^%aAwI;cYInZ4p{65oTL_j~CeWu5LNCtL8`Z!sD=(}zqB&#nY971<9I(c`?Mm6(@^%{aa#JyRoJXuf@p_j1Efcir8vvuuq zvMkmsDWvZjL(BA6AU(ahtky#kSS-98>)U~^x?G({oC9H^_4F4KrmG(op)1r|G#KmE z(BD#T59YyICAdm0K*`fgy$0uHK$0E2C0gwWJVz<(Vsy(sHa95s9UAEJIFH!nO)vB- z4OR7nCz*X`Us*j%6xSZgERpaeBBIJz+o?8V(`z>Ux>R#;rvXa@ZeA_JJ_lF|%?}3mg=0x9TZu9cvT=p!V!q%t~;1b|8 zLDhMYEbLD)x}Te!8B}@&l&95;e#*VO{0T+|L`d=sk$m^z_W+Q#q^`kyqN%l@uA~1{ zm>Yc`j=y@J;7b@cqlY8&yNR-y!;~dG>DW|!^#Rx)0up7DV>OW1c}89^*8zu0OapPD zsr);>Qof9q^Y-ip$R7b9z4{o?mtNgM*NN`t*F*dQt{t0AI?i5ju*WI##YTO5tNGbM v70C^83X?7Z$^c9p(j`$SxeTnOExG~j`R6z>0=O)?#Oc+nv^8msXXpO~)Wa`t diff --git a/examples/applications/products_recommendation/baseline.py b/examples/applications/products_recommendation/baseline.py index f1d37271..b390764a 100644 --- a/examples/applications/products_recommendation/baseline.py +++ b/examples/applications/products_recommendation/baseline.py @@ -1,15 +1,9 @@ import logging import argparse -import numpy as np from tmu.models.classification.vanilla_classifier import TMClassifier from tmu.tools import BenchmarkTimer from 
tmu.util.cuda_profiler import CudaProfiler -from sklearn.model_selection import train_test_split -from sklearn.preprocessing import LabelEncoder import prepare_dataset -from tmu.data import MNIST -from sklearn.preprocessing import OneHotEncoder -import pandas as pd _LOGGER = logging.getLogger(__name__) @@ -21,42 +15,11 @@ def metrics(args): args=vars(args) ) -def prepare_data(): - # Step 1: Load and encode dataset - data = prepare_dataset.aug_amazon_products() - le_user = LabelEncoder() - le_item = LabelEncoder() - le_category = LabelEncoder() - le_rating = LabelEncoder() - data['user_id'] = le_user.fit_transform(data['user_id']) - data['product_id'] = le_item.fit_transform(data['product_id']) - data['category'] = le_category.fit_transform(data['category']) - data['rating'] = le_rating.fit_transform(data['rating']) - - x = data[['user_id', 'product_id', 'category']].values - y = data['rating'].values - # Step 3: One-hot encode features - encoder = OneHotEncoder(sparse_output=False, dtype=np.uint32) - x_binary = encoder.fit_transform(x) - - # Verify feature dimensions - print(f"Number of features after one-hot encoding: {x_binary.shape[1]}") - - x_train, x_test, y_train, y_test = train_test_split(x_binary, y, test_size=0.2, random_state=42) - - y_train = y_train.astype(np.uint32) - y_test = y_test.astype(np.uint32) - - print("x_train shape:", x_train.shape, "dtype:", x_train.dtype) - print("y_train shape:", y_train.shape, "dtype:", y_train.dtype) - print("x_test shape:", x_test.shape, "dtype:", x_test.dtype) - print("y_test shape:", y_test.shape, "dtype:", y_test.dtype) - - return x_train, x_test, y_train, y_test - def main(args): experiment_results = metrics(args) - X_train, X_test, Y_train, Y_test = prepare_data() + data = prepare_dataset.aug_amazon_products() + x, y = prepare_dataset.construct_x_y(data) + X_train, X_test, Y_train, Y_test = prepare_dataset.one_hot_encoding(x,y) tm = TMClassifier( number_of_clauses=args.num_clauses, @@ -92,23 +55,21 @@ def 
main(args): return experiment_results - def default_args(**kwargs): parser = argparse.ArgumentParser() parser.add_argument("--num_clauses", default=2000, type=int) - parser.add_argument("--T", default=5000, type=int) + parser.add_argument("--T", default=10000, type=int) parser.add_argument("--s", default=10.0, type=float) parser.add_argument("--max_included_literals", default=32, type=int) parser.add_argument("--platform", default="CPU_sparse", type=str, choices=["CPU", "CPU_sparse", "CUDA"]) parser.add_argument("--weighted_clauses", default=True, type=bool) - parser.add_argument("--epochs", default=60, type=int) + parser.add_argument("--epochs", default=10, type=int) args = parser.parse_args() for key, value in kwargs.items(): if key in args.__dict__: setattr(args, key, value) return args - if __name__ == "__main__": results = main(default_args()) _LOGGER.info(results) \ No newline at end of file diff --git a/examples/applications/products_recommendation/main.py b/examples/applications/products_recommendation/main.py index e045607a..41168a93 100644 --- a/examples/applications/products_recommendation/main.py +++ b/examples/applications/products_recommendation/main.py @@ -2,16 +2,13 @@ from GraphTsetlinMachine.tm import MultiClassGraphTsetlinMachine from time import time import argparse -import pandas as pd import numpy as np -from sklearn.model_selection import train_test_split -from sklearn.preprocessing import LabelEncoder import prepare_dataset def default_args(**kwargs): parser = argparse.ArgumentParser() - parser.add_argument("--epochs", default=100, type=int) - parser.add_argument("--number-of-clauses", default=1000, type=int) + parser.add_argument("--epochs", default=10, type=int) + parser.add_argument("--number-of-clauses", default=2000, type=int) parser.add_argument("--T", default=10000, type=int) parser.add_argument("--s", default=10.0, type=float) parser.add_argument("--number-of-state-bits", default=8, type=int) @@ -22,7 +19,7 @@ def 
default_args(**kwargs): parser.add_argument("--message-bits", default=2, type=int) parser.add_argument('--double-hashing', dest='double_hashing', default=False, action='store_true') parser.add_argument("--noise", default=0.01, type=float) - parser.add_argument("--max-included-literals", default=3, type=int) + parser.add_argument("--max-included-literals", default=23, type=int) args = parser.parse_args() for key, value in kwargs.items(): @@ -38,21 +35,8 @@ def default_args(**kwargs): # data = prepare_dataset.artificial_with_user_pref() # data = prepare_dataset.artificial_pattered() # print(data.head()) -le_user = LabelEncoder() -le_item = LabelEncoder() -le_category = LabelEncoder() -le_rating = LabelEncoder() -data['user_id'] = le_user.fit_transform(data['user_id']) -data['product_id'] = le_item.fit_transform(data['product_id']) -data['category'] = le_category.fit_transform(data['category']) -data['rating'] = le_rating.fit_transform(data['rating']) -x = data[['user_id', 'product_id', 'category']].values -y = data['rating'].values -X_train, X_test, Y_train, Y_test = train_test_split(x, y, test_size=0.2, random_state=42) -print("X_train shape:", X_train.shape) -print("y_train shape:", Y_train.shape) -print("X_test shape:", X_test.shape) -print("y_test shape:", Y_test.shape) +x, y = prepare_dataset.construct_x_y(data) +X_train, X_test, Y_train, Y_test = prepare_dataset.train_test_split(x,y) users = data['user_id'].unique() items = data['product_id'].unique() categories = data['category'].unique() @@ -160,24 +144,21 @@ def default_args(**kwargs): # weights = tm.get_state()[1].reshape(2, -1) # for i in range(tm.number_of_clauses): -# print("Clause #%d W:(%d %d)" % (i, weights[0,i], weights[1,i]), end=' ') -# l = [] -# for k in range(args.hypervector_size * 2): -# if tm.ta_action(0, i, k): -# if k < args.hypervector_size: -# l.append("x%d" % (k)) -# else: -# l.append("NOT x%d" % (k - args.hypervector_size)) - -# for k in range(args.message_size * 2): -# if 
tm.ta_action(1, i, k): -# if k < args.message_size: -# l.append("c%d" % (k)) -# else: -# l.append("NOT c%d" % (k - args.message_size)) - -# print(" AND ".join(l)) - +# print("Clause #%d W:(%d %d)" % (i, weights[0,i], weights[1,i]), end=' ') +# l = [] +# for k in range(args.hypervector_size * 2): +# if tm.ta_action(0, i, k): +# if k < args.hypervector_size: +# l.append("x%d" % (k)) +# else: +# l.append("NOT x%d" % (k - args.hypervector_size)) +# for k in range(args.message_size * 2): +# if tm.ta_action(1, i, k): +# if k < args.message_size: +# l.append("c%d" % (k)) +# else: +# l.append("NOT c%d" % (k - args.message_size)) +# print(" AND ".join(l)) # print(graphs_test.hypervectors) # print(tm.hypervectors) # print(graphs_test.edge_type_id) \ No newline at end of file diff --git a/examples/applications/products_recommendation/prepare_dataset.py b/examples/applications/products_recommendation/prepare_dataset.py index 20162f01..dfe1b50b 100644 --- a/examples/applications/products_recommendation/prepare_dataset.py +++ b/examples/applications/products_recommendation/prepare_dataset.py @@ -1,7 +1,9 @@ import pandas as pd import kagglehub import numpy as np - +from sklearn.model_selection import train_test_split +from sklearn.preprocessing import LabelEncoder +from sklearn.preprocessing import OneHotEncoder def amazon_products(): print("Creating training data") @@ -12,7 +14,7 @@ def amazon_products(): print("Original data shape:", org_data.shape) return org_data[['product_id', 'category', 'user_id', 'rating']] -def aug_amazon_products(): +def aug_amazon_products(noise_ratio = 0.01): np.random.seed(42) org_data = amazon_products() org_data['rating'] = pd.to_numeric(org_data['rating'], errors='coerce') # Coerce invalid values to NaN @@ -23,8 +25,6 @@ def aug_amazon_products(): # Shuffle the expanded dataset data = data.sample(frac=1, random_state=42).reset_index(drop=True) # Add noise - # Define the noise ratio - noise_ratio = 0.01 # 10% noise # Select rows to apply noise 
num_noisy_rows = int(noise_ratio * len(data)) noisy_indices = np.random.choice(data.index, size=num_noisy_rows, replace=False) @@ -144,4 +144,38 @@ def artificial_pattered(): 'category': [item_categories[item] for item in item_ids], 'rating': ratings }) - return data \ No newline at end of file + return data + +def construct_x_y(data): + le_user = LabelEncoder() + le_item = LabelEncoder() + le_category = LabelEncoder() + le_rating = LabelEncoder() + data['user_id'] = le_user.fit_transform(data['user_id']) + data['product_id'] = le_item.fit_transform(data['product_id']) + data['category'] = le_category.fit_transform(data['category']) + data['rating'] = le_rating.fit_transform(data['rating']) + x = data[['user_id', 'product_id', 'category']].values + y = data['rating'].values + return x,y + +def split_train_test(x,y): + X_train, X_test, Y_train, Y_test = train_test_split(x, y, test_size=0.2, random_state=42) + print("X_train shape:", X_train.shape) + print("y_train shape:", Y_train.shape) + print("X_test shape:", X_test.shape) + print("y_test shape:", Y_test.shape) + return X_train, X_test, Y_train, Y_test + +def one_hot_encoding(x,y): + encoder = OneHotEncoder(sparse_output=False, dtype=np.uint32) + x_binary = encoder.fit_transform(x) + # print(f"Number of features after one-hot encoding: {x_binary.shape[1]}") + x_train, x_test, y_train, y_test = split_train_test(x_binary, y) + y_train = y_train.astype(np.uint32) + y_test = y_test.astype(np.uint32) + print("x_train shape:", x_train.shape, "dtype:", x_train.dtype) + print("y_train shape:", y_train.shape, "dtype:", y_train.dtype) + print("x_test shape:", x_test.shape, "dtype:", x_test.dtype) + print("y_test shape:", y_test.shape, "dtype:", y_test.dtype) + return x_train, x_test, y_train, y_test \ No newline at end of file From e2232de44dfe505f19dfe0dffd298571cc485df3 Mon Sep 17 00:00:00 2001 From: Ahmed Khalid Date: Tue, 24 Dec 2024 18:54:16 +0000 Subject: [PATCH 19/29] add graph nn --- 
.../prepare_dataset.cpython-310.pyc | Bin 5371 -> 5371 bytes .../products_recommendation/graph_nn.py | 106 ++++++++++++++++++ .../{main.py => graph_tm.py} | 0 .../{baseline.py => tm_classifier.py} | 0 4 files changed, 106 insertions(+) create mode 100644 examples/applications/products_recommendation/graph_nn.py rename examples/applications/products_recommendation/{main.py => graph_tm.py} (100%) rename examples/applications/products_recommendation/{baseline.py => tm_classifier.py} (100%) diff --git a/examples/applications/products_recommendation/__pycache__/prepare_dataset.cpython-310.pyc b/examples/applications/products_recommendation/__pycache__/prepare_dataset.cpython-310.pyc index a91b323f497e840523e521b9b11aa792ba20e8ba..8a2cd18398677e0c9bd2a85d805bb970ed3cb003 100644 GIT binary patch delta 19 ZcmeyZ`CF4KpO=@50SLNYZ{+$Y0suYN1{(kX delta 19 ZcmeyZ`CF4KpO=@50SE%0Y~=bV0suT-1=|1s diff --git a/examples/applications/products_recommendation/graph_nn.py b/examples/applications/products_recommendation/graph_nn.py new file mode 100644 index 00000000..fa78480d --- /dev/null +++ b/examples/applications/products_recommendation/graph_nn.py @@ -0,0 +1,106 @@ +import torch +import torch.nn.functional as F +from torch_geometric.data import Data +from torch_geometric.nn import GCNConv +from time import time +import prepare_dataset + +# Step 1: Dataset Preparation + +data = prepare_dataset.aug_amazon_products() +x, y = prepare_dataset.construct_x_y(data) +X_train, X_test, Y_train, Y_test = prepare_dataset.train_test_split(x,y) + +# Graph Construction +num_users = len(data['user_id'].unique()) +num_items = len(data['product_id'].unique()) +num_categories = len(data['category'].unique()) +num_nodes = num_users + num_items + num_categories + +# Build edge list +edge_list = [] + +# User ↔ Item edges +for user, item in zip(X_train[:, 0], X_train[:, 1]): + edge_list.append((user, num_users + item)) # User to Item + edge_list.append((num_users + item, user)) # Item to User + +# Item ↔ 
Category edges +for item, category in zip(X_train[:, 1], X_train[:, 2]): + edge_list.append((num_users + item, num_users + num_items + category)) # Item to Category + edge_list.append((num_users + num_items + category, num_users + item)) # Category to Item + +# Create edge index for PyTorch Geometric +edge_index = torch.tensor(edge_list, dtype=torch.long).t() + +# Node features +node_features = torch.rand((num_nodes, 64), dtype=torch.float) + +# PyTorch Geometric Data object +graph_data = Data(x=node_features, edge_index=edge_index) + +# Step 2: Define GCN Model +class GCN(torch.nn.Module): + def __init__(self, input_dim, hidden_dim, output_dim): + super(GCN, self).__init__() + self.conv1 = GCNConv(input_dim, hidden_dim) + self.conv2 = GCNConv(hidden_dim, output_dim) + + def forward(self, x, edge_index): + x = self.conv1(x, edge_index) + x = F.relu(x) + x = self.conv2(x, edge_index) + return x + +# Initialize Model +model = GCN(input_dim=64, hidden_dim=128, output_dim=64) + +# Define optimizer +optimizer = torch.optim.Adam(model.parameters(), lr=0.01) + +# Convert train/test data to tensors +train_edges = torch.tensor( + [(user, num_users + item) for user, item in zip(X_train[:, 0], X_train[:, 1])], + dtype=torch.long +).t() +train_labels = torch.tensor(Y_train, dtype=torch.float) + +test_edges = torch.tensor( + [(user, num_users + item) for user, item in zip(X_test[:, 0], X_test[:, 1])], + dtype=torch.long +).t() +test_labels = torch.tensor(Y_test, dtype=torch.float) + +# Training Loop with Accuracy Logging +epochs = 1000 +for epoch in range(epochs): + start_time = time() + + # Training Phase + model.train() + optimizer.zero_grad() + out = model(graph_data.x, graph_data.edge_index) + + # User-item embeddings + user_embeddings = out[train_edges[0]] + item_embeddings = out[train_edges[1]] + predicted_ratings = (user_embeddings * item_embeddings).sum(dim=1) + + # Compute loss + loss = F.mse_loss(predicted_ratings, train_labels) + loss.backward() + optimizer.step() + 
+ # Testing Phase + model.eval() + with torch.no_grad(): + out = model(graph_data.x, graph_data.edge_index) + test_user_embeddings = out[test_edges[0]] + test_item_embeddings = out[test_edges[1]] + test_predicted_ratings = (test_user_embeddings * test_item_embeddings).sum(dim=1) + + # Compute accuracy + test_accuracy = ((test_predicted_ratings.round() == test_labels).float().mean().item()) * 100 + + elapsed_time = time() - start_time + print(f"Epoch {epoch + 1}/{epochs}, Loss: {loss.item():.4f}, Accuracy: {test_accuracy:.2f}%, Time: {elapsed_time:.2f}s") diff --git a/examples/applications/products_recommendation/main.py b/examples/applications/products_recommendation/graph_tm.py similarity index 100% rename from examples/applications/products_recommendation/main.py rename to examples/applications/products_recommendation/graph_tm.py diff --git a/examples/applications/products_recommendation/baseline.py b/examples/applications/products_recommendation/tm_classifier.py similarity index 100% rename from examples/applications/products_recommendation/baseline.py rename to examples/applications/products_recommendation/tm_classifier.py From c4546310371c7dbcb0ccad959223c087c0a6669c Mon Sep 17 00:00:00 2001 From: Ahmed Khalid Date: Wed, 25 Dec 2024 13:04:24 +0000 Subject: [PATCH 20/29] add main.bash --- .../experiment_results.csv | 6 + .../products_recommendation/graph_nn.py | 226 +++++++------- .../products_recommendation/graph_tm.py | 277 +++++++++--------- .../products_recommendation/main.sh | 16 + .../products_recommendation/test.ipynb | 271 ----------------- .../products_recommendation/tm_classifier.py | 73 +++-- 6 files changed, 323 insertions(+), 546 deletions(-) create mode 100644 examples/applications/products_recommendation/experiment_results.csv create mode 100644 examples/applications/products_recommendation/main.sh delete mode 100644 examples/applications/products_recommendation/test.ipynb diff --git 
a/examples/applications/products_recommendation/experiment_results.csv b/examples/applications/products_recommendation/experiment_results.csv new file mode 100644 index 00000000..d3f66d27 --- /dev/null +++ b/examples/applications/products_recommendation/experiment_results.csv @@ -0,0 +1,6 @@ +Algorithm,Noise_Ratio,T,s,Max_Included_Literals,Epochs,Platform,Total_Time,Accuracy +Graph NN,0.005,0,0,0,1000,CPU,0.03006434440612793,76.72131061553955 +GraphTM,0.005,10000,10.0,23,10,CUDA,34.547648191452026,98.46994535519126 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,89.6943154335022,76.63934426229508 +Graph NN,0.01,0,0,0,1000,CPU,0.01817464828491211,75.95628499984741 +GraphTM,0.01,10000,10.0,23,10,CUDA,34.95576763153076,98.44262295081967 diff --git a/examples/applications/products_recommendation/graph_nn.py b/examples/applications/products_recommendation/graph_nn.py index fa78480d..30292db9 100644 --- a/examples/applications/products_recommendation/graph_nn.py +++ b/examples/applications/products_recommendation/graph_nn.py @@ -1,106 +1,130 @@ +import argparse import torch import torch.nn.functional as F from torch_geometric.data import Data from torch_geometric.nn import GCNConv -from time import time import prepare_dataset - -# Step 1: Dataset Preparation - -data = prepare_dataset.aug_amazon_products() -x, y = prepare_dataset.construct_x_y(data) -X_train, X_test, Y_train, Y_test = prepare_dataset.train_test_split(x,y) - -# Graph Construction -num_users = len(data['user_id'].unique()) -num_items = len(data['product_id'].unique()) -num_categories = len(data['category'].unique()) -num_nodes = num_users + num_items + num_categories - -# Build edge list -edge_list = [] - -# User ↔ Item edges -for user, item in zip(X_train[:, 0], X_train[:, 1]): - edge_list.append((user, num_users + item)) # User to Item - edge_list.append((num_users + item, user)) # Item to User - -# Item ↔ Category edges -for item, category in zip(X_train[:, 1], X_train[:, 2]): - 
edge_list.append((num_users + item, num_users + num_items + category)) # Item to Category - edge_list.append((num_users + num_items + category, num_users + item)) # Category to Item - -# Create edge index for PyTorch Geometric -edge_index = torch.tensor(edge_list, dtype=torch.long).t() - -# Node features -node_features = torch.rand((num_nodes, 64), dtype=torch.float) - -# PyTorch Geometric Data object -graph_data = Data(x=node_features, edge_index=edge_index) - -# Step 2: Define GCN Model -class GCN(torch.nn.Module): - def __init__(self, input_dim, hidden_dim, output_dim): - super(GCN, self).__init__() - self.conv1 = GCNConv(input_dim, hidden_dim) - self.conv2 = GCNConv(hidden_dim, output_dim) - - def forward(self, x, edge_index): - x = self.conv1(x, edge_index) - x = F.relu(x) - x = self.conv2(x, edge_index) - return x - -# Initialize Model -model = GCN(input_dim=64, hidden_dim=128, output_dim=64) - -# Define optimizer -optimizer = torch.optim.Adam(model.parameters(), lr=0.01) - -# Convert train/test data to tensors -train_edges = torch.tensor( - [(user, num_users + item) for user, item in zip(X_train[:, 0], X_train[:, 1])], - dtype=torch.long -).t() -train_labels = torch.tensor(Y_train, dtype=torch.float) - -test_edges = torch.tensor( - [(user, num_users + item) for user, item in zip(X_test[:, 0], X_test[:, 1])], - dtype=torch.long -).t() -test_labels = torch.tensor(Y_test, dtype=torch.float) - -# Training Loop with Accuracy Logging -epochs = 1000 -for epoch in range(epochs): - start_time = time() - - # Training Phase - model.train() - optimizer.zero_grad() - out = model(graph_data.x, graph_data.edge_index) - - # User-item embeddings - user_embeddings = out[train_edges[0]] - item_embeddings = out[train_edges[1]] - predicted_ratings = (user_embeddings * item_embeddings).sum(dim=1) - - # Compute loss - loss = F.mse_loss(predicted_ratings, train_labels) - loss.backward() - optimizer.step() - - # Testing Phase - model.eval() - with torch.no_grad(): - out = 
model(graph_data.x, graph_data.edge_index) - test_user_embeddings = out[test_edges[0]] - test_item_embeddings = out[test_edges[1]] - test_predicted_ratings = (test_user_embeddings * test_item_embeddings).sum(dim=1) - - # Compute accuracy - test_accuracy = ((test_predicted_ratings.round() == test_labels).float().mean().item()) * 100 - - elapsed_time = time() - start_time - print(f"Epoch {epoch + 1}/{epochs}, Loss: {loss.item():.4f}, Accuracy: {test_accuracy:.2f}%, Time: {elapsed_time:.2f}s") +from tmu.tools import BenchmarkTimer +import os +import pandas as pd + +def main(args): + results = [] + data = prepare_dataset.aug_amazon_products(noise_ratio = args.dataset_noise_ratio) + x, y = prepare_dataset.construct_x_y(data) + X_train, X_test, Y_train, Y_test = prepare_dataset.train_test_split(x,y) + # Graph Construction + num_users = len(data['user_id'].unique()) + num_items = len(data['product_id'].unique()) + num_categories = len(data['category'].unique()) + num_nodes = num_users + num_items + num_categories + # Build edge list + edge_list = [] + # User ↔ Item edges + for user, item in zip(X_train[:, 0], X_train[:, 1]): + edge_list.append((user, num_users + item)) # User to Item + edge_list.append((num_users + item, user)) # Item to User + # Item ↔ Category edges + for item, category in zip(X_train[:, 1], X_train[:, 2]): + edge_list.append((num_users + item, num_users + num_items + category)) # Item to Category + edge_list.append((num_users + num_items + category, num_users + item)) # Category to Item + # Create edge index for PyTorch Geometric + edge_index = torch.tensor(edge_list, dtype=torch.long).t() + # Node features + node_features = torch.rand((num_nodes, 64), dtype=torch.float) + # PyTorch Geometric Data object + graph_data = Data(x=node_features, edge_index=edge_index) + # Step 2: Define GCN Model + class GCN(torch.nn.Module): + def __init__(self, input_dim, hidden_dim, output_dim): + super(GCN, self).__init__() + self.conv1 = GCNConv(input_dim, hidden_dim) 
+ self.conv2 = GCNConv(hidden_dim, output_dim) + def forward(self, x, edge_index): + x = self.conv1(x, edge_index) + x = F.relu(x) + x = self.conv2(x, edge_index) + return x + # Initialize Model + model = GCN(input_dim=64, hidden_dim=128, output_dim=64) + # Define optimizer + optimizer = torch.optim.Adam(model.parameters(), lr=0.01) + # Convert train/test data to tensors + train_edges = torch.tensor( + [(user, num_users + item) for user, item in zip(X_train[:, 0], X_train[:, 1])], + dtype=torch.long + ).t() + train_labels = torch.tensor(Y_train, dtype=torch.float) + test_edges = torch.tensor( + [(user, num_users + item) for user, item in zip(X_test[:, 0], X_test[:, 1])], + dtype=torch.long + ).t() + test_labels = torch.tensor(Y_test, dtype=torch.float) + # Training Loop with Accuracy Logging + for epoch in range(args.epochs): + benchmark_total = BenchmarkTimer(logger=None, text="Epoch Time") + with benchmark_total: + benchmark1 = BenchmarkTimer(logger=None, text="Training Time") + with benchmark1: + # Training Phase + model.train() + optimizer.zero_grad() + out = model(graph_data.x, graph_data.edge_index) + # User-item embeddings + user_embeddings = out[train_edges[0]] + item_embeddings = out[train_edges[1]] + predicted_ratings = (user_embeddings * item_embeddings).sum(dim=1) + # Compute loss + loss = F.mse_loss(predicted_ratings, train_labels) + loss.backward() + optimizer.step() + train_time = benchmark1.elapsed() + # Testing Phase + benchmark2 = BenchmarkTimer(logger=None, text="Testing Time") + with benchmark2: + model.eval() + with torch.no_grad(): + out = model(graph_data.x, graph_data.edge_index) + test_user_embeddings = out[test_edges[0]] + test_item_embeddings = out[test_edges[1]] + test_predicted_ratings = (test_user_embeddings * test_item_embeddings).sum(dim=1) + # Compute accuracy + accuracy = ((test_predicted_ratings.round() == test_labels).float().mean().item()) * 100 + test_time = benchmark2.elapsed() + total_time = benchmark_total.elapsed() + # 
Append results for each epoch + results.append({ + "Algorithm": "Graph NN", + "Noise_Ratio": args.dataset_noise_ratio, + "T": 0, + "s": 0, + "Max_Included_Literals": 0, + "Epochs": args.epochs, + "Platform": args.platform, + "Total_Time": total_time, + "Accuracy": accuracy, + }) + + # Save results to CSV + results_df = pd.DataFrame(results) + results_file = "experiment_results.csv" + if os.path.exists(results_file): + results_df.to_csv(results_file, mode='a', index=False, header=False) + else: + results_df.to_csv(results_file, index=False) + print(f"Results saved to {results_file}") + + +def default_args(**kwargs): + parser = argparse.ArgumentParser() + parser.add_argument("--platform", default="CPU", type=str, choices=["CPU", "CUDA"]) + parser.add_argument("--epochs", default=1000, type=int) + parser.add_argument("--dataset_noise_ratio", default=0.01, type=float) + args = parser.parse_args() + for key, value in kwargs.items(): + if key in args.__dict__: + setattr(args, key, value) + return args + +if __name__ == "__main__": + main(default_args()) \ No newline at end of file diff --git a/examples/applications/products_recommendation/graph_tm.py b/examples/applications/products_recommendation/graph_tm.py index 41168a93..0ec2171c 100644 --- a/examples/applications/products_recommendation/graph_tm.py +++ b/examples/applications/products_recommendation/graph_tm.py @@ -1,9 +1,145 @@ from GraphTsetlinMachine.graphs import Graphs from GraphTsetlinMachine.tm import MultiClassGraphTsetlinMachine -from time import time import argparse import numpy as np import prepare_dataset +import pandas as pd +from tmu.tools import BenchmarkTimer +import os + +def main(args): + np.random.seed(42) + results = [] + data = prepare_dataset.aug_amazon_products(noise_ratio = args.dataset_noise_ratio) + x, y = prepare_dataset.construct_x_y(data) + X_train, X_test, Y_train, Y_test = prepare_dataset.train_test_split(x,y) + users = data['user_id'].unique() + items = data['product_id'].unique() + 
categories = data['category'].unique() + # Initialize Graphs with symbols for GTM + number_of_nodes = 3 + symbols = [] + symbols = ["U_" + str(u) for u in users] + ["I_" + str(i) for i in items] + ["C_" + str(c) for c in categories] + print("Symbols: ",len(symbols)) + + # Train data + graphs_train = Graphs( + X_train.shape[0], + symbols=symbols, + hypervector_size=args.hypervector_size, + hypervector_bits=args.hypervector_bits, + double_hashing = args.double_hashing + ) + for graph_id in range(X_train.shape[0]): + graphs_train.set_number_of_graph_nodes(graph_id, number_of_nodes) + graphs_train.prepare_node_configuration() + for graph_id in range(X_train.shape[0]): + for node_id in range(graphs_train.number_of_graph_nodes[graph_id]): + number_of_edges = 2 if node_id > 0 and node_id < graphs_train.number_of_graph_nodes[graph_id]-1 else 1 + if node_id == 0: + graphs_train.add_graph_node(graph_id, "User", number_of_edges) + elif node_id == 1: + graphs_train.add_graph_node(graph_id, "Item", number_of_edges) + else: + graphs_train.add_graph_node(graph_id, "Category", number_of_edges) + graphs_train.prepare_edge_configuration() + for graph_id in range(X_train.shape[0]): + for node_id in range(graphs_train.number_of_graph_nodes[graph_id]): + if node_id == 0: + graphs_train.add_graph_node_edge(graph_id, "User", "Item", "UserItem") + + if node_id == 1: + graphs_train.add_graph_node_edge(graph_id, "Item", "Category", "ItemCategory") + graphs_train.add_graph_node_edge(graph_id, "Item", "User", "ItemUser") + + if node_id == 2: + graphs_train.add_graph_node_edge(graph_id, "Category", "Item", "CatrgoryItem") + + graphs_train.add_graph_node_property(graph_id, "User", "U_" + str(X_train[graph_id][0])) + graphs_train.add_graph_node_property(graph_id, "Item", "I_" + str(X_train[graph_id][1])) + graphs_train.add_graph_node_property(graph_id, "Category", "C_" + str(X_train[graph_id][2])) + graphs_train.encode() + print("Training data produced") + + # Test data + graphs_test = 
Graphs(X_test.shape[0], init_with=graphs_train) + for graph_id in range(X_test.shape[0]): + graphs_test.set_number_of_graph_nodes(graph_id, number_of_nodes) + graphs_test.prepare_node_configuration() + for graph_id in range(X_test.shape[0]): + for node_id in range(graphs_test.number_of_graph_nodes[graph_id]): + number_of_edges = 2 if node_id > 0 and node_id < graphs_test.number_of_graph_nodes[graph_id]-1 else 1 + if node_id == 0: + graphs_test.add_graph_node(graph_id, "User", number_of_edges) + elif node_id == 1: + graphs_test.add_graph_node(graph_id, "Item", number_of_edges) + else: + graphs_test.add_graph_node(graph_id, "Category", number_of_edges) + graphs_test.prepare_edge_configuration() + for graph_id in range(X_test.shape[0]): + for node_id in range(graphs_test.number_of_graph_nodes[graph_id]): + if node_id == 0: + graphs_test.add_graph_node_edge(graph_id, "User", "Item", "UserItem") + + if node_id == 1: + graphs_test.add_graph_node_edge(graph_id, "Item", "Category", "ItemCategory") + graphs_test.add_graph_node_edge(graph_id, "Item", "User", "ItemUser") + + if node_id == 2: + graphs_test.add_graph_node_edge(graph_id, "Category", "Item", "CatrgoryItem") + + graphs_test.add_graph_node_property(graph_id, "User", "U_" + str(X_test[graph_id][0])) + graphs_test.add_graph_node_property(graph_id, "Item", "I_" + str(X_test[graph_id][1])) + graphs_test.add_graph_node_property(graph_id, "Category", "C_" + str(X_test[graph_id][2])) + graphs_test.encode() + print("Testing data produced") + + tm = MultiClassGraphTsetlinMachine( + args.number_of_clauses, + args.T, + args.s, + number_of_state_bits = args.number_of_state_bits, + depth=args.depth, + message_size=args.message_size, + message_bits=args.message_bits, + max_included_literals=args.max_included_literals, + double_hashing = args.double_hashing + ) + + for epoch in range(args.epochs): + benchmark_total = BenchmarkTimer(logger=None, text="Epoch Time") + with benchmark_total: + benchmark1 = BenchmarkTimer(logger=None, 
text="Training Time") + with benchmark1: + tm.fit(graphs_train, Y_train, epochs=1, incremental=True) + train_time = benchmark1.elapsed() + + benchmark2 = BenchmarkTimer(logger=None, text="Testing Time") + with benchmark2: + accuracy = 100*(tm.predict(graphs_test) == Y_test).mean() + test_time = benchmark2.elapsed() + total_time = benchmark_total.elapsed() + # result_train = 100*(tm.predict(graphs_train) == Y_train).mean() + results.append({ + "Algorithm": "GraphTM", + "Noise_Ratio": args.dataset_noise_ratio, + "T": args.T, + "s": args.s, + "Max_Included_Literals": args.max_included_literals, + "Epochs": args.epochs, + "Platform": "CUDA", + "Total_Time": total_time, + "Accuracy": accuracy, + }) + + # Save results to CSV + results_df = pd.DataFrame(results) + results_file = "experiment_results.csv" + if os.path.exists(results_file): + results_df.to_csv(results_file, mode='a', index=False, header=False) + else: + results_df.to_csv(results_file, index=False) + print(f"Results saved to {results_file}") def default_args(**kwargs): parser = argparse.ArgumentParser() @@ -20,145 +156,12 @@ def default_args(**kwargs): parser.add_argument('--double-hashing', dest='double_hashing', default=False, action='store_true') parser.add_argument("--noise", default=0.01, type=float) parser.add_argument("--max-included-literals", default=23, type=int) - + parser.add_argument("--dataset_noise_ratio", default=0.01, type=float) args = parser.parse_args() for key, value in kwargs.items(): if key in args.__dict__: setattr(args, key, value) return args -args = default_args() -np.random.seed(42) - -# data = prepare_dataset.amazon_products() -data = prepare_dataset.aug_amazon_products() -# data = prepare_dataset.artificial() -# data = prepare_dataset.artificial_with_user_pref() -# data = prepare_dataset.artificial_pattered() -# print(data.head()) -x, y = prepare_dataset.construct_x_y(data) -X_train, X_test, Y_train, Y_test = prepare_dataset.train_test_split(x,y) -users = data['user_id'].unique() 
-items = data['product_id'].unique() -categories = data['category'].unique() -# Initialize Graphs with symbols for GTM -number_of_nodes = 3 -symbols = [] -symbols = ["U_" + str(u) for u in users] + ["I_" + str(i) for i in items] + ["C_" + str(c) for c in categories] -print("Symbols: ",len(symbols)) - -# Train data -graphs_train = Graphs( - X_train.shape[0], - symbols=symbols, - hypervector_size=args.hypervector_size, - hypervector_bits=args.hypervector_bits, - double_hashing = args.double_hashing -) -for graph_id in range(X_train.shape[0]): - graphs_train.set_number_of_graph_nodes(graph_id, number_of_nodes) -graphs_train.prepare_node_configuration() -for graph_id in range(X_train.shape[0]): - for node_id in range(graphs_train.number_of_graph_nodes[graph_id]): - number_of_edges = 2 if node_id > 0 and node_id < graphs_train.number_of_graph_nodes[graph_id]-1 else 1 - if node_id == 0: - graphs_train.add_graph_node(graph_id, "User", number_of_edges) - elif node_id == 1: - graphs_train.add_graph_node(graph_id, "Item", number_of_edges) - else: - graphs_train.add_graph_node(graph_id, "Category", number_of_edges) -graphs_train.prepare_edge_configuration() -for graph_id in range(X_train.shape[0]): - for node_id in range(graphs_train.number_of_graph_nodes[graph_id]): - if node_id == 0: - graphs_train.add_graph_node_edge(graph_id, "User", "Item", "UserItem") - - if node_id == 1: - graphs_train.add_graph_node_edge(graph_id, "Item", "Category", "ItemCategory") - graphs_train.add_graph_node_edge(graph_id, "Item", "User", "ItemUser") - - if node_id == 2: - graphs_train.add_graph_node_edge(graph_id, "Category", "Item", "CatrgoryItem") - - graphs_train.add_graph_node_property(graph_id, "User", "U_" + str(X_train[graph_id][0])) - graphs_train.add_graph_node_property(graph_id, "Item", "I_" + str(X_train[graph_id][1])) - graphs_train.add_graph_node_property(graph_id, "Category", "C_" + str(X_train[graph_id][2])) -graphs_train.encode() -print("Training data produced") - -# Test data 
-graphs_test = Graphs(X_test.shape[0], init_with=graphs_train) -for graph_id in range(X_test.shape[0]): - graphs_test.set_number_of_graph_nodes(graph_id, number_of_nodes) -graphs_test.prepare_node_configuration() -for graph_id in range(X_test.shape[0]): - for node_id in range(graphs_test.number_of_graph_nodes[graph_id]): - number_of_edges = 2 if node_id > 0 and node_id < graphs_test.number_of_graph_nodes[graph_id]-1 else 1 - if node_id == 0: - graphs_test.add_graph_node(graph_id, "User", number_of_edges) - elif node_id == 1: - graphs_test.add_graph_node(graph_id, "Item", number_of_edges) - else: - graphs_test.add_graph_node(graph_id, "Category", number_of_edges) -graphs_test.prepare_edge_configuration() -for graph_id in range(X_test.shape[0]): - for node_id in range(graphs_test.number_of_graph_nodes[graph_id]): - if node_id == 0: - graphs_test.add_graph_node_edge(graph_id, "User", "Item", "UserItem") - - if node_id == 1: - graphs_test.add_graph_node_edge(graph_id, "Item", "Category", "ItemCategory") - graphs_test.add_graph_node_edge(graph_id, "Item", "User", "ItemUser") - - if node_id == 2: - graphs_test.add_graph_node_edge(graph_id, "Category", "Item", "CatrgoryItem") - - graphs_test.add_graph_node_property(graph_id, "User", "U_" + str(X_test[graph_id][0])) - graphs_test.add_graph_node_property(graph_id, "Item", "I_" + str(X_test[graph_id][1])) - graphs_test.add_graph_node_property(graph_id, "Category", "C_" + str(X_test[graph_id][2])) -graphs_test.encode() -print("Testing data produced") - -tm = MultiClassGraphTsetlinMachine( - args.number_of_clauses, - args.T, - args.s, - number_of_state_bits = args.number_of_state_bits, - depth=args.depth, - message_size=args.message_size, - message_bits=args.message_bits, - max_included_literals=args.max_included_literals, - double_hashing = args.double_hashing -) - -for i in range(args.epochs): - start_training = time() - tm.fit(graphs_train, Y_train, epochs=1, incremental=True) - stop_training = time() - - start_testing = 
time() - result_test = 100*(tm.predict(graphs_test) == Y_test).mean() - stop_testing = time() - - result_train = 100*(tm.predict(graphs_train) == Y_train).mean() - print("%d %.2f %.2f %.2f %.2f" % (i, result_train, result_test, stop_training-start_training, stop_testing-start_testing)) -# weights = tm.get_state()[1].reshape(2, -1) -# for i in range(tm.number_of_clauses): -# print("Clause #%d W:(%d %d)" % (i, weights[0,i], weights[1,i]), end=' ') -# l = [] -# for k in range(args.hypervector_size * 2): -# if tm.ta_action(0, i, k): -# if k < args.hypervector_size: -# l.append("x%d" % (k)) -# else: -# l.append("NOT x%d" % (k - args.hypervector_size)) -# for k in range(args.message_size * 2): -# if tm.ta_action(1, i, k): -# if k < args.message_size: -# l.append("c%d" % (k)) -# else: -# l.append("NOT c%d" % (k - args.message_size)) -# print(" AND ".join(l)) -# print(graphs_test.hypervectors) -# print(tm.hypervectors) -# print(graphs_test.edge_type_id) \ No newline at end of file +if __name__ == "__main__": + main(default_args()) \ No newline at end of file diff --git a/examples/applications/products_recommendation/main.sh b/examples/applications/products_recommendation/main.sh new file mode 100644 index 00000000..8c7a22ad --- /dev/null +++ b/examples/applications/products_recommendation/main.sh @@ -0,0 +1,16 @@ +echo `date`, Setup the environment ... +set -e # exit if error + +models="graph_tm tm_classifier graph_nn" +dataset_noise_ratios="0.005 0.01 0.02 0.05 0.1 0.2" + +for N in $dataset_noise_ratios; do + echo `date`, Running Graph NN ... + python3 graph_nn.py --dataset_noise_ratio $N + + echo `date`, Running Graph Tsetlin Machine ... + python3 graph_tm.py --dataset_noise_ratio $N + + echo `date`, Running Tsetlin Machine Classifier ... 
+ python3 tm_classifier.py --dataset_noise_ratio $N +done \ No newline at end of file diff --git a/examples/applications/products_recommendation/test.ipynb b/examples/applications/products_recommendation/test.ipynb deleted file mode 100644 index 1465bf14..00000000 --- a/examples/applications/products_recommendation/test.ipynb +++ /dev/null @@ -1,271 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/usr/local/lib/python3.10/dist-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n", - "usage: ipykernel_launcher.py [-h] [--epochs EPOCHS]\n", - " [--number-of-clauses NUMBER_OF_CLAUSES] [--T T]\n", - " [--s S]\n", - " [--number-of-state-bits NUMBER_OF_STATE_BITS]\n", - " [--depth DEPTH]\n", - " [--hypervector-size HYPERVECTOR_SIZE]\n", - " [--hypervector-bits HYPERVECTOR_BITS]\n", - " [--message-size MESSAGE_SIZE]\n", - " [--message-bits MESSAGE_BITS] [--double-hashing]\n", - " [--noise NOISE]\n", - " [--max-included-literals MAX_INCLUDED_LITERALS]\n", - "ipykernel_launcher.py: error: unrecognized arguments: --f=/root/.local/share/jupyter/runtime/kernel-v306f6e67794e909fd94dbef768cafee2e613728cc.json\n" - ] - }, - { - "ename": "SystemExit", - "evalue": "2", - "output_type": "error", - "traceback": [ - "An exception has occurred, use %tb to see the full traceback.\n", - "\u001b[0;31mSystemExit\u001b[0m\u001b[0;31m:\u001b[0m 2\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/root/.local/lib/python3.10/site-packages/IPython/core/interactiveshell.py:3585: UserWarning: To exit: use 'exit', 'quit', or Ctrl-D.\n", - " warn(\"To exit: use 'exit', 'quit', or Ctrl-D.\", stacklevel=1)\n" - ] - } - ], - "source": [ - "from GraphTsetlinMachine.graphs import 
Graphs\n", - "from GraphTsetlinMachine.tm import MultiClassGraphTsetlinMachine\n", - "from time import time\n", - "import argparse\n", - "import pandas as pd\n", - "import numpy as np\n", - "import kagglehub\n", - "from sklearn.model_selection import train_test_split\n", - "from sklearn.preprocessing import LabelEncoder\n", - "\n", - "def default_args(**kwargs):\n", - " parser = argparse.ArgumentParser()\n", - " parser.add_argument(\"--epochs\", default=250, type=int)\n", - " parser.add_argument(\"--number-of-clauses\", default=10000, type=int)\n", - " parser.add_argument(\"--T\", default=10000, type=int)\n", - " parser.add_argument(\"--s\", default=10.0, type=float)\n", - " parser.add_argument(\"--number-of-state-bits\", default=8, type=int)\n", - " parser.add_argument(\"--depth\", default=1, type=int)\n", - " parser.add_argument(\"--hypervector-size\", default=4096, type=int)\n", - " parser.add_argument(\"--hypervector-bits\", default=256, type=int)\n", - " parser.add_argument(\"--message-size\", default=4096, type=int)\n", - " parser.add_argument(\"--message-bits\", default=256, type=int)\n", - " parser.add_argument('--double-hashing', dest='double_hashing', default=False, action='store_true')\n", - " parser.add_argument(\"--noise\", default=0.01, type=float)\n", - " parser.add_argument(\"--max-included-literals\", default=10, type=int)\n", - "\n", - " args = parser.parse_args()\n", - " for key, value in kwargs.items():\n", - " if key in args.__dict__:\n", - " setattr(args, key, value)\n", - " return args\n", - "\n", - "args = default_args()\n", - "\n", - "############################# real dataset ########################\n", - "\n", - "print(\"Creating training data\")\n", - "path = kagglehub.dataset_download(\"karkavelrajaj/amazon-sales-dataset\")\n", - "print(\"Path to dataset files:\", path)\n", - "data_file = path + \"/amazon.csv\" \n", - "org_data = pd.read_csv(data_file)\n", - "# print(\"Data preview:\", data.head())\n", - "org_data = 
org_data[['product_id', 'category', 'user_id', 'rating']]\n", - "#################################### expanded \n", - "org_data['rating'] = pd.to_numeric(org_data['rating'], errors='coerce') # Coerce invalid values to NaN\n", - "org_data.dropna(subset=['rating'], inplace=True) # Drop rows with NaN ratings\n", - "org_data['rating'] = org_data['rating'].astype(int)\n", - "# Expand the dataset 10 times\n", - "data = pd.concat([org_data] * 10, ignore_index=True)\n", - "\n", - "# Shuffle the expanded dataset\n", - "data = data.sample(frac=1, random_state=42).reset_index(drop=True)\n", - "\n", - "# Add noise\n", - "# Define the noise ratio\n", - "noise_ratio = 0.1 # 10% noise\n", - "\n", - "# Select rows to apply noise\n", - "num_noisy_rows = int(noise_ratio * len(data))\n", - "noisy_indices = np.random.choice(data.index, size=num_noisy_rows, replace=False)\n", - "\n", - "# Add noise to ratings\n", - "data.loc[noisy_indices, 'rating'] = np.random.choice(range(1, 6), size=num_noisy_rows)\n", - "\n", - "# Add noise to categories\n", - "unique_categories = data['category'].unique()\n", - "data.loc[noisy_indices, 'category'] = np.random.choice(unique_categories, size=num_noisy_rows)\n", - "\n", - "# Print a preview of the noisy and expanded dataset\n", - "print(\"Original data shape:\", org_data.shape)\n", - "print(\"Expanded data shape:\", data.shape)\n", - "print(\"Data preview:\\n\", data.head())\n", - "\n", - "print(data.head())\n", - " \n", - "le_user = LabelEncoder()\n", - "le_item = LabelEncoder()\n", - "le_category = LabelEncoder()\n", - "le_rating = LabelEncoder() \n", - "\n", - "data['user_id'] = le_user.fit_transform(data['user_id'])\n", - "data['product_id'] = le_item.fit_transform(data['product_id'])\n", - "data['category'] = le_category.fit_transform(data['category'])\n", - "data['rating'] = le_rating.fit_transform(data['rating'])\n", - "\n", - "x = data[['user_id', 'product_id', 'category']].values \n", - "y = data['rating'].values \n", - "\n", - "X_train, 
X_test, Y_train, Y_test = train_test_split(x, y, test_size=0.2, random_state=42)\n", - "\n", - "print(\"X_train shape:\", X_train.shape)\n", - "print(\"y_train shape:\", Y_train.shape)\n", - "print(\"X_test shape:\", X_test.shape)\n", - "print(\"y_test shape:\", Y_test.shape)\n", - "\n", - "users = data['user_id'].unique()\n", - "items = data['product_id'].unique()\n", - "categories = data['category'].unique()\n", - "\n", - "# Initialize Graphs with symbols for GTM\n", - "number_of_nodes = 3\n", - "symbols = []\n", - "symbols = [\"U_\" + str(u) for u in users] + [\"I_\" + str(i) for i in items] + [\"C_\" + str(c) for c in categories] \n", - "print(len(symbols))\n", - "# Train data\n", - "graphs_train = Graphs(\n", - " X_train.shape[0],\n", - " symbols=symbols,\n", - " hypervector_size=args.hypervector_size,\n", - " hypervector_bits=args.hypervector_bits,\n", - " double_hashing = args.double_hashing\n", - ")\n", - "for graph_id in range(X_train.shape[0]):\n", - " graphs_train.set_number_of_graph_nodes(graph_id, number_of_nodes)\n", - "graphs_train.prepare_node_configuration()\n", - "for graph_id in range(X_train.shape[0]):\n", - " for node_id in range(graphs_train.number_of_graph_nodes[graph_id]):\n", - " number_of_edges = 2 if node_id > 0 and node_id < graphs_train.number_of_graph_nodes[graph_id]-1 else 1\n", - " if node_id == 0:\n", - " graphs_train.add_graph_node(graph_id, \"User\", number_of_edges)\n", - " elif node_id == 1:\n", - " graphs_train.add_graph_node(graph_id, \"Item\", number_of_edges)\n", - " else:\n", - " graphs_train.add_graph_node(graph_id, \"Category\", number_of_edges)\n", - "graphs_train.prepare_edge_configuration()\n", - "for graph_id in range(X_train.shape[0]):\n", - " for node_id in range(graphs_train.number_of_graph_nodes[graph_id]):\n", - " if node_id == 0:\n", - " graphs_train.add_graph_node_edge(graph_id, \"User\", \"Item\", \"UserItem\")\n", - " \n", - " if node_id == 1:\n", - " graphs_train.add_graph_node_edge(graph_id, \"Item\", 
\"Category\", \"ItemCategory\")\n", - " graphs_train.add_graph_node_edge(graph_id, \"Item\", \"User\", \"ItemUser\")\n", - " \n", - " if node_id == 2:\n", - " graphs_train.add_graph_node_edge(graph_id, \"Category\", \"Item\", \"CatrgoryItem\")\n", - "\n", - " graphs_train.add_graph_node_property(graph_id, \"User\", \"U_\" + str(X_train[graph_id][0]))\n", - " graphs_train.add_graph_node_property(graph_id, \"Item\", \"I_\" + str(X_train[graph_id][1]))\n", - " graphs_train.add_graph_node_property(graph_id, \"Category\", \"C_\" + str(X_train[graph_id][2]))\n", - "graphs_train.encode()\n", - "print(\"Training data produced\")\n", - "\n", - "# Test data\n", - "graphs_test = Graphs(X_test.shape[0], init_with=graphs_train)\n", - "for graph_id in range(X_test.shape[0]):\n", - " graphs_test.set_number_of_graph_nodes(graph_id, number_of_nodes)\n", - "graphs_test.prepare_node_configuration()\n", - "for graph_id in range(X_test.shape[0]):\n", - " for node_id in range(graphs_test.number_of_graph_nodes[graph_id]):\n", - " number_of_edges = 2 if node_id > 0 and node_id < graphs_test.number_of_graph_nodes[graph_id]-1 else 1\n", - " if node_id == 0:\n", - " graphs_test.add_graph_node(graph_id, \"User\", number_of_edges)\n", - " elif node_id == 1:\n", - " graphs_test.add_graph_node(graph_id, \"Item\", number_of_edges)\n", - " else:\n", - " graphs_test.add_graph_node(graph_id, \"Category\", number_of_edges)\n", - "graphs_test.prepare_edge_configuration()\n", - "for graph_id in range(X_test.shape[0]):\n", - " for node_id in range(graphs_test.number_of_graph_nodes[graph_id]):\n", - " if node_id == 0:\n", - " graphs_test.add_graph_node_edge(graph_id, \"User\", \"Item\", \"UserItem\")\n", - " \n", - " if node_id == 1:\n", - " graphs_test.add_graph_node_edge(graph_id, \"Item\", \"Category\", \"ItemCategory\")\n", - " graphs_test.add_graph_node_edge(graph_id, \"Item\", \"User\", \"ItemUser\")\n", - " \n", - " if node_id == 2:\n", - " graphs_test.add_graph_node_edge(graph_id, \"Category\", 
\"Item\", \"CatrgoryItem\")\n", - "\n", - " graphs_test.add_graph_node_property(graph_id, \"User\", \"U_\" + str(X_test[graph_id][0]))\n", - " graphs_test.add_graph_node_property(graph_id, \"Item\", \"I_\" + str(X_test[graph_id][1]))\n", - " graphs_test.add_graph_node_property(graph_id, \"Category\", \"C_\" + str(X_test[graph_id][2]))\n", - "graphs_test.encode()\n", - "print(\"Testing data produced\")\n", - "\n", - "tm = MultiClassGraphTsetlinMachine(\n", - " args.number_of_clauses,\n", - " args.T,\n", - " args.s,\n", - " number_of_state_bits = args.number_of_state_bits,\n", - " depth=args.depth,\n", - " message_size=args.message_size,\n", - " message_bits=args.message_bits,\n", - " max_included_literals=args.max_included_literals,\n", - " double_hashing = args.double_hashing\n", - ")\n", - "\n", - "for i in range(args.epochs):\n", - " start_training = time()\n", - " tm.fit(graphs_train, Y_train, epochs=1, incremental=True)\n", - " stop_training = time()\n", - "\n", - " start_testing = time()\n", - " result_test = 100*(tm.predict(graphs_test) == Y_test).mean()\n", - " stop_testing = time()\n", - "\n", - " result_train = 100*(tm.predict(graphs_train) == Y_train).mean()\n", - "\n", - " print(\"%d %.2f %.2f %.2f %.2f\" % (i, result_train, result_test, stop_training-start_training, stop_testing-start_testing))" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.12" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/examples/applications/products_recommendation/tm_classifier.py b/examples/applications/products_recommendation/tm_classifier.py index b390764a..1a2928d0 100644 --- a/examples/applications/products_recommendation/tm_classifier.py 
+++ b/examples/applications/products_recommendation/tm_classifier.py @@ -1,60 +1,59 @@ -import logging import argparse from tmu.models.classification.vanilla_classifier import TMClassifier from tmu.tools import BenchmarkTimer -from tmu.util.cuda_profiler import CudaProfiler import prepare_dataset +import pandas as pd +import os -_LOGGER = logging.getLogger(__name__) - -def metrics(args): - return dict( - accuracy=[], - train_time=[], - test_time=[], - args=vars(args) - ) - -def main(args): - experiment_results = metrics(args) - data = prepare_dataset.aug_amazon_products() +def main(args): + results = [] + data = prepare_dataset.aug_amazon_products(noise_ratio = args.dataset_noise_ratio) x, y = prepare_dataset.construct_x_y(data) X_train, X_test, Y_train, Y_test = prepare_dataset.one_hot_encoding(x,y) - tm = TMClassifier( number_of_clauses=args.num_clauses, T=args.T, s=args.s, max_included_literals=args.max_included_literals, platform=args.platform, - weighted_clauses=args.weighted_clauses + weighted_clauses=args.weighted_clauses, ) - _LOGGER.info(f"Running {TMClassifier} for {args.epochs}") + for epoch in range(args.epochs): benchmark_total = BenchmarkTimer(logger=None, text="Epoch Time") with benchmark_total: benchmark1 = BenchmarkTimer(logger=None, text="Training Time") with benchmark1: - res = tm.fit( - X_train, - Y_train, - ) - - experiment_results["train_time"].append(benchmark1.elapsed()) + tm.fit(X_train, Y_train) + train_time = benchmark1.elapsed() benchmark2 = BenchmarkTimer(logger=None, text="Testing Time") with benchmark2: - result = 100 * (tm.predict(X_test) == Y_test).mean() - experiment_results["accuracy"].append(result) - experiment_results["test_time"].append(benchmark2.elapsed()) - - _LOGGER.info(f"Epoch: {epoch + 1}, Accuracy: {result:.2f}, Training Time: {benchmark1.elapsed():.2f}s, " - f"Testing Time: {benchmark2.elapsed():.2f}s") - - if args.platform == "CUDA": - CudaProfiler().print_timings(benchmark=benchmark_total) - - return 
experiment_results + accuracy = 100 * (tm.predict(X_test) == Y_test).mean() + test_time = benchmark2.elapsed() + total_time = benchmark_total.elapsed() + + # Append results for each epoch + results.append({ + "Algorithm": "TMClassifier", + "Noise_Ratio": args.dataset_noise_ratio, + "T": args.T, + "s": args.s, + "Max_Included_Literals": args.max_included_literals, + "Epochs": args.epochs, + "Platform": args.platform, + "Total_Time": total_time, + "Accuracy": accuracy, + }) + # Save results to CSV + results_df = pd.DataFrame(results) + results_file = "experiment_results.csv" + if os.path.exists(results_file): + results_df.to_csv(results_file, mode='a', index=False, header=False) + else: + results_df.to_csv(results_file, index=False) + print(f"Results saved to {results_file}") + def default_args(**kwargs): parser = argparse.ArgumentParser() parser.add_argument("--num_clauses", default=2000, type=int) @@ -64,6 +63,7 @@ def default_args(**kwargs): parser.add_argument("--platform", default="CPU_sparse", type=str, choices=["CPU", "CPU_sparse", "CUDA"]) parser.add_argument("--weighted_clauses", default=True, type=bool) parser.add_argument("--epochs", default=10, type=int) + parser.add_argument("--dataset_noise_ratio", default=0.01, type=float) args = parser.parse_args() for key, value in kwargs.items(): if key in args.__dict__: @@ -71,5 +71,4 @@ def default_args(**kwargs): return args if __name__ == "__main__": - results = main(default_args()) - _LOGGER.info(results) \ No newline at end of file + main(default_args()) \ No newline at end of file From 84d8012259f3a253f102b6321f508a1da474743d Mon Sep 17 00:00:00 2001 From: Ahmed Khalid Date: Wed, 25 Dec 2024 15:40:00 +0000 Subject: [PATCH 21/29] add results --- .../experiment_results.csv | 6 ------ examples/recomm_system/README.md | 2 ++ .../prepare_dataset.cpython-310.pyc | Bin examples/recomm_system/experiment_results.csv | 19 ++++++++++++++++++ .../graph_nn.py | 0 .../graph_tm.py | 0 .../main.sh | 0 .../prepare_dataset.py 
| 0 .../tm_classifier.py | 0 9 files changed, 21 insertions(+), 6 deletions(-) delete mode 100644 examples/applications/products_recommendation/experiment_results.csv create mode 100644 examples/recomm_system/README.md rename examples/{applications/products_recommendation => recomm_system}/__pycache__/prepare_dataset.cpython-310.pyc (100%) create mode 100644 examples/recomm_system/experiment_results.csv rename examples/{applications/products_recommendation => recomm_system}/graph_nn.py (100%) rename examples/{applications/products_recommendation => recomm_system}/graph_tm.py (100%) rename examples/{applications/products_recommendation => recomm_system}/main.sh (100%) rename examples/{applications/products_recommendation => recomm_system}/prepare_dataset.py (100%) rename examples/{applications/products_recommendation => recomm_system}/tm_classifier.py (100%) diff --git a/examples/applications/products_recommendation/experiment_results.csv b/examples/applications/products_recommendation/experiment_results.csv deleted file mode 100644 index d3f66d27..00000000 --- a/examples/applications/products_recommendation/experiment_results.csv +++ /dev/null @@ -1,6 +0,0 @@ -Algorithm,Noise_Ratio,T,s,Max_Included_Literals,Epochs,Platform,Total_Time,Accuracy -Graph NN,0.005,0,0,0,1000,CPU,0.03006434440612793,76.72131061553955 -GraphTM,0.005,10000,10.0,23,10,CUDA,34.547648191452026,98.46994535519126 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,89.6943154335022,76.63934426229508 -Graph NN,0.01,0,0,0,1000,CPU,0.01817464828491211,75.95628499984741 -GraphTM,0.01,10000,10.0,23,10,CUDA,34.95576763153076,98.44262295081967 diff --git a/examples/recomm_system/README.md b/examples/recomm_system/README.md new file mode 100644 index 00000000..e7fa211a --- /dev/null +++ b/examples/recomm_system/README.md @@ -0,0 +1,2 @@ +cd examples/recomm_system/ +bash main.sh \ No newline at end of file diff --git a/examples/applications/products_recommendation/__pycache__/prepare_dataset.cpython-310.pyc 
b/examples/recomm_system/__pycache__/prepare_dataset.cpython-310.pyc similarity index 100% rename from examples/applications/products_recommendation/__pycache__/prepare_dataset.cpython-310.pyc rename to examples/recomm_system/__pycache__/prepare_dataset.cpython-310.pyc diff --git a/examples/recomm_system/experiment_results.csv b/examples/recomm_system/experiment_results.csv new file mode 100644 index 00000000..cb6e80f7 --- /dev/null +++ b/examples/recomm_system/experiment_results.csv @@ -0,0 +1,19 @@ +Algorithm,Noise_Ratio,T,s,Max_Included_Literals,Epochs,Platform,Total_Time,Accuracy +Graph NN,0.005,0,0,0,1000,CPU,0.03006434440612793,76.72131061553955 +GraphTM,0.005,10000,10.0,23,10,CUDA,34.547648191452026,98.46994535519126 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,89.6943154335022,76.63934426229508 +Graph NN,0.01,0,0,0,1000,CPU,0.01817464828491211,75.95628499984741 +GraphTM,0.01,10000,10.0,23,10,CUDA,34.95576763153076,98.44262295081967 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,96.10501098632812,74.93169398907104 +Graph NN,0.02,0,0,0,1000,CPU,0.03073263168334961,81.22950792312622 +GraphTM,0.02,10000,10.0,23,10,CUDA,36.0724892616272,97.43169398907104 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,95.67133641242981,72.40437158469946 +Graph NN,0.05,0,0,0,1000,CPU,0.014258623123168945,83.52459073066711 +GraphTM,0.05,10000,10.0,23,10,CUDA,38.86628317832947,95.0 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,96.7427487373352,64.65163934426229 +Graph NN,0.1,0,0,0,1000,CPU,0.022305965423583984,73.33333492279053 +GraphTM,0.1,10000,10.0,23,10,CUDA,37.45086216926575,90.08196721311475 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,90.45554423332214,49.8292349726776 +Graph NN,0.2,0,0,0,1000,CPU,0.03204679489135742,59.863388538360596 +GraphTM,0.2,10000,10.0,23,10,CUDA,16.268279790878296,78.77049180327869 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,96.16712856292725,20.184426229508194 diff --git a/examples/applications/products_recommendation/graph_nn.py 
b/examples/recomm_system/graph_nn.py similarity index 100% rename from examples/applications/products_recommendation/graph_nn.py rename to examples/recomm_system/graph_nn.py diff --git a/examples/applications/products_recommendation/graph_tm.py b/examples/recomm_system/graph_tm.py similarity index 100% rename from examples/applications/products_recommendation/graph_tm.py rename to examples/recomm_system/graph_tm.py diff --git a/examples/applications/products_recommendation/main.sh b/examples/recomm_system/main.sh similarity index 100% rename from examples/applications/products_recommendation/main.sh rename to examples/recomm_system/main.sh diff --git a/examples/applications/products_recommendation/prepare_dataset.py b/examples/recomm_system/prepare_dataset.py similarity index 100% rename from examples/applications/products_recommendation/prepare_dataset.py rename to examples/recomm_system/prepare_dataset.py diff --git a/examples/applications/products_recommendation/tm_classifier.py b/examples/recomm_system/tm_classifier.py similarity index 100% rename from examples/applications/products_recommendation/tm_classifier.py rename to examples/recomm_system/tm_classifier.py From d68ae7153845b1ed2f09ebf2b2726a9e21444b99 Mon Sep 17 00:00:00 2001 From: Ahmed Khalid Date: Thu, 26 Dec 2024 16:01:53 +0000 Subject: [PATCH 22/29] fair comparisons --- examples/recomm_system/experiment_results.csv | 36 +++++++++++++++++++ examples/recomm_system/graph_nn.py | 12 +++---- examples/recomm_system/graph_tm.py | 10 +++--- examples/recomm_system/tm_classifier.py | 10 +++--- 4 files changed, 52 insertions(+), 16 deletions(-) diff --git a/examples/recomm_system/experiment_results.csv b/examples/recomm_system/experiment_results.csv index cb6e80f7..957f7701 100644 --- a/examples/recomm_system/experiment_results.csv +++ b/examples/recomm_system/experiment_results.csv @@ -17,3 +17,39 @@ TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,90.45554423332214,49.8292349726776 Graph 
NN,0.2,0,0,0,1000,CPU,0.03204679489135742,59.863388538360596 GraphTM,0.2,10000,10.0,23,10,CUDA,16.268279790878296,78.77049180327869 TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,96.16712856292725,20.184426229508194 +Graph NN,0.005,0,0,0,1000,CPU,0.0168764591217041,76.85792446136475 +GraphTM,0.005,10000,10.0,23,10,CUDA,31.40691065788269,98.82513661202185 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,88.05298614501953,76.74180327868852 +Graph NN,0.01,0,0,0,1000,CPU,0.01720118522644043,87.4316930770874 +GraphTM,0.01,10000,10.0,23,10,CUDA,31.529547214508057,98.4153005464481 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,89.19472336769104,74.93169398907104 +Graph NN,0.02,0,0,0,1000,CPU,0.014032602310180664,78.36065292358398 +GraphTM,0.02,10000,10.0,23,10,CUDA,32.8007595539093,97.62295081967213 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,94.56675243377686,72.6775956284153 +Graph NN,0.05,0,0,0,1000,CPU,0.016784191131591797,76.88524723052979 +GraphTM,0.05,10000,10.0,23,10,CUDA,34.84256434440613,94.75409836065573 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,96.4975814819336,64.1051912568306 +Graph NN,0.1,0,0,0,1000,CPU,0.014883041381835938,70.54644823074341 +GraphTM,0.1,10000,10.0,23,10,CUDA,36.750433683395386,89.97267759562841 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,96.35110449790955,50.341530054644814 +Graph NN,0.2,0,0,0,1000,CPU,0.03427433967590332,61.50273084640503 +GraphTM,0.2,10000,10.0,23,10,CUDA,39.63756251335144,79.01639344262294 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,97.00698733329773,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,370.7295939922333,87.5683069229126 +GraphTM,0.005,10000,10.0,23,10,CUDA,342.7878243923187,98.82513661202185 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,954.4101324081421,76.63934426229508 +Graph NN,0.01,0,0,0,20000,CPU,304.6031119823456,86.74863576889038 +GraphTM,0.01,10000,10.0,23,10,CUDA,346.8704605102539,98.25136612021858 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,978.3629264831543,74.93169398907104 
+Graph NN,0.02,0,0,0,20000,CPU,403.2585175037384,75.30054450035095 +GraphTM,0.02,10000,10.0,23,10,CUDA,353.39254236221313,97.65027322404372 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,971.3300836086273,72.1311475409836 +Graph NN,0.05,0,0,0,20000,CPU,398.8085067272186,93.8524603843689 +GraphTM,0.05,10000,10.0,23,10,CUDA,368.16111874580383,94.59016393442623 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,960.4506890773773,63.661202185792355 +Graph NN,0.1,0,0,0,20000,CPU,388.4886665344238,75.43715834617615 +GraphTM,0.1,10000,10.0,23,10,CUDA,340.63327074050903,90.43715846994536 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,972.1077370643616,49.35109289617486 +Graph NN,0.2,0,0,0,20000,CPU,438.5506749153137,64.04371857643127 +GraphTM,0.2,10000,10.0,23,10,CUDA,357.2651107311249,77.89617486338798 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,948.7157049179077,20.116120218579233 diff --git a/examples/recomm_system/graph_nn.py b/examples/recomm_system/graph_nn.py index 30292db9..9ef5fbed 100644 --- a/examples/recomm_system/graph_nn.py +++ b/examples/recomm_system/graph_nn.py @@ -61,9 +61,9 @@ def forward(self, x, edge_index): ).t() test_labels = torch.tensor(Y_test, dtype=torch.float) # Training Loop with Accuracy Logging - for epoch in range(args.epochs): - benchmark_total = BenchmarkTimer(logger=None, text="Epoch Time") - with benchmark_total: + benchmark_total = BenchmarkTimer(logger=None, text="Epochs Time") + with benchmark_total: + for epoch in range(args.epochs): benchmark1 = BenchmarkTimer(logger=None, text="Training Time") with benchmark1: # Training Phase @@ -91,8 +91,8 @@ def forward(self, x, edge_index): # Compute accuracy accuracy = ((test_predicted_ratings.round() == test_labels).float().mean().item()) * 100 test_time = benchmark2.elapsed() - total_time = benchmark_total.elapsed() - # Append results for each epoch + total_time = benchmark_total.elapsed() + # Append results for each epoch results.append({ "Algorithm": "Graph NN", "Noise_Ratio": 
args.dataset_noise_ratio, @@ -118,7 +118,7 @@ def forward(self, x, edge_index): def default_args(**kwargs): parser = argparse.ArgumentParser() parser.add_argument("--platform", default="CPU", type=str, choices=["CPU", "CUDA"]) - parser.add_argument("--epochs", default=1000, type=int) + parser.add_argument("--epochs", default=20000, type=int) parser.add_argument("--dataset_noise_ratio", default=0.01, type=float) args = parser.parse_args() for key, value in kwargs.items(): diff --git a/examples/recomm_system/graph_tm.py b/examples/recomm_system/graph_tm.py index 0ec2171c..d1464c75 100644 --- a/examples/recomm_system/graph_tm.py +++ b/examples/recomm_system/graph_tm.py @@ -106,9 +106,9 @@ def main(args): double_hashing = args.double_hashing ) - for epoch in range(args.epochs): - benchmark_total = BenchmarkTimer(logger=None, text="Epoch Time") - with benchmark_total: + benchmark_total = BenchmarkTimer(logger=None, text="Epoch Time") + with benchmark_total: + for epoch in range(args.epochs): benchmark1 = BenchmarkTimer(logger=None, text="Training Time") with benchmark1: tm.fit(graphs_train, Y_train, epochs=1, incremental=True) @@ -118,8 +118,8 @@ def main(args): with benchmark2: accuracy = 100*(tm.predict(graphs_test) == Y_test).mean() test_time = benchmark2.elapsed() - total_time = benchmark_total.elapsed() - # result_train = 100*(tm.predict(graphs_train) == Y_train).mean() + total_time = benchmark_total.elapsed() + # result_train = 100*(tm.predict(graphs_train) == Y_train).mean() results.append({ "Algorithm": "GraphTM", "Noise_Ratio": args.dataset_noise_ratio, diff --git a/examples/recomm_system/tm_classifier.py b/examples/recomm_system/tm_classifier.py index 1a2928d0..876f8c4f 100644 --- a/examples/recomm_system/tm_classifier.py +++ b/examples/recomm_system/tm_classifier.py @@ -19,9 +19,9 @@ def main(args): weighted_clauses=args.weighted_clauses, ) - for epoch in range(args.epochs): - benchmark_total = BenchmarkTimer(logger=None, text="Epoch Time") - with 
benchmark_total: + benchmark_total = BenchmarkTimer(logger=None, text="Epoch Time") + with benchmark_total: + for epoch in range(args.epochs): benchmark1 = BenchmarkTimer(logger=None, text="Training Time") with benchmark1: tm.fit(X_train, Y_train) @@ -30,9 +30,9 @@ def main(args): with benchmark2: accuracy = 100 * (tm.predict(X_test) == Y_test).mean() test_time = benchmark2.elapsed() - total_time = benchmark_total.elapsed() + total_time = benchmark_total.elapsed() - # Append results for each epoch + # Append results for each epoch results.append({ "Algorithm": "TMClassifier", "Noise_Ratio": args.dataset_noise_ratio, From c3d895b2840f9cb98fd19512ba7df5682d231647 Mon Sep 17 00:00:00 2001 From: Ahmed Khalid Date: Mon, 17 Feb 2025 11:34:50 +0000 Subject: [PATCH 23/29] update --- examples/recomm_system/experiment_results.csv | 234 +++++++++++---- .../recomm_system/experiment_results_old.csv | 271 ++++++++++++++++++ examples/recomm_system/graph_tm.py | 6 + examples/recomm_system/main.sh | 28 +- examples/recomm_system/test.ipynb | 186 ++++++++++++ 5 files changed, 662 insertions(+), 63 deletions(-) create mode 100644 examples/recomm_system/experiment_results_old.csv create mode 100644 examples/recomm_system/test.ipynb diff --git a/examples/recomm_system/experiment_results.csv b/examples/recomm_system/experiment_results.csv index 957f7701..b394dad6 100644 --- a/examples/recomm_system/experiment_results.csv +++ b/examples/recomm_system/experiment_results.csv @@ -1,55 +1,181 @@ Algorithm,Noise_Ratio,T,s,Max_Included_Literals,Epochs,Platform,Total_Time,Accuracy -Graph NN,0.005,0,0,0,1000,CPU,0.03006434440612793,76.72131061553955 -GraphTM,0.005,10000,10.0,23,10,CUDA,34.547648191452026,98.46994535519126 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,89.6943154335022,76.63934426229508 -Graph NN,0.01,0,0,0,1000,CPU,0.01817464828491211,75.95628499984741 -GraphTM,0.01,10000,10.0,23,10,CUDA,34.95576763153076,98.44262295081967 
-TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,96.10501098632812,74.93169398907104 -Graph NN,0.02,0,0,0,1000,CPU,0.03073263168334961,81.22950792312622 -GraphTM,0.02,10000,10.0,23,10,CUDA,36.0724892616272,97.43169398907104 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,95.67133641242981,72.40437158469946 -Graph NN,0.05,0,0,0,1000,CPU,0.014258623123168945,83.52459073066711 -GraphTM,0.05,10000,10.0,23,10,CUDA,38.86628317832947,95.0 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,96.7427487373352,64.65163934426229 -Graph NN,0.1,0,0,0,1000,CPU,0.022305965423583984,73.33333492279053 -GraphTM,0.1,10000,10.0,23,10,CUDA,37.45086216926575,90.08196721311475 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,90.45554423332214,49.8292349726776 -Graph NN,0.2,0,0,0,1000,CPU,0.03204679489135742,59.863388538360596 -GraphTM,0.2,10000,10.0,23,10,CUDA,16.268279790878296,78.77049180327869 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,96.16712856292725,20.184426229508194 -Graph NN,0.005,0,0,0,1000,CPU,0.0168764591217041,76.85792446136475 -GraphTM,0.005,10000,10.0,23,10,CUDA,31.40691065788269,98.82513661202185 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,88.05298614501953,76.74180327868852 -Graph NN,0.01,0,0,0,1000,CPU,0.01720118522644043,87.4316930770874 -GraphTM,0.01,10000,10.0,23,10,CUDA,31.529547214508057,98.4153005464481 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,89.19472336769104,74.93169398907104 -Graph NN,0.02,0,0,0,1000,CPU,0.014032602310180664,78.36065292358398 -GraphTM,0.02,10000,10.0,23,10,CUDA,32.8007595539093,97.62295081967213 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,94.56675243377686,72.6775956284153 -Graph NN,0.05,0,0,0,1000,CPU,0.016784191131591797,76.88524723052979 -GraphTM,0.05,10000,10.0,23,10,CUDA,34.84256434440613,94.75409836065573 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,96.4975814819336,64.1051912568306 -Graph NN,0.1,0,0,0,1000,CPU,0.014883041381835938,70.54644823074341 -GraphTM,0.1,10000,10.0,23,10,CUDA,36.750433683395386,89.97267759562841 
-TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,96.35110449790955,50.341530054644814 -Graph NN,0.2,0,0,0,1000,CPU,0.03427433967590332,61.50273084640503 -GraphTM,0.2,10000,10.0,23,10,CUDA,39.63756251335144,79.01639344262294 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,97.00698733329773,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,370.7295939922333,87.5683069229126 -GraphTM,0.005,10000,10.0,23,10,CUDA,342.7878243923187,98.82513661202185 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,954.4101324081421,76.63934426229508 -Graph NN,0.01,0,0,0,20000,CPU,304.6031119823456,86.74863576889038 -GraphTM,0.01,10000,10.0,23,10,CUDA,346.8704605102539,98.25136612021858 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,978.3629264831543,74.93169398907104 -Graph NN,0.02,0,0,0,20000,CPU,403.2585175037384,75.30054450035095 -GraphTM,0.02,10000,10.0,23,10,CUDA,353.39254236221313,97.65027322404372 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,971.3300836086273,72.1311475409836 -Graph NN,0.05,0,0,0,20000,CPU,398.8085067272186,93.8524603843689 -GraphTM,0.05,10000,10.0,23,10,CUDA,368.16111874580383,94.59016393442623 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,960.4506890773773,63.661202185792355 -Graph NN,0.1,0,0,0,20000,CPU,388.4886665344238,75.43715834617615 -GraphTM,0.1,10000,10.0,23,10,CUDA,340.63327074050903,90.43715846994536 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,972.1077370643616,49.35109289617486 -Graph NN,0.2,0,0,0,20000,CPU,438.5506749153137,64.04371857643127 -GraphTM,0.2,10000,10.0,23,10,CUDA,357.2651107311249,77.89617486338798 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,948.7157049179077,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,418.9250466823578,75.62841773033142 +GraphTM,0.005,10000,10.0,23,10,CUDA,110.35683226585388,98.68852459016394 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1059.1634759902954,76.81010928961749 +Graph NN,0.01,0,0,0,20000,CPU,550.6980571746826,94.50819492340088 
+GraphTM,0.01,10000,10.0,23,10,CUDA,114.06276345252991,98.4153005464481 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1051.916612625122,75.3415300546448 +Graph NN,0.02,0,0,0,20000,CPU,475.44024682044983,75.30054450035095 +GraphTM,0.02,10000,10.0,23,10,CUDA,121.55624794960022,97.8415300546448 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1043.9487817287445,72.1311475409836 +Graph NN,0.05,0,0,0,20000,CPU,411.8552327156067,80.98360896110535 +GraphTM,0.05,10000,10.0,23,10,CUDA,136.7814338207245,94.20765027322405 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1044.2656917572021,64.00273224043715 +Graph NN,0.1,0,0,0,20000,CPU,484.6550889015198,68.7158465385437 +GraphTM,0.1,10000,10.0,23,10,CUDA,150.34457921981812,89.72677595628416 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1061.191523551941,49.62431693989071 +Graph NN,0.2,0,0,0,20000,CPU,483.8463816642761,71.28415107727051 +GraphTM,0.2,10000,10.0,23,10,CUDA,169.18810439109802,78.49726775956285 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,1071.927158355713,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,473.5806052684784,86.36612296104431 +GraphTM,0.005,10000,10.0,23,10,CUDA,110.18979954719543,98.60655737704917 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,979.0509588718414,76.74180327868852 +Graph NN,0.01,0,0,0,20000,CPU,444.6897065639496,93.55190992355347 +GraphTM,0.01,10000,10.0,23,10,CUDA,112.48035550117493,98.4153005464481 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1007.9654748439789,74.86338797814209 +Graph NN,0.02,0,0,0,20000,CPU,386.32835030555725,93.22404265403748 +GraphTM,0.02,10000,10.0,23,10,CUDA,120.46316766738892,97.73224043715847 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1017.5866801738739,73.25819672131148 +Graph NN,0.05,0,0,0,20000,CPU,417.78410935401917,73.1693983078003 +GraphTM,0.05,10000,10.0,23,10,CUDA,135.64952206611633,95.08196721311475 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,945.0465729236603,64.00273224043715 +Graph 
NN,0.1,0,0,0,20000,CPU,481.6537721157074,77.18579173088074 +GraphTM,0.1,10000,10.0,23,10,CUDA,149.57958960533142,90.08196721311475 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,938.0212676525116,49.59016393442623 +Graph NN,0.2,0,0,0,20000,CPU,391.36059975624084,64.15300369262695 +GraphTM,0.2,10000,10.0,23,10,CUDA,169.49347591400146,77.65027322404372 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,940.9758951663971,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,480.5005066394806,75.68305730819702 +GraphTM,0.005,10000,10.0,23,10,CUDA,109.65927052497864,98.19672131147541 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,947.7581994533539,76.63934426229508 +Graph NN,0.01,0,0,0,20000,CPU,449.22584795951843,76.36612057685852 +GraphTM,0.01,10000,10.0,23,10,CUDA,113.07226181030273,98.27868852459017 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1010.8711988925934,74.93169398907104 +Graph NN,0.02,0,0,0,20000,CPU,403.96647000312805,96.85792326927185 +GraphTM,0.02,10000,10.0,23,10,CUDA,120.02044725418091,97.78688524590164 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1011.7896072864532,72.40437158469946 +Graph NN,0.05,0,0,0,20000,CPU,460.688773393631,85.00000238418579 +GraphTM,0.05,10000,10.0,23,10,CUDA,136.04891228675842,94.69945355191257 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1014.1492829322815,64.00273224043715 +Graph NN,0.1,0,0,0,20000,CPU,407.9346880912781,74.1256833076477 +GraphTM,0.1,10000,10.0,23,10,CUDA,149.6586093902588,90.08196721311475 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,990.8282098770142,49.59016393442623 +Graph NN,0.2,0,0,0,20000,CPU,437.8108870983124,65.60109257698059 +GraphTM,0.2,10000,10.0,23,10,CUDA,168.44772601127625,78.93442622950819 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,1022.1848647594452,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,430.3925087451935,89.20764923095703 +GraphTM,0.005,10000,10.0,23,10,CUDA,109.6658935546875,98.68852459016394 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1016.199923992157,76.63934426229508 
+Graph NN,0.01,0,0,0,20000,CPU,396.3338620662689,84.23497080802917 +GraphTM,0.01,10000,10.0,23,10,CUDA,113.67849016189575,98.27868852459017 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,944.4602844715118,74.86338797814209 +Graph NN,0.02,0,0,0,20000,CPU,434.91951632499695,93.25136542320251 +GraphTM,0.02,10000,10.0,23,10,CUDA,120.31921482086182,97.8415300546448 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,933.2245874404907,72.40437158469946 +Graph NN,0.05,0,0,0,20000,CPU,483.2671537399292,80.32786846160889 +GraphTM,0.05,10000,10.0,23,10,CUDA,135.68922591209412,94.78142076502732 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,994.6384744644165,64.13934426229508 +Graph NN,0.1,0,0,0,20000,CPU,424.9935986995697,81.33879899978638 +GraphTM,0.1,10000,10.0,23,10,CUDA,149.08107113838196,89.59016393442623 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,944.0273253917694,49.62431693989071 +Graph NN,0.2,0,0,0,20000,CPU,333.49274706840515,61.50273084640503 +GraphTM,0.2,10000,10.0,23,10,CUDA,170.906751871109,78.98907103825137 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,965.9725024700165,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,377.28471970558167,75.68305730819702 +GraphTM,0.005,10000,10.0,23,10,CUDA,109.61631536483765,98.82513661202185 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,976.8008840084076,76.67349726775956 +Graph NN,0.01,0,0,0,20000,CPU,473.2922372817993,76.06557607650757 +GraphTM,0.01,10000,10.0,23,10,CUDA,112.87212014198303,98.27868852459017 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,942.7254059314728,74.86338797814209 +Graph NN,0.02,0,0,0,20000,CPU,357.36573815345764,75.40983557701111 +GraphTM,0.02,10000,10.0,23,10,CUDA,119.41612005233765,97.8415300546448 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,985.81947016716,72.40437158469946 +Graph NN,0.05,0,0,0,20000,CPU,440.75843334198,73.08743000030518 +GraphTM,0.05,10000,10.0,23,10,CUDA,136.8215868473053,94.91803278688525 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,997.739678144455,64.1051912568306 
+Graph NN,0.1,0,0,0,20000,CPU,426.73446226119995,88.55191469192505 +GraphTM,0.1,10000,10.0,23,10,CUDA,149.54467248916626,89.94535519125682 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,980.096907377243,49.59016393442623 +Graph NN,0.2,0,0,0,20000,CPU,387.20843958854675,75.71038007736206 +GraphTM,0.2,10000,10.0,23,10,CUDA,169.7962884902954,77.56830601092896 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,987.0616261959076,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,455.586905002594,83.41529965400696 +GraphTM,0.005,10000,10.0,23,10,CUDA,109.7424705028534,98.5792349726776 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,998.4698519706726,76.81010928961749 +Graph NN,0.01,0,0,0,20000,CPU,466.44022035598755,98.52458834648132 +GraphTM,0.01,10000,10.0,23,10,CUDA,112.78495740890503,98.27868852459017 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,932.3163437843323,74.93169398907104 +Graph NN,0.02,0,0,0,20000,CPU,455.35024762153625,88.96175026893616 +GraphTM,0.02,10000,10.0,23,10,CUDA,120.741384267807,97.75956284153006 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,974.3740100860596,72.1311475409836 +Graph NN,0.05,0,0,0,20000,CPU,399.9565739631653,73.60655665397644 +GraphTM,0.05,10000,10.0,23,10,CUDA,136.17181992530823,94.67213114754098 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,971.1499485969543,64.1051912568306 +Graph NN,0.1,0,0,0,20000,CPU,447.5498752593994,70.8743155002594 +GraphTM,0.1,10000,10.0,23,10,CUDA,149.6928951740265,89.80874316939891 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,962.4737737178802,49.59016393442623 +Graph NN,0.2,0,0,0,20000,CPU,403.6350507736206,64.15300369262695 +GraphTM,0.2,10000,10.0,23,10,CUDA,170.02189421653748,78.16939890710383 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,947.2696743011475,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,470.0121097564697,81.20218515396118 +GraphTM,0.005,10000,10.0,23,10,CUDA,109.51706099510193,98.82513661202185 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,974.2360310554504,76.74180327868852 
+Graph NN,0.01,0,0,0,20000,CPU,466.69573068618774,76.06557607650757 +GraphTM,0.01,10000,10.0,23,10,CUDA,112.95063591003418,98.4153005464481 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,948.407201051712,74.86338797814209 +Graph NN,0.02,0,0,0,20000,CPU,288.0073969364166,92.92349815368652 +GraphTM,0.02,10000,10.0,23,10,CUDA,119.34772634506226,97.48633879781421 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,971.228814125061,39.51502732240437 +Graph NN,0.05,0,0,0,20000,CPU,477.7228500843048,89.86338973045349 +GraphTM,0.05,10000,10.0,23,10,CUDA,135.2427453994751,94.86338797814207 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,964.5819866657257,34.56284153005464 +Graph NN,0.1,0,0,0,20000,CPU,459.15181946754456,71.22950553894043 +GraphTM,0.1,10000,10.0,23,10,CUDA,148.52941298484802,89.67213114754098 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,981.4810082912445,49.59016393442623 +Graph NN,0.2,0,0,0,20000,CPU,356.59899377822876,64.15300369262695 +GraphTM,0.2,10000,10.0,23,10,CUDA,169.7598683834076,76.85792349726775 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,959.9282560348511,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,378.94336581230164,80.32786846160889 +GraphTM,0.005,10000,10.0,23,10,CUDA,109.55144882202148,98.44262295081967 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,947.1284465789795,76.63934426229508 +Graph NN,0.01,0,0,0,20000,CPU,407.1111581325531,94.31694149971008 +GraphTM,0.01,10000,10.0,23,10,CUDA,112.06348276138306,98.27868852459017 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,941.711061000824,39.65163934426229 +Graph NN,0.02,0,0,0,20000,CPU,402.2970163822174,79.80874180793762 +GraphTM,0.02,10000,10.0,23,10,CUDA,120.20444130897522,97.8415300546448 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,998.2885782718658,72.1311475409836 +Graph NN,0.05,0,0,0,20000,CPU,400.97751235961914,85.30054688453674 +GraphTM,0.05,10000,10.0,23,10,CUDA,136.81029963493347,94.78142076502732 
+TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1003.2194263935089,64.1051912568306 +Graph NN,0.1,0,0,0,20000,CPU,413.25741934776306,74.59016442298889 +GraphTM,0.1,10000,10.0,23,10,CUDA,148.70455861091614,89.89071038251366 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,974.4099938869476,49.62431693989071 +Graph NN,0.2,0,0,0,20000,CPU,369.36416029930115,64.15300369262695 +GraphTM,0.2,10000,10.0,23,10,CUDA,170.01750564575195,78.55191256830601 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,990.2080008983612,20.184426229508194 +Graph NN,0.005,0,0,0,20000,CPU,440.5256702899933,90.4644787311554 +GraphTM,0.005,10000,10.0,23,10,CUDA,109.76434278488159,98.55191256830601 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1004.704318523407,76.63934426229508 +Graph NN,0.01,0,0,0,20000,CPU,385.76011848449707,77.62295007705688 +GraphTM,0.01,10000,10.0,23,10,CUDA,113.28425002098083,98.44262295081967 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,953.8945541381836,74.93169398907104 +Graph NN,0.02,0,0,0,20000,CPU,422.2995481491089,90.71038365364075 +GraphTM,0.02,10000,10.0,23,10,CUDA,121.29091334342957,97.6775956284153 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1002.099497795105,72.1311475409836 +Graph NN,0.05,0,0,0,20000,CPU,383.33483958244324,81.8306028842926 +GraphTM,0.05,10000,10.0,23,10,CUDA,134.72863698005676,94.53551912568307 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,936.831921339035,64.1051912568306 +Graph NN,0.1,0,0,0,20000,CPU,320.32143545150757,83.60655903816223 +GraphTM,0.1,10000,10.0,23,10,CUDA,150.56500816345215,89.15300546448087 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,955.8687121868134,49.62431693989071 +Graph NN,0.2,0,0,0,20000,CPU,432.34014868736267,64.15300369262695 +GraphTM,0.2,10000,10.0,23,10,CUDA,169.61127710342407,79.12568306010928 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,945.0617082118988,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,458.87039852142334,79.37158346176147 
+GraphTM,0.005,10000,10.0,23,10,CUDA,110.9952290058136,98.82513661202185 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,985.8775904178619,76.63934426229508 +Graph NN,0.01,0,0,0,20000,CPU,453.55728340148926,76.06557607650757 +GraphTM,0.01,10000,10.0,23,10,CUDA,113.85269451141357,98.27868852459017 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,941.0662143230438,75.06830601092896 +Graph NN,0.02,0,0,0,20000,CPU,416.2407822608948,91.66666865348816 +GraphTM,0.02,10000,10.0,23,10,CUDA,120.69959592819214,97.78688524590164 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,973.9127674102783,72.40437158469946 +Graph NN,0.05,0,0,0,20000,CPU,311.2621831893921,75.46448111534119 +GraphTM,0.05,10000,10.0,23,10,CUDA,134.66055345535278,94.89071038251366 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,953.3380017280579,63.25136612021858 +Graph NN,0.1,0,0,0,20000,CPU,425.43416261672974,73.79781603813171 +GraphTM,0.1,10000,10.0,23,10,CUDA,150.67951107025146,90.27322404371586 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,967.5897221565247,49.59016393442623 +Graph NN,0.2,0,0,0,20000,CPU,379.8497235774994,64.15300369262695 +GraphTM,0.2,10000,10.0,23,10,CUDA,169.7126281261444,77.81420765027323 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,955.9427745342255,20.116120218579233 diff --git a/examples/recomm_system/experiment_results_old.csv b/examples/recomm_system/experiment_results_old.csv new file mode 100644 index 00000000..f715ba6a --- /dev/null +++ b/examples/recomm_system/experiment_results_old.csv @@ -0,0 +1,271 @@ +Algorithm,Noise_Ratio,T,s,Max_Included_Literals,Epochs,Platform,Total_Time,Accuracy +Graph NN,0.005,0,0,0,1000,CPU,0.03006434440612793,76.72131061553955 +GraphTM,0.005,10000,10.0,23,10,CUDA,34.547648191452026,98.46994535519126 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,89.6943154335022,76.63934426229508 +Graph NN,0.01,0,0,0,1000,CPU,0.01817464828491211,75.95628499984741 +GraphTM,0.01,10000,10.0,23,10,CUDA,34.95576763153076,98.44262295081967 
+TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,96.10501098632812,74.93169398907104 +Graph NN,0.02,0,0,0,1000,CPU,0.03073263168334961,81.22950792312622 +GraphTM,0.02,10000,10.0,23,10,CUDA,36.0724892616272,97.43169398907104 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,95.67133641242981,72.40437158469946 +Graph NN,0.05,0,0,0,1000,CPU,0.014258623123168945,83.52459073066711 +GraphTM,0.05,10000,10.0,23,10,CUDA,38.86628317832947,95.0 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,96.7427487373352,64.65163934426229 +Graph NN,0.1,0,0,0,1000,CPU,0.022305965423583984,73.33333492279053 +GraphTM,0.1,10000,10.0,23,10,CUDA,37.45086216926575,90.08196721311475 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,90.45554423332214,49.8292349726776 +Graph NN,0.2,0,0,0,1000,CPU,0.03204679489135742,59.863388538360596 +GraphTM,0.2,10000,10.0,23,10,CUDA,16.268279790878296,78.77049180327869 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,96.16712856292725,20.184426229508194 +Graph NN,0.005,0,0,0,1000,CPU,0.0168764591217041,76.85792446136475 +GraphTM,0.005,10000,10.0,23,10,CUDA,31.40691065788269,98.82513661202185 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,88.05298614501953,76.74180327868852 +Graph NN,0.01,0,0,0,1000,CPU,0.01720118522644043,87.4316930770874 +GraphTM,0.01,10000,10.0,23,10,CUDA,31.529547214508057,98.4153005464481 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,89.19472336769104,74.93169398907104 +Graph NN,0.02,0,0,0,1000,CPU,0.014032602310180664,78.36065292358398 +GraphTM,0.02,10000,10.0,23,10,CUDA,32.8007595539093,97.62295081967213 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,94.56675243377686,72.6775956284153 +Graph NN,0.05,0,0,0,1000,CPU,0.016784191131591797,76.88524723052979 +GraphTM,0.05,10000,10.0,23,10,CUDA,34.84256434440613,94.75409836065573 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,96.4975814819336,64.1051912568306 +Graph NN,0.1,0,0,0,1000,CPU,0.014883041381835938,70.54644823074341 +GraphTM,0.1,10000,10.0,23,10,CUDA,36.750433683395386,89.97267759562841 
+TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,96.35110449790955,50.341530054644814 +Graph NN,0.2,0,0,0,1000,CPU,0.03427433967590332,61.50273084640503 +GraphTM,0.2,10000,10.0,23,10,CUDA,39.63756251335144,79.01639344262294 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,97.00698733329773,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,370.7295939922333,87.5683069229126 +GraphTM,0.005,10000,10.0,23,10,CUDA,342.7878243923187,98.82513661202185 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,954.4101324081421,76.63934426229508 +Graph NN,0.01,0,0,0,20000,CPU,304.6031119823456,86.74863576889038 +GraphTM,0.01,10000,10.0,23,10,CUDA,346.8704605102539,98.25136612021858 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,978.3629264831543,74.93169398907104 +Graph NN,0.02,0,0,0,20000,CPU,403.2585175037384,75.30054450035095 +GraphTM,0.02,10000,10.0,23,10,CUDA,353.39254236221313,97.65027322404372 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,971.3300836086273,72.1311475409836 +Graph NN,0.05,0,0,0,20000,CPU,398.8085067272186,93.8524603843689 +GraphTM,0.05,10000,10.0,23,10,CUDA,368.16111874580383,94.59016393442623 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,960.4506890773773,63.661202185792355 +Graph NN,0.1,0,0,0,20000,CPU,388.4886665344238,75.43715834617615 +GraphTM,0.1,10000,10.0,23,10,CUDA,340.63327074050903,90.43715846994536 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,972.1077370643616,49.35109289617486 +Graph NN,0.2,0,0,0,20000,CPU,438.5506749153137,64.04371857643127 +GraphTM,0.2,10000,10.0,23,10,CUDA,357.2651107311249,77.89617486338798 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,948.7157049179077,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,335.8319003582001,94.97267603874207 +GraphTM,0.005,10000,10.0,23,10,CUDA,343.08735728263855,98.63387978142076 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,947.0340785980225,76.74180327868852 +Graph NN,0.01,0,0,0,20000,CPU,380.5575759410858,94.37158703804016 +GraphTM,0.01,10000,10.0,23,10,CUDA,346.9574134349823,98.4153005464481 
+TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,948.3826260566711,74.93169398907104 +Graph NN,0.02,0,0,0,20000,CPU,317.0974416732788,80.6010901927948 +GraphTM,0.02,10000,10.0,23,10,CUDA,352.5908226966858,97.5136612021858 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,966.0719907283783,72.40437158469946 +Graph NN,0.05,0,0,0,20000,CPU,472.30924010276794,73.08743000030518 +GraphTM,0.05,10000,10.0,23,10,CUDA,352.63378834724426,94.18032786885246 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,959.5826976299286,64.31010928961749 +Graph NN,0.1,0,0,0,20000,CPU,461.1769962310791,82.45901465415955 +GraphTM,0.1,10000,10.0,23,10,CUDA,384.25392842292786,89.80874316939891 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,968.517664194107,49.62431693989071 +Graph NN,0.2,0,0,0,20000,CPU,338.83801436424255,61.39343976974487 +GraphTM,0.2,10000,10.0,23,10,CUDA,406.0366141796112,79.37158469945356 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,956.5074710845947,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,449.974244594574,99.07103776931763 +GraphTM,0.005,10000,10.0,23,10,CUDA,110.82642030715942,98.63387978142076 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,958.8415122032166,76.63934426229508 +Graph NN,0.01,0,0,0,20000,CPU,340.71677923202515,91.557377576828 +GraphTM,0.01,10000,10.0,23,10,CUDA,113.30413746833801,98.4153005464481 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,954.5596807003021,74.86338797814209 +Graph NN,0.02,0,0,0,20000,CPU,395.9958527088165,90.95628261566162 +GraphTM,0.02,10000,10.0,23,10,CUDA,120.9222981929779,97.8415300546448 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,969.4929764270782,72.40437158469946 +Graph NN,0.05,0,0,0,20000,CPU,480.05427837371826,84.83606576919556 +GraphTM,0.05,10000,10.0,23,10,CUDA,135.44805693626404,94.67213114754098 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,960.4112854003906,64.00273224043715 +Graph NN,0.1,0,0,0,20000,CPU,383.12051796913147,70.8743155002594 +GraphTM,0.1,10000,10.0,23,10,CUDA,149.93119883537292,89.86338797814207 
+TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,951.469316482544,49.35109289617486 +Graph NN,0.2,0,0,0,20000,CPU,463.9883725643158,66.22951030731201 +GraphTM,0.2,10000,10.0,23,10,CUDA,170.47470378875732,78.16939890710383 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,960.5258178710938,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,475.9830324649811,82.54098296165466 +GraphTM,0.005,10000,10.0,23,10,CUDA,109.21395993232727,98.7431693989071 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1007.2876415252686,76.74180327868852 +Graph NN,0.01,0,0,0,20000,CPU,383.468213558197,84.89071130752563 +GraphTM,0.01,10000,10.0,23,10,CUDA,112.81892561912537,98.16939890710383 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1012.9538216590881,75.0 +Graph NN,0.02,0,0,0,20000,CPU,420.129834651947,78.87977957725525 +GraphTM,0.02,10000,10.0,23,10,CUDA,120.55745768547058,97.75956284153006 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1015.7262468338013,72.43852459016394 +Graph NN,0.05,0,0,0,20000,CPU,402.9082715511322,88.90710473060608 +GraphTM,0.05,10000,10.0,23,10,CUDA,136.1779272556305,94.69945355191257 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1003.5450174808502,64.1051912568306 +Graph NN,0.1,0,0,0,20000,CPU,465.9741690158844,71.61202430725098 +GraphTM,0.1,10000,10.0,23,10,CUDA,150.92307353019714,90.21857923497268 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,993.3001370429993,49.62431693989071 +Graph NN,0.2,0,0,0,20000,CPU,477.5556457042694,61.967211961746216 +GraphTM,0.2,10000,10.0,23,10,CUDA,170.91576671600342,78.71584699453553 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,968.9711816310883,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,432.9368100166321,87.9781424999237 +GraphTM,0.005,10000,10.0,23,10,CUDA,110.05442261695862,98.4153005464481 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,996.241945028305,77.11748633879782 +Graph NN,0.01,0,0,0,20000,CPU,487.0275945663452,76.06557607650757 +GraphTM,0.01,10000,10.0,23,10,CUDA,114.20750546455383,98.27868852459017 
+TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,965.2801012992859,74.89754098360656 +Graph NN,0.02,0,0,0,20000,CPU,469.96120142936707,84.61748361587524 +GraphTM,0.02,10000,10.0,23,10,CUDA,120.18490934371948,97.62295081967213 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,996.0747539997101,73.05327868852459 +Graph NN,0.05,0,0,0,20000,CPU,391.52739334106445,94.4535493850708 +GraphTM,0.05,10000,10.0,23,10,CUDA,135.62234830856323,94.89071038251366 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1008.111634016037,64.00273224043715 +Graph NN,0.1,0,0,0,20000,CPU,393.4089164733887,82.24043846130371 +GraphTM,0.1,10000,10.0,23,10,CUDA,150.06821942329407,90.21857923497268 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1029.8733656406403,46.89207650273224 +Graph NN,0.2,0,0,0,20000,CPU,457.90059518814087,64.50819969177246 +GraphTM,0.2,10000,10.0,23,10,CUDA,169.8122251033783,78.5792349726776 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,994.4631915092468,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,468.43779039382935,93.66120100021362 +GraphTM,0.005,10000,10.0,23,10,CUDA,791.0080873966217,98.66120218579235 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1003.8278872966766,76.63934426229508 +Graph NN,0.01,0,0,0,20000,CPU,432.6524693965912,76.06557607650757 +GraphTM,0.01,10000,10.0,23,10,CUDA,114.20011568069458,98.4153005464481 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1002.5212485790253,74.55601092896174 +Graph NN,0.02,0,0,0,20000,CPU,369.3357195854187,77.92349457740784 +GraphTM,0.02,10000,10.0,23,10,CUDA,120.16606998443604,97.78688524590164 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1012.7241668701172,72.40437158469946 +Graph NN,0.05,0,0,0,20000,CPU,425.18350195884705,73.49726557731628 +GraphTM,0.05,10000,10.0,23,10,CUDA,134.74739480018616,94.53551912568307 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,989.5920696258545,64.00273224043715 +Graph NN,0.1,0,0,0,20000,CPU,490.52463579177856,74.23497438430786 
+GraphTM,0.1,10000,10.0,23,10,CUDA,150.663067817688,90.05464480874316 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1006.5979704856873,49.86338797814208 +Graph NN,0.2,0,0,0,20000,CPU,430.0901610851288,55.51912784576416 +GraphTM,0.2,10000,10.0,23,10,CUDA,169.53561758995056,78.52459016393442 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,978.9952318668365,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,348.87419414520264,88.87978196144104 +GraphTM,0.005,10000,10.0,23,10,CUDA,110.35069704055786,98.49726775956285 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1027.553718328476,76.63934426229508 +Graph NN,0.01,0,0,0,20000,CPU,459.8675227165222,94.97267603874207 +GraphTM,0.01,10000,10.0,23,10,CUDA,113.4369592666626,98.4153005464481 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1020.3086180686951,74.93169398907104 +Graph NN,0.02,0,0,0,20000,CPU,402.3793728351593,98.08743000030518 +GraphTM,0.02,10000,10.0,23,10,CUDA,121.04798412322998,97.78688524590164 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1001.1654710769653,72.1311475409836 +Graph NN,0.05,0,0,0,20000,CPU,372.8648886680603,77.81420946121216 +GraphTM,0.05,10000,10.0,23,10,CUDA,135.42569255828857,94.78142076502732 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1007.9532639980316,64.31010928961749 +Graph NN,0.1,0,0,0,20000,CPU,379.2149317264557,88.55191469192505 +GraphTM,0.1,10000,10.0,23,10,CUDA,149.3813440799713,89.50819672131148 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1043.259267091751,49.65846994535519 +Graph NN,0.2,0,0,0,20000,CPU,327.1461730003357,64.15300369262695 +GraphTM,0.2,10000,10.0,23,10,CUDA,169.63331365585327,77.75956284153006 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,1010.9232707023621,20.081967213114755 +Graph NN,0.005,0,0,0,20000,CPU,365.3540139198303,84.56284403800964 +GraphTM,0.005,10000,10.0,23,10,CUDA,109.86703443527222,98.55191256830601 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,985.2240512371063,76.84426229508196 +Graph 
NN,0.01,0,0,0,20000,CPU,419.19047832489014,90.65573811531067 +GraphTM,0.01,10000,10.0,23,10,CUDA,113.7970187664032,98.19672131147541 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1064.9949398040771,75.27322404371584 +Graph NN,0.02,0,0,0,20000,CPU,331.5898778438568,82.13114738464355 +GraphTM,0.02,10000,10.0,23,10,CUDA,120.9221625328064,97.8415300546448 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,995.9801988601685,72.1311475409836 +Graph NN,0.05,0,0,0,20000,CPU,471.61706471443176,76.4207661151886 +GraphTM,0.05,10000,10.0,23,10,CUDA,135.71256685256958,94.31693989071039 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1049.4032156467438,64.00273224043715 +Graph NN,0.1,0,0,0,20000,CPU,408.78746509552,75.76502561569214 +GraphTM,0.1,10000,10.0,23,10,CUDA,150.7326798439026,89.86338797814207 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1033.6956369876862,49.59016393442623 +Graph NN,0.2,0,0,0,20000,CPU,606.6176900863647,75.84699392318726 +GraphTM,0.2,10000,10.0,23,10,CUDA,767.3086304664612,79.18032786885246 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,1133.7219278812408,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,581.8730342388153,75.68305730819702 +GraphTM,0.005,10000,10.0,23,10,CUDA,331.4337913990021,98.68852459016394 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1139.0209171772003,76.63934426229508 +Graph NN,0.01,0,0,0,20000,CPU,625.7649390697479,79.91803288459778 +GraphTM,0.01,10000,10.0,23,10,CUDA,390.8302972316742,98.27868852459017 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1126.1463103294373,74.86338797814209 +Graph NN,0.02,0,0,0,20000,CPU,400.4486656188965,88.87978196144104 +GraphTM,0.02,10000,10.0,23,10,CUDA,1433.5869204998016,97.73224043715847 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,950.7241444587708,72.43852459016394 +Graph NN,0.05,0,0,0,20000,CPU,425.54064321517944,88.22404146194458 +GraphTM,0.05,10000,10.0,23,10,CUDA,135.85678553581238,95.0 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,980.2039513587952,40.26639344262295 +Graph 
NN,0.1,0,0,0,20000,CPU,452.5277452468872,75.38251280784607 +GraphTM,0.1,10000,10.0,23,10,CUDA,149.88782930374146,89.80874316939891 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1026.7852320671082,49.59016393442623 +Graph NN,0.2,0,0,0,20000,CPU,470.88474774360657,69.67213153839111 +GraphTM,0.2,10000,10.0,23,10,CUDA,169.65682435035706,78.38797814207649 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,1025.9789564609528,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,415.4326367378235,75.68305730819702 +GraphTM,0.005,10000,10.0,23,10,CUDA,109.78167200088501,98.82513661202185 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1003.7237763404846,76.63934426229508 +Graph NN,0.01,0,0,0,20000,CPU,444.45101857185364,92.65027046203613 +GraphTM,0.01,10000,10.0,23,10,CUDA,112.90761637687683,98.14207650273225 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,952.2491714954376,74.86338797814209 +Graph NN,0.02,0,0,0,20000,CPU,320.37370920181274,93.90710592269897 +GraphTM,0.02,10000,10.0,23,10,CUDA,119.93352174758911,97.78688524590164 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,943.684113740921,72.40437158469946 +Graph NN,0.05,0,0,0,20000,CPU,481.5506682395935,73.08743000030518 +GraphTM,0.05,10000,10.0,23,10,CUDA,135.72563362121582,94.75409836065573 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1023.0283312797546,63.661202185792355 +Graph NN,0.1,0,0,0,20000,CPU,493.5546169281006,70.92896103858948 +GraphTM,0.1,10000,10.0,23,10,CUDA,149.45619106292725,89.80874316939891 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1015.6581709384918,49.62431693989071 +Graph NN,0.2,0,0,0,20000,CPU,413.9959945678711,64.15300369262695 +GraphTM,0.2,10000,10.0,23,10,CUDA,170.9294879436493,78.77049180327869 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,986.7937209606171,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,378.53097796440125,75.68305730819702 +GraphTM,0.005,10000,10.0,23,10,CUDA,110.98681783676147,98.30601092896175 
+TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1019.9160070419312,76.74180327868852 +Graph NN,0.01,0,0,0,20000,CPU,474.00093841552734,91.0109281539917 +GraphTM,0.01,10000,10.0,23,10,CUDA,111.94242978096008,98.4153005464481 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,999.8850407600403,75.03415300546447 +Graph NN,0.02,0,0,0,20000,CPU,346.5858099460602,79.3169379234314 +GraphTM,0.02,10000,10.0,23,10,CUDA,120.32013273239136,97.81420765027322 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,937.4906117916107,71.92622950819673 +Graph NN,0.05,0,0,0,20000,CPU,408.48123002052307,79.61748838424683 +GraphTM,0.05,10000,10.0,23,10,CUDA,134.27622246742249,94.72677595628414 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,998.1966772079468,64.1051912568306 +Graph NN,0.1,0,0,0,20000,CPU,373.10851979255676,70.8743155002594 +GraphTM,0.1,10000,10.0,23,10,CUDA,148.95248794555664,89.86338797814207 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,993.9887461662292,49.62431693989071 +Graph NN,0.2,0,0,0,20000,CPU,388.21142077445984,64.15300369262695 +GraphTM,0.2,10000,10.0,23,10,CUDA,168.77049660682678,76.93989071038251 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,947.7270972728729,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,370.7274992465973,75.79234838485718 +GraphTM,0.005,10000,10.0,23,10,CUDA,109.92479467391968,98.27868852459017 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,944.8954434394836,76.74180327868852 +Graph NN,0.01,0,0,0,20000,CPU,382.68008041381836,90.8196747303009 +GraphTM,0.01,10000,10.0,23,10,CUDA,113.02455401420593,98.4153005464481 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,958.4739623069763,74.86338797814209 +Graph NN,0.02,0,0,0,20000,CPU,466.3325071334839,96.22950553894043 +GraphTM,0.02,10000,10.0,23,10,CUDA,121.06816530227661,97.6775956284153 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,952.7009084224701,72.1311475409836 +Graph NN,0.05,0,0,0,20000,CPU,462.6835868358612,75.79234838485718 
+GraphTM,0.05,10000,10.0,23,10,CUDA,136.21898555755615,94.53551912568307 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,974.2475302219391,64.00273224043715 +Graph NN,0.1,0,0,0,20000,CPU,425.79654932022095,87.18579411506653 +GraphTM,0.1,10000,10.0,23,10,CUDA,149.70053339004517,90.0 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1005.1148529052734,49.59016393442623 +Graph NN,0.2,0,0,0,20000,CPU,454.7309219837189,60.10928750038147 +GraphTM,0.2,10000,10.0,23,10,CUDA,170.75228261947632,78.68852459016394 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,949.2937788963318,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,570.247394323349,75.68305730819702 +GraphTM,0.005,10000,10.0,23,10,CUDA,478.04068207740784,98.5792349726776 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1043.9900722503662,76.22950819672131 +Graph NN,0.01,0,0,0,20000,CPU,428.5804445743561,98.68852496147156 +GraphTM,0.01,10000,10.0,23,10,CUDA,522.4638862609863,98.44262295081967 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1060.4919381141663,74.93169398907104 +Graph NN,0.02,0,0,0,20000,CPU,432.5051038265228,76.25682950019836 +GraphTM,0.02,10000,10.0,23,10,CUDA,465.56538343429565,97.73224043715847 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1074.2418582439423,72.40437158469946 +Graph NN,0.05,0,0,0,20000,CPU,492.7251534461975,85.16393303871155 +GraphTM,0.05,10000,10.0,23,10,CUDA,688.4105927944183,94.91803278688525 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1055.8136265277863,64.00273224043715 +Graph NN,0.1,0,0,0,20000,CPU,584.2829260826111,78.22404503822327 +GraphTM,0.1,10000,10.0,23,10,CUDA,625.4286091327667,90.13661202185791 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1055.7545056343079,48.83879781420765 +Graph NN,0.2,0,0,0,20000,CPU,318.2997555732727,67.40437150001526 +GraphTM,0.2,10000,10.0,23,10,CUDA,1264.404123544693,77.62295081967213 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,1000.3779845237732,20.081967213114755 diff --git a/examples/recomm_system/graph_tm.py 
b/examples/recomm_system/graph_tm.py index d1464c75..d03d3be9 100644 --- a/examples/recomm_system/graph_tm.py +++ b/examples/recomm_system/graph_tm.py @@ -14,8 +14,14 @@ def main(args): x, y = prepare_dataset.construct_x_y(data) X_train, X_test, Y_train, Y_test = prepare_dataset.train_test_split(x,y) users = data['user_id'].unique() + print("Users: ",len(users)) + items = data['product_id'].unique() + print("Items: ",len(items)) + categories = data['category'].unique() + print("Categories: ",len(categories)) + # Initialize Graphs with symbols for GTM number_of_nodes = 3 symbols = [] diff --git a/examples/recomm_system/main.sh b/examples/recomm_system/main.sh index 8c7a22ad..a5db0425 100644 --- a/examples/recomm_system/main.sh +++ b/examples/recomm_system/main.sh @@ -1,16 +1,26 @@ +#!/bin/bash + echo `date`, Setup the environment ... set -e # exit if error models="graph_tm tm_classifier graph_nn" dataset_noise_ratios="0.005 0.01 0.02 0.05 0.1 0.2" +num_iterations=10 # Number of times to repeat the experiments + +for (( i=1; i<=num_iterations; i++ )) +do + echo "Iteration $i of $num_iterations" + + for N in $dataset_noise_ratios; do + echo `date`, Running Graph NN ... + python3 graph_nn.py --dataset_noise_ratio $N + + echo `date`, Running Graph Tsetlin Machine ... + python3 graph_tm.py --dataset_noise_ratio $N + + echo `date`, Running Tsetlin Machine Classifier ... + python3 tm_classifier.py --dataset_noise_ratio $N + done +done -for N in $dataset_noise_ratios; do - echo `date`, Running Graph NN ... - python3 graph_nn.py --dataset_noise_ratio $N - echo `date`, Running Graph Tsetlin Machine ... - python3 graph_tm.py --dataset_noise_ratio $N - - echo `date`, Running Tsetlin Machine Classifier ... 
- python3 tm_classifier.py --dataset_noise_ratio $N -done \ No newline at end of file diff --git a/examples/recomm_system/test.ipynb b/examples/recomm_system/test.ipynb new file mode 100644 index 00000000..4e44624f --- /dev/null +++ b/examples/recomm_system/test.ipynb @@ -0,0 +1,186 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Creating training data\n", + "Warning: Looks like you're using an outdated `kagglehub` version (installed: 0.3.5), please consider upgrading to the latest version (0.3.6).\n", + "Path to dataset files: /root/.cache/kagglehub/datasets/karkavelrajaj/amazon-sales-dataset/versions/1\n", + "Original data shape: (1465, 16)\n", + "Expanded data shape: (14640, 4)\n", + "Dataset saved to noisy_dataset_0.005.csv\n", + "Creating training data\n", + "Warning: Looks like you're using an outdated `kagglehub` version (installed: 0.3.5), please consider upgrading to the latest version (0.3.6).\n", + "Path to dataset files: /root/.cache/kagglehub/datasets/karkavelrajaj/amazon-sales-dataset/versions/1\n", + "Original data shape: (1465, 16)\n", + "Expanded data shape: (14640, 4)\n", + "Dataset saved to noisy_dataset_0.01.csv\n", + "Creating training data\n", + "Warning: Looks like you're using an outdated `kagglehub` version (installed: 0.3.5), please consider upgrading to the latest version (0.3.6).\n", + "Path to dataset files: /root/.cache/kagglehub/datasets/karkavelrajaj/amazon-sales-dataset/versions/1\n", + "Original data shape: (1465, 16)\n", + "Expanded data shape: (14640, 4)\n", + "Dataset saved to noisy_dataset_0.02.csv\n", + "Creating training data\n", + "Warning: Looks like you're using an outdated `kagglehub` version (installed: 0.3.5), please consider upgrading to the latest version (0.3.6).\n", + "Path to dataset files: /root/.cache/kagglehub/datasets/karkavelrajaj/amazon-sales-dataset/versions/1\n", + "Original data shape: 
(1465, 16)\n", + "Expanded data shape: (14640, 4)\n", + "Dataset saved to noisy_dataset_0.05.csv\n", + "Creating training data\n", + "Warning: Looks like you're using an outdated `kagglehub` version (installed: 0.3.5), please consider upgrading to the latest version (0.3.6).\n", + "Path to dataset files: /root/.cache/kagglehub/datasets/karkavelrajaj/amazon-sales-dataset/versions/1\n", + "Original data shape: (1465, 16)\n", + "Expanded data shape: (14640, 4)\n", + "Dataset saved to noisy_dataset_0.1.csv\n", + "Creating training data\n", + "Warning: Looks like you're using an outdated `kagglehub` version (installed: 0.3.5), please consider upgrading to the latest version (0.3.6).\n", + "Path to dataset files: /root/.cache/kagglehub/datasets/karkavelrajaj/amazon-sales-dataset/versions/1\n", + "Original data shape: (1465, 16)\n", + "Expanded data shape: (14640, 4)\n", + "Dataset saved to noisy_dataset_0.2.csv\n" + ] + } + ], + "source": [ + "import prepare_dataset\n", + "import pandas as pd\n", + "import os\n", + "\n", + "dataset_noise_ratios = [0.005,0.01,0.02,0.05,0.1,0.2]\n", + "for noise in dataset_noise_ratios:\n", + " data = prepare_dataset.aug_amazon_products(noise_ratio = noise)\n", + " df = pd.DataFrame(data)\n", + " noise_dataset_file = f\"noisy_dataset_{noise}.csv\"\n", + " if os.path.exists(noise_dataset_file):\n", + " df.to_csv(noise_dataset_file, mode='a', index=False, header=False)\n", + " else:\n", + " df.to_csv(noise_dataset_file, index=False)\n", + " print(f\"Dataset saved to {noise_dataset_file}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " Algorithm Noise_Ratio T s Max_Included_Literals Epochs \\\n", + "0 Graph NN 0.005 0 0.0 0 20000 \n", + "1 GraphTM 0.005 10000 10.0 23 10 \n", + "2 TMClassifier 0.005 10000 10.0 32 10 \n", + "3 Graph NN 0.010 0 0.0 0 20000 \n", + "4 GraphTM 0.010 10000 10.0 23 10 \n", + ".. ... ... ... ... ... ... 
\n", + "175 GraphTM 0.100 10000 10.0 23 10 \n", + "176 TMClassifier 0.100 10000 10.0 32 10 \n", + "177 Graph NN 0.200 0 0.0 0 20000 \n", + "178 GraphTM 0.200 10000 10.0 23 10 \n", + "179 TMClassifier 0.200 10000 10.0 32 10 \n", + "\n", + " Platform Total_Time Accuracy \n", + "0 CPU 418.925047 75.628418 \n", + "1 CUDA 110.356832 98.688525 \n", + "2 CPU_sparse 1059.163476 76.810109 \n", + "3 CPU 550.698057 94.508195 \n", + "4 CUDA 114.062763 98.415301 \n", + ".. ... ... ... \n", + "175 CUDA 150.679511 90.273224 \n", + "176 CPU_sparse 967.589722 49.590164 \n", + "177 CPU 379.849724 64.153004 \n", + "178 CUDA 169.712628 77.814208 \n", + "179 CPU_sparse 955.942775 20.116120 \n", + "\n", + "[180 rows x 9 columns]\n", + "\n", + "\\begin{table}[h!]\n", + "\\centering\n", + "\\begin{tabular}{|c|c|c|c|}\n", + "\\hline\n", + "\\textbf{Noise Ratio} & \\textbf{GCN (\\%)} & \\textbf{GTM (\\%)} & \\textbf{TMClassifier (\\%)} \\\\ \\hline\n", + "0.005 & 81.73 & 98.62 & 76.70 \\\\ \\hline\n", + "0.01 & 84.73 & 98.34 & 71.43 \\\\ \\hline\n", + "0.02 & 87.81 & 97.76 & 69.09 \\\\ \\hline\n", + "0.05 & 79.86 & 94.74 & 61.04 \\\\ \\hline\n", + "0.1 & 76.40 & 89.82 & 49.60 \\\\ \\hline\n", + "0.2 & 65.90 & 78.22 & 20.12 \\\\ \\hline\n", + "\\end{tabular}\n", + "\\caption{Average accuracy comparison of GCN, GraphTM, and TMClassifier for varying noise ratios.}\n", + "\\label{tab:recomm_sys_accuracy}\n", + "\\end{table}\n" + ] + } + ], + "source": [ + "import pandas as pd\n", + "data = pd.read_csv(\"experiment_results.csv\")\n", + "\n", + "# Extract records within the specified range, e.g., rows 3 to 5 (0-indexed)\n", + "# This assumes each algorithm data spans three consecutive rows\n", + "start_index = 0\n", + "range_records = data.iloc[start_index:len(data)]\n", + "print(range_records)\n", + "# Create a dictionary to store the accuracy values\n", + "noise_accuracies = {}\n", + "\n", + "# Algorithm,Noise_Ratio,T,s,Max_Included_Literals,Epochs,Platform,Total_Time,Accuracy\n", + "# Group 
the data by Algorithm and Noise Ratio to calculate average accuracies\n", + "grouped_data = data.groupby(['Algorithm', 'Noise_Ratio']).agg({'Accuracy': 'mean'}).reset_index()\n", + "\n", + "# Pivot the data to get a structure suitable for LaTeX table generation\n", + "pivot_data = grouped_data.pivot(index='Noise_Ratio', columns='Algorithm', values='Accuracy')\n", + " \n", + "# Generate LaTeX table\n", + "latex_table = \"\"\"\n", + "\\\\begin{table}[h!]\n", + "\\\\centering\n", + "\\\\begin{tabular}{|c|c|c|c|}\n", + "\\\\hline\n", + "\\\\textbf{Noise Ratio} & \\\\textbf{GCN (\\\\%)} & \\\\textbf{GTM (\\\\%)} & \\\\textbf{TMClassifier (\\\\%)} \\\\\\\\ \\\\hline\n", + "\"\"\"\n", + "\n", + "# Iterate over the pivot data to construct the table rows\n", + "for noise_ratio, row in pivot_data.iterrows():\n", + " latex_table += f\"{noise_ratio} & \"\n", + " latex_table += f\"{row['Graph NN']:.2f} & {row['GraphTM']:.2f} & {row['TMClassifier']:.2f} \\\\\\\\ \\\\hline\\n\"\n", + "\n", + "latex_table += \"\\\\end{tabular}\\n\"\n", + "latex_table += \"\\\\caption{Average accuracy comparison of GCN, GraphTM, and TMClassifier for varying noise ratios.}\\n\"\n", + "latex_table += \"\\\\label{tab:recomm_sys_accuracy}\\n\"\n", + "latex_table += \"\\\\end{table}\"\n", + "\n", + "print(latex_table)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 26e208394e7dfd0d2dbd3cf0f9ce16e4457916fb Mon Sep 17 00:00:00 2001 From: Ahmed Khalid Date: Tue, 25 Feb 2025 08:54:16 +0000 Subject: [PATCH 24/29] adding exp id --- examples/recomm_system/experiment_results.csv | 183 +----------------- 
.../experiment_results_ensamble.csv | 181 +++++++++++++++++ examples/recomm_system/graph_nn.py | 4 +- examples/recomm_system/graph_tm.py | 2 + examples/recomm_system/main.sh | 9 +- examples/recomm_system/test.ipynb | 31 +-- examples/recomm_system/tm_classifier.py | 2 + 7 files changed, 198 insertions(+), 214 deletions(-) create mode 100644 examples/recomm_system/experiment_results_ensamble.csv diff --git a/examples/recomm_system/experiment_results.csv b/examples/recomm_system/experiment_results.csv index b394dad6..09e4f8e7 100644 --- a/examples/recomm_system/experiment_results.csv +++ b/examples/recomm_system/experiment_results.csv @@ -1,181 +1,2 @@ -Algorithm,Noise_Ratio,T,s,Max_Included_Literals,Epochs,Platform,Total_Time,Accuracy -Graph NN,0.005,0,0,0,20000,CPU,418.9250466823578,75.62841773033142 -GraphTM,0.005,10000,10.0,23,10,CUDA,110.35683226585388,98.68852459016394 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1059.1634759902954,76.81010928961749 -Graph NN,0.01,0,0,0,20000,CPU,550.6980571746826,94.50819492340088 -GraphTM,0.01,10000,10.0,23,10,CUDA,114.06276345252991,98.4153005464481 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1051.916612625122,75.3415300546448 -Graph NN,0.02,0,0,0,20000,CPU,475.44024682044983,75.30054450035095 -GraphTM,0.02,10000,10.0,23,10,CUDA,121.55624794960022,97.8415300546448 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1043.9487817287445,72.1311475409836 -Graph NN,0.05,0,0,0,20000,CPU,411.8552327156067,80.98360896110535 -GraphTM,0.05,10000,10.0,23,10,CUDA,136.7814338207245,94.20765027322405 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1044.2656917572021,64.00273224043715 -Graph NN,0.1,0,0,0,20000,CPU,484.6550889015198,68.7158465385437 -GraphTM,0.1,10000,10.0,23,10,CUDA,150.34457921981812,89.72677595628416 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1061.191523551941,49.62431693989071 -Graph NN,0.2,0,0,0,20000,CPU,483.8463816642761,71.28415107727051 -GraphTM,0.2,10000,10.0,23,10,CUDA,169.18810439109802,78.49726775956285 
-TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,1071.927158355713,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,473.5806052684784,86.36612296104431 -GraphTM,0.005,10000,10.0,23,10,CUDA,110.18979954719543,98.60655737704917 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,979.0509588718414,76.74180327868852 -Graph NN,0.01,0,0,0,20000,CPU,444.6897065639496,93.55190992355347 -GraphTM,0.01,10000,10.0,23,10,CUDA,112.48035550117493,98.4153005464481 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1007.9654748439789,74.86338797814209 -Graph NN,0.02,0,0,0,20000,CPU,386.32835030555725,93.22404265403748 -GraphTM,0.02,10000,10.0,23,10,CUDA,120.46316766738892,97.73224043715847 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1017.5866801738739,73.25819672131148 -Graph NN,0.05,0,0,0,20000,CPU,417.78410935401917,73.1693983078003 -GraphTM,0.05,10000,10.0,23,10,CUDA,135.64952206611633,95.08196721311475 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,945.0465729236603,64.00273224043715 -Graph NN,0.1,0,0,0,20000,CPU,481.6537721157074,77.18579173088074 -GraphTM,0.1,10000,10.0,23,10,CUDA,149.57958960533142,90.08196721311475 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,938.0212676525116,49.59016393442623 -Graph NN,0.2,0,0,0,20000,CPU,391.36059975624084,64.15300369262695 -GraphTM,0.2,10000,10.0,23,10,CUDA,169.49347591400146,77.65027322404372 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,940.9758951663971,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,480.5005066394806,75.68305730819702 -GraphTM,0.005,10000,10.0,23,10,CUDA,109.65927052497864,98.19672131147541 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,947.7581994533539,76.63934426229508 -Graph NN,0.01,0,0,0,20000,CPU,449.22584795951843,76.36612057685852 -GraphTM,0.01,10000,10.0,23,10,CUDA,113.07226181030273,98.27868852459017 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1010.8711988925934,74.93169398907104 -Graph NN,0.02,0,0,0,20000,CPU,403.96647000312805,96.85792326927185 
-GraphTM,0.02,10000,10.0,23,10,CUDA,120.02044725418091,97.78688524590164 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1011.7896072864532,72.40437158469946 -Graph NN,0.05,0,0,0,20000,CPU,460.688773393631,85.00000238418579 -GraphTM,0.05,10000,10.0,23,10,CUDA,136.04891228675842,94.69945355191257 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1014.1492829322815,64.00273224043715 -Graph NN,0.1,0,0,0,20000,CPU,407.9346880912781,74.1256833076477 -GraphTM,0.1,10000,10.0,23,10,CUDA,149.6586093902588,90.08196721311475 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,990.8282098770142,49.59016393442623 -Graph NN,0.2,0,0,0,20000,CPU,437.8108870983124,65.60109257698059 -GraphTM,0.2,10000,10.0,23,10,CUDA,168.44772601127625,78.93442622950819 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,1022.1848647594452,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,430.3925087451935,89.20764923095703 -GraphTM,0.005,10000,10.0,23,10,CUDA,109.6658935546875,98.68852459016394 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1016.199923992157,76.63934426229508 -Graph NN,0.01,0,0,0,20000,CPU,396.3338620662689,84.23497080802917 -GraphTM,0.01,10000,10.0,23,10,CUDA,113.67849016189575,98.27868852459017 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,944.4602844715118,74.86338797814209 -Graph NN,0.02,0,0,0,20000,CPU,434.91951632499695,93.25136542320251 -GraphTM,0.02,10000,10.0,23,10,CUDA,120.31921482086182,97.8415300546448 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,933.2245874404907,72.40437158469946 -Graph NN,0.05,0,0,0,20000,CPU,483.2671537399292,80.32786846160889 -GraphTM,0.05,10000,10.0,23,10,CUDA,135.68922591209412,94.78142076502732 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,994.6384744644165,64.13934426229508 -Graph NN,0.1,0,0,0,20000,CPU,424.9935986995697,81.33879899978638 -GraphTM,0.1,10000,10.0,23,10,CUDA,149.08107113838196,89.59016393442623 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,944.0273253917694,49.62431693989071 -Graph 
NN,0.2,0,0,0,20000,CPU,333.49274706840515,61.50273084640503 -GraphTM,0.2,10000,10.0,23,10,CUDA,170.906751871109,78.98907103825137 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,965.9725024700165,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,377.28471970558167,75.68305730819702 -GraphTM,0.005,10000,10.0,23,10,CUDA,109.61631536483765,98.82513661202185 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,976.8008840084076,76.67349726775956 -Graph NN,0.01,0,0,0,20000,CPU,473.2922372817993,76.06557607650757 -GraphTM,0.01,10000,10.0,23,10,CUDA,112.87212014198303,98.27868852459017 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,942.7254059314728,74.86338797814209 -Graph NN,0.02,0,0,0,20000,CPU,357.36573815345764,75.40983557701111 -GraphTM,0.02,10000,10.0,23,10,CUDA,119.41612005233765,97.8415300546448 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,985.81947016716,72.40437158469946 -Graph NN,0.05,0,0,0,20000,CPU,440.75843334198,73.08743000030518 -GraphTM,0.05,10000,10.0,23,10,CUDA,136.8215868473053,94.91803278688525 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,997.739678144455,64.1051912568306 -Graph NN,0.1,0,0,0,20000,CPU,426.73446226119995,88.55191469192505 -GraphTM,0.1,10000,10.0,23,10,CUDA,149.54467248916626,89.94535519125682 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,980.096907377243,49.59016393442623 -Graph NN,0.2,0,0,0,20000,CPU,387.20843958854675,75.71038007736206 -GraphTM,0.2,10000,10.0,23,10,CUDA,169.7962884902954,77.56830601092896 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,987.0616261959076,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,455.586905002594,83.41529965400696 -GraphTM,0.005,10000,10.0,23,10,CUDA,109.7424705028534,98.5792349726776 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,998.4698519706726,76.81010928961749 -Graph NN,0.01,0,0,0,20000,CPU,466.44022035598755,98.52458834648132 -GraphTM,0.01,10000,10.0,23,10,CUDA,112.78495740890503,98.27868852459017 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,932.3163437843323,74.93169398907104 -Graph 
NN,0.02,0,0,0,20000,CPU,455.35024762153625,88.96175026893616 -GraphTM,0.02,10000,10.0,23,10,CUDA,120.741384267807,97.75956284153006 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,974.3740100860596,72.1311475409836 -Graph NN,0.05,0,0,0,20000,CPU,399.9565739631653,73.60655665397644 -GraphTM,0.05,10000,10.0,23,10,CUDA,136.17181992530823,94.67213114754098 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,971.1499485969543,64.1051912568306 -Graph NN,0.1,0,0,0,20000,CPU,447.5498752593994,70.8743155002594 -GraphTM,0.1,10000,10.0,23,10,CUDA,149.6928951740265,89.80874316939891 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,962.4737737178802,49.59016393442623 -Graph NN,0.2,0,0,0,20000,CPU,403.6350507736206,64.15300369262695 -GraphTM,0.2,10000,10.0,23,10,CUDA,170.02189421653748,78.16939890710383 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,947.2696743011475,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,470.0121097564697,81.20218515396118 -GraphTM,0.005,10000,10.0,23,10,CUDA,109.51706099510193,98.82513661202185 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,974.2360310554504,76.74180327868852 -Graph NN,0.01,0,0,0,20000,CPU,466.69573068618774,76.06557607650757 -GraphTM,0.01,10000,10.0,23,10,CUDA,112.95063591003418,98.4153005464481 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,948.407201051712,74.86338797814209 -Graph NN,0.02,0,0,0,20000,CPU,288.0073969364166,92.92349815368652 -GraphTM,0.02,10000,10.0,23,10,CUDA,119.34772634506226,97.48633879781421 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,971.228814125061,39.51502732240437 -Graph NN,0.05,0,0,0,20000,CPU,477.7228500843048,89.86338973045349 -GraphTM,0.05,10000,10.0,23,10,CUDA,135.2427453994751,94.86338797814207 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,964.5819866657257,34.56284153005464 -Graph NN,0.1,0,0,0,20000,CPU,459.15181946754456,71.22950553894043 -GraphTM,0.1,10000,10.0,23,10,CUDA,148.52941298484802,89.67213114754098 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,981.4810082912445,49.59016393442623 -Graph 
NN,0.2,0,0,0,20000,CPU,356.59899377822876,64.15300369262695 -GraphTM,0.2,10000,10.0,23,10,CUDA,169.7598683834076,76.85792349726775 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,959.9282560348511,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,378.94336581230164,80.32786846160889 -GraphTM,0.005,10000,10.0,23,10,CUDA,109.55144882202148,98.44262295081967 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,947.1284465789795,76.63934426229508 -Graph NN,0.01,0,0,0,20000,CPU,407.1111581325531,94.31694149971008 -GraphTM,0.01,10000,10.0,23,10,CUDA,112.06348276138306,98.27868852459017 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,941.711061000824,39.65163934426229 -Graph NN,0.02,0,0,0,20000,CPU,402.2970163822174,79.80874180793762 -GraphTM,0.02,10000,10.0,23,10,CUDA,120.20444130897522,97.8415300546448 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,998.2885782718658,72.1311475409836 -Graph NN,0.05,0,0,0,20000,CPU,400.97751235961914,85.30054688453674 -GraphTM,0.05,10000,10.0,23,10,CUDA,136.81029963493347,94.78142076502732 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1003.2194263935089,64.1051912568306 -Graph NN,0.1,0,0,0,20000,CPU,413.25741934776306,74.59016442298889 -GraphTM,0.1,10000,10.0,23,10,CUDA,148.70455861091614,89.89071038251366 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,974.4099938869476,49.62431693989071 -Graph NN,0.2,0,0,0,20000,CPU,369.36416029930115,64.15300369262695 -GraphTM,0.2,10000,10.0,23,10,CUDA,170.01750564575195,78.55191256830601 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,990.2080008983612,20.184426229508194 -Graph NN,0.005,0,0,0,20000,CPU,440.5256702899933,90.4644787311554 -GraphTM,0.005,10000,10.0,23,10,CUDA,109.76434278488159,98.55191256830601 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1004.704318523407,76.63934426229508 -Graph NN,0.01,0,0,0,20000,CPU,385.76011848449707,77.62295007705688 -GraphTM,0.01,10000,10.0,23,10,CUDA,113.28425002098083,98.44262295081967 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,953.8945541381836,74.93169398907104 
-Graph NN,0.02,0,0,0,20000,CPU,422.2995481491089,90.71038365364075 -GraphTM,0.02,10000,10.0,23,10,CUDA,121.29091334342957,97.6775956284153 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1002.099497795105,72.1311475409836 -Graph NN,0.05,0,0,0,20000,CPU,383.33483958244324,81.8306028842926 -GraphTM,0.05,10000,10.0,23,10,CUDA,134.72863698005676,94.53551912568307 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,936.831921339035,64.1051912568306 -Graph NN,0.1,0,0,0,20000,CPU,320.32143545150757,83.60655903816223 -GraphTM,0.1,10000,10.0,23,10,CUDA,150.56500816345215,89.15300546448087 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,955.8687121868134,49.62431693989071 -Graph NN,0.2,0,0,0,20000,CPU,432.34014868736267,64.15300369262695 -GraphTM,0.2,10000,10.0,23,10,CUDA,169.61127710342407,79.12568306010928 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,945.0617082118988,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,458.87039852142334,79.37158346176147 -GraphTM,0.005,10000,10.0,23,10,CUDA,110.9952290058136,98.82513661202185 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,985.8775904178619,76.63934426229508 -Graph NN,0.01,0,0,0,20000,CPU,453.55728340148926,76.06557607650757 -GraphTM,0.01,10000,10.0,23,10,CUDA,113.85269451141357,98.27868852459017 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,941.0662143230438,75.06830601092896 -Graph NN,0.02,0,0,0,20000,CPU,416.2407822608948,91.66666865348816 -GraphTM,0.02,10000,10.0,23,10,CUDA,120.69959592819214,97.78688524590164 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,973.9127674102783,72.40437158469946 -Graph NN,0.05,0,0,0,20000,CPU,311.2621831893921,75.46448111534119 -GraphTM,0.05,10000,10.0,23,10,CUDA,134.66055345535278,94.89071038251366 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,953.3380017280579,63.25136612021858 -Graph NN,0.1,0,0,0,20000,CPU,425.43416261672974,73.79781603813171 -GraphTM,0.1,10000,10.0,23,10,CUDA,150.67951107025146,90.27322404371586 
-TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,967.5897221565247,49.59016393442623 -Graph NN,0.2,0,0,0,20000,CPU,379.8497235774994,64.15300369262695 -GraphTM,0.2,10000,10.0,23,10,CUDA,169.7126281261444,77.81420765027323 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,955.9427745342255,20.116120218579233 +Exp_id,Algorithm,Noise_Ratio,T,s,Max_Included_Literals,Epochs,Platform,Total_Time,Accuracy +20250225083536,Graph NN,0.005,0,0,0,2000,CPU,44.10563063621521,80.87431788444519 diff --git a/examples/recomm_system/experiment_results_ensamble.csv b/examples/recomm_system/experiment_results_ensamble.csv new file mode 100644 index 00000000..b394dad6 --- /dev/null +++ b/examples/recomm_system/experiment_results_ensamble.csv @@ -0,0 +1,181 @@ +Algorithm,Noise_Ratio,T,s,Max_Included_Literals,Epochs,Platform,Total_Time,Accuracy +Graph NN,0.005,0,0,0,20000,CPU,418.9250466823578,75.62841773033142 +GraphTM,0.005,10000,10.0,23,10,CUDA,110.35683226585388,98.68852459016394 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1059.1634759902954,76.81010928961749 +Graph NN,0.01,0,0,0,20000,CPU,550.6980571746826,94.50819492340088 +GraphTM,0.01,10000,10.0,23,10,CUDA,114.06276345252991,98.4153005464481 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1051.916612625122,75.3415300546448 +Graph NN,0.02,0,0,0,20000,CPU,475.44024682044983,75.30054450035095 +GraphTM,0.02,10000,10.0,23,10,CUDA,121.55624794960022,97.8415300546448 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1043.9487817287445,72.1311475409836 +Graph NN,0.05,0,0,0,20000,CPU,411.8552327156067,80.98360896110535 +GraphTM,0.05,10000,10.0,23,10,CUDA,136.7814338207245,94.20765027322405 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1044.2656917572021,64.00273224043715 +Graph NN,0.1,0,0,0,20000,CPU,484.6550889015198,68.7158465385437 +GraphTM,0.1,10000,10.0,23,10,CUDA,150.34457921981812,89.72677595628416 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1061.191523551941,49.62431693989071 +Graph 
NN,0.2,0,0,0,20000,CPU,483.8463816642761,71.28415107727051 +GraphTM,0.2,10000,10.0,23,10,CUDA,169.18810439109802,78.49726775956285 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,1071.927158355713,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,473.5806052684784,86.36612296104431 +GraphTM,0.005,10000,10.0,23,10,CUDA,110.18979954719543,98.60655737704917 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,979.0509588718414,76.74180327868852 +Graph NN,0.01,0,0,0,20000,CPU,444.6897065639496,93.55190992355347 +GraphTM,0.01,10000,10.0,23,10,CUDA,112.48035550117493,98.4153005464481 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1007.9654748439789,74.86338797814209 +Graph NN,0.02,0,0,0,20000,CPU,386.32835030555725,93.22404265403748 +GraphTM,0.02,10000,10.0,23,10,CUDA,120.46316766738892,97.73224043715847 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1017.5866801738739,73.25819672131148 +Graph NN,0.05,0,0,0,20000,CPU,417.78410935401917,73.1693983078003 +GraphTM,0.05,10000,10.0,23,10,CUDA,135.64952206611633,95.08196721311475 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,945.0465729236603,64.00273224043715 +Graph NN,0.1,0,0,0,20000,CPU,481.6537721157074,77.18579173088074 +GraphTM,0.1,10000,10.0,23,10,CUDA,149.57958960533142,90.08196721311475 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,938.0212676525116,49.59016393442623 +Graph NN,0.2,0,0,0,20000,CPU,391.36059975624084,64.15300369262695 +GraphTM,0.2,10000,10.0,23,10,CUDA,169.49347591400146,77.65027322404372 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,940.9758951663971,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,480.5005066394806,75.68305730819702 +GraphTM,0.005,10000,10.0,23,10,CUDA,109.65927052497864,98.19672131147541 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,947.7581994533539,76.63934426229508 +Graph NN,0.01,0,0,0,20000,CPU,449.22584795951843,76.36612057685852 +GraphTM,0.01,10000,10.0,23,10,CUDA,113.07226181030273,98.27868852459017 
+TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1010.8711988925934,74.93169398907104 +Graph NN,0.02,0,0,0,20000,CPU,403.96647000312805,96.85792326927185 +GraphTM,0.02,10000,10.0,23,10,CUDA,120.02044725418091,97.78688524590164 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1011.7896072864532,72.40437158469946 +Graph NN,0.05,0,0,0,20000,CPU,460.688773393631,85.00000238418579 +GraphTM,0.05,10000,10.0,23,10,CUDA,136.04891228675842,94.69945355191257 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1014.1492829322815,64.00273224043715 +Graph NN,0.1,0,0,0,20000,CPU,407.9346880912781,74.1256833076477 +GraphTM,0.1,10000,10.0,23,10,CUDA,149.6586093902588,90.08196721311475 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,990.8282098770142,49.59016393442623 +Graph NN,0.2,0,0,0,20000,CPU,437.8108870983124,65.60109257698059 +GraphTM,0.2,10000,10.0,23,10,CUDA,168.44772601127625,78.93442622950819 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,1022.1848647594452,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,430.3925087451935,89.20764923095703 +GraphTM,0.005,10000,10.0,23,10,CUDA,109.6658935546875,98.68852459016394 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1016.199923992157,76.63934426229508 +Graph NN,0.01,0,0,0,20000,CPU,396.3338620662689,84.23497080802917 +GraphTM,0.01,10000,10.0,23,10,CUDA,113.67849016189575,98.27868852459017 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,944.4602844715118,74.86338797814209 +Graph NN,0.02,0,0,0,20000,CPU,434.91951632499695,93.25136542320251 +GraphTM,0.02,10000,10.0,23,10,CUDA,120.31921482086182,97.8415300546448 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,933.2245874404907,72.40437158469946 +Graph NN,0.05,0,0,0,20000,CPU,483.2671537399292,80.32786846160889 +GraphTM,0.05,10000,10.0,23,10,CUDA,135.68922591209412,94.78142076502732 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,994.6384744644165,64.13934426229508 +Graph NN,0.1,0,0,0,20000,CPU,424.9935986995697,81.33879899978638 
+GraphTM,0.1,10000,10.0,23,10,CUDA,149.08107113838196,89.59016393442623 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,944.0273253917694,49.62431693989071 +Graph NN,0.2,0,0,0,20000,CPU,333.49274706840515,61.50273084640503 +GraphTM,0.2,10000,10.0,23,10,CUDA,170.906751871109,78.98907103825137 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,965.9725024700165,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,377.28471970558167,75.68305730819702 +GraphTM,0.005,10000,10.0,23,10,CUDA,109.61631536483765,98.82513661202185 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,976.8008840084076,76.67349726775956 +Graph NN,0.01,0,0,0,20000,CPU,473.2922372817993,76.06557607650757 +GraphTM,0.01,10000,10.0,23,10,CUDA,112.87212014198303,98.27868852459017 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,942.7254059314728,74.86338797814209 +Graph NN,0.02,0,0,0,20000,CPU,357.36573815345764,75.40983557701111 +GraphTM,0.02,10000,10.0,23,10,CUDA,119.41612005233765,97.8415300546448 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,985.81947016716,72.40437158469946 +Graph NN,0.05,0,0,0,20000,CPU,440.75843334198,73.08743000030518 +GraphTM,0.05,10000,10.0,23,10,CUDA,136.8215868473053,94.91803278688525 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,997.739678144455,64.1051912568306 +Graph NN,0.1,0,0,0,20000,CPU,426.73446226119995,88.55191469192505 +GraphTM,0.1,10000,10.0,23,10,CUDA,149.54467248916626,89.94535519125682 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,980.096907377243,49.59016393442623 +Graph NN,0.2,0,0,0,20000,CPU,387.20843958854675,75.71038007736206 +GraphTM,0.2,10000,10.0,23,10,CUDA,169.7962884902954,77.56830601092896 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,987.0616261959076,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,455.586905002594,83.41529965400696 +GraphTM,0.005,10000,10.0,23,10,CUDA,109.7424705028534,98.5792349726776 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,998.4698519706726,76.81010928961749 +Graph NN,0.01,0,0,0,20000,CPU,466.44022035598755,98.52458834648132 
+GraphTM,0.01,10000,10.0,23,10,CUDA,112.78495740890503,98.27868852459017 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,932.3163437843323,74.93169398907104 +Graph NN,0.02,0,0,0,20000,CPU,455.35024762153625,88.96175026893616 +GraphTM,0.02,10000,10.0,23,10,CUDA,120.741384267807,97.75956284153006 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,974.3740100860596,72.1311475409836 +Graph NN,0.05,0,0,0,20000,CPU,399.9565739631653,73.60655665397644 +GraphTM,0.05,10000,10.0,23,10,CUDA,136.17181992530823,94.67213114754098 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,971.1499485969543,64.1051912568306 +Graph NN,0.1,0,0,0,20000,CPU,447.5498752593994,70.8743155002594 +GraphTM,0.1,10000,10.0,23,10,CUDA,149.6928951740265,89.80874316939891 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,962.4737737178802,49.59016393442623 +Graph NN,0.2,0,0,0,20000,CPU,403.6350507736206,64.15300369262695 +GraphTM,0.2,10000,10.0,23,10,CUDA,170.02189421653748,78.16939890710383 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,947.2696743011475,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,470.0121097564697,81.20218515396118 +GraphTM,0.005,10000,10.0,23,10,CUDA,109.51706099510193,98.82513661202185 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,974.2360310554504,76.74180327868852 +Graph NN,0.01,0,0,0,20000,CPU,466.69573068618774,76.06557607650757 +GraphTM,0.01,10000,10.0,23,10,CUDA,112.95063591003418,98.4153005464481 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,948.407201051712,74.86338797814209 +Graph NN,0.02,0,0,0,20000,CPU,288.0073969364166,92.92349815368652 +GraphTM,0.02,10000,10.0,23,10,CUDA,119.34772634506226,97.48633879781421 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,971.228814125061,39.51502732240437 +Graph NN,0.05,0,0,0,20000,CPU,477.7228500843048,89.86338973045349 +GraphTM,0.05,10000,10.0,23,10,CUDA,135.2427453994751,94.86338797814207 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,964.5819866657257,34.56284153005464 +Graph NN,0.1,0,0,0,20000,CPU,459.15181946754456,71.22950553894043 
+GraphTM,0.1,10000,10.0,23,10,CUDA,148.52941298484802,89.67213114754098 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,981.4810082912445,49.59016393442623 +Graph NN,0.2,0,0,0,20000,CPU,356.59899377822876,64.15300369262695 +GraphTM,0.2,10000,10.0,23,10,CUDA,169.7598683834076,76.85792349726775 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,959.9282560348511,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,378.94336581230164,80.32786846160889 +GraphTM,0.005,10000,10.0,23,10,CUDA,109.55144882202148,98.44262295081967 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,947.1284465789795,76.63934426229508 +Graph NN,0.01,0,0,0,20000,CPU,407.1111581325531,94.31694149971008 +GraphTM,0.01,10000,10.0,23,10,CUDA,112.06348276138306,98.27868852459017 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,941.711061000824,39.65163934426229 +Graph NN,0.02,0,0,0,20000,CPU,402.2970163822174,79.80874180793762 +GraphTM,0.02,10000,10.0,23,10,CUDA,120.20444130897522,97.8415300546448 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,998.2885782718658,72.1311475409836 +Graph NN,0.05,0,0,0,20000,CPU,400.97751235961914,85.30054688453674 +GraphTM,0.05,10000,10.0,23,10,CUDA,136.81029963493347,94.78142076502732 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1003.2194263935089,64.1051912568306 +Graph NN,0.1,0,0,0,20000,CPU,413.25741934776306,74.59016442298889 +GraphTM,0.1,10000,10.0,23,10,CUDA,148.70455861091614,89.89071038251366 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,974.4099938869476,49.62431693989071 +Graph NN,0.2,0,0,0,20000,CPU,369.36416029930115,64.15300369262695 +GraphTM,0.2,10000,10.0,23,10,CUDA,170.01750564575195,78.55191256830601 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,990.2080008983612,20.184426229508194 +Graph NN,0.005,0,0,0,20000,CPU,440.5256702899933,90.4644787311554 +GraphTM,0.005,10000,10.0,23,10,CUDA,109.76434278488159,98.55191256830601 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1004.704318523407,76.63934426229508 +Graph 
NN,0.01,0,0,0,20000,CPU,385.76011848449707,77.62295007705688 +GraphTM,0.01,10000,10.0,23,10,CUDA,113.28425002098083,98.44262295081967 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,953.8945541381836,74.93169398907104 +Graph NN,0.02,0,0,0,20000,CPU,422.2995481491089,90.71038365364075 +GraphTM,0.02,10000,10.0,23,10,CUDA,121.29091334342957,97.6775956284153 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1002.099497795105,72.1311475409836 +Graph NN,0.05,0,0,0,20000,CPU,383.33483958244324,81.8306028842926 +GraphTM,0.05,10000,10.0,23,10,CUDA,134.72863698005676,94.53551912568307 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,936.831921339035,64.1051912568306 +Graph NN,0.1,0,0,0,20000,CPU,320.32143545150757,83.60655903816223 +GraphTM,0.1,10000,10.0,23,10,CUDA,150.56500816345215,89.15300546448087 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,955.8687121868134,49.62431693989071 +Graph NN,0.2,0,0,0,20000,CPU,432.34014868736267,64.15300369262695 +GraphTM,0.2,10000,10.0,23,10,CUDA,169.61127710342407,79.12568306010928 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,945.0617082118988,20.116120218579233 +Graph NN,0.005,0,0,0,20000,CPU,458.87039852142334,79.37158346176147 +GraphTM,0.005,10000,10.0,23,10,CUDA,110.9952290058136,98.82513661202185 +TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,985.8775904178619,76.63934426229508 +Graph NN,0.01,0,0,0,20000,CPU,453.55728340148926,76.06557607650757 +GraphTM,0.01,10000,10.0,23,10,CUDA,113.85269451141357,98.27868852459017 +TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,941.0662143230438,75.06830601092896 +Graph NN,0.02,0,0,0,20000,CPU,416.2407822608948,91.66666865348816 +GraphTM,0.02,10000,10.0,23,10,CUDA,120.69959592819214,97.78688524590164 +TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,973.9127674102783,72.40437158469946 +Graph NN,0.05,0,0,0,20000,CPU,311.2621831893921,75.46448111534119 +GraphTM,0.05,10000,10.0,23,10,CUDA,134.66055345535278,94.89071038251366 +TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,953.3380017280579,63.25136612021858 
+Graph NN,0.1,0,0,0,20000,CPU,425.43416261672974,73.79781603813171 +GraphTM,0.1,10000,10.0,23,10,CUDA,150.67951107025146,90.27322404371586 +TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,967.5897221565247,49.59016393442623 +Graph NN,0.2,0,0,0,20000,CPU,379.8497235774994,64.15300369262695 +GraphTM,0.2,10000,10.0,23,10,CUDA,169.7126281261444,77.81420765027323 +TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,955.9427745342255,20.116120218579233 diff --git a/examples/recomm_system/graph_nn.py b/examples/recomm_system/graph_nn.py index 9ef5fbed..ef6922a7 100644 --- a/examples/recomm_system/graph_nn.py +++ b/examples/recomm_system/graph_nn.py @@ -94,6 +94,7 @@ def forward(self, x, edge_index): total_time = benchmark_total.elapsed() # Append results for each epoch results.append({ + "Exp_id": args.exp_id, "Algorithm": "Graph NN", "Noise_Ratio": args.dataset_noise_ratio, "T": 0, @@ -118,8 +119,9 @@ def forward(self, x, edge_index): def default_args(**kwargs): parser = argparse.ArgumentParser() parser.add_argument("--platform", default="CPU", type=str, choices=["CPU", "CUDA"]) - parser.add_argument("--epochs", default=20000, type=int) + parser.add_argument("--epochs", default=2000, type=int) parser.add_argument("--dataset_noise_ratio", default=0.01, type=float) + parser.add_argument("--exp_id", default="", type=str) args = parser.parse_args() for key, value in kwargs.items(): if key in args.__dict__: diff --git a/examples/recomm_system/graph_tm.py b/examples/recomm_system/graph_tm.py index d03d3be9..27c30828 100644 --- a/examples/recomm_system/graph_tm.py +++ b/examples/recomm_system/graph_tm.py @@ -127,6 +127,7 @@ def main(args): total_time = benchmark_total.elapsed() # result_train = 100*(tm.predict(graphs_train) == Y_train).mean() results.append({ + "Exp_id": args.exp_id, "Algorithm": "GraphTM", "Noise_Ratio": args.dataset_noise_ratio, "T": args.T, @@ -163,6 +164,7 @@ def default_args(**kwargs): parser.add_argument("--noise", default=0.01, type=float) 
parser.add_argument("--max-included-literals", default=23, type=int) parser.add_argument("--dataset_noise_ratio", default=0.01, type=float) + parser.add_argument("--exp_id", default="", type=str) args = parser.parse_args() for key, value in kwargs.items(): if key in args.__dict__: diff --git a/examples/recomm_system/main.sh b/examples/recomm_system/main.sh index a5db0425..82b03778 100644 --- a/examples/recomm_system/main.sh +++ b/examples/recomm_system/main.sh @@ -6,6 +6,9 @@ set -e # exit if error models="graph_tm tm_classifier graph_nn" dataset_noise_ratios="0.005 0.01 0.02 0.05 0.1 0.2" num_iterations=10 # Number of times to repeat the experiments +exp_id=$(date +%Y%m%d%H%M%S) + +echo 'Experiment ID: ' $exp_id for (( i=1; i<=num_iterations; i++ )) do @@ -13,13 +16,13 @@ do for N in $dataset_noise_ratios; do echo `date`, Running Graph NN ... - python3 graph_nn.py --dataset_noise_ratio $N + python3 graph_nn.py --dataset_noise_ratio $N --exp_id $exp_id echo `date`, Running Graph Tsetlin Machine ... - python3 graph_tm.py --dataset_noise_ratio $N + python3 graph_tm.py --dataset_noise_ratio $N --exp_id $exp_id echo `date`, Running Tsetlin Machine Classifier ... - python3 tm_classifier.py --dataset_noise_ratio $N + python3 tm_classifier.py --dataset_noise_ratio $N --exp_id $exp_id done done diff --git a/examples/recomm_system/test.ipynb b/examples/recomm_system/test.ipynb index 4e44624f..9873a435 100644 --- a/examples/recomm_system/test.ipynb +++ b/examples/recomm_system/test.ipynb @@ -67,40 +67,13 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - " Algorithm Noise_Ratio T s Max_Included_Literals Epochs \\\n", - "0 Graph NN 0.005 0 0.0 0 20000 \n", - "1 GraphTM 0.005 10000 10.0 23 10 \n", - "2 TMClassifier 0.005 10000 10.0 32 10 \n", - "3 Graph NN 0.010 0 0.0 0 20000 \n", - "4 GraphTM 0.010 10000 10.0 23 10 \n", - ".. ... ... ... ... ... ... 
\n", - "175 GraphTM 0.100 10000 10.0 23 10 \n", - "176 TMClassifier 0.100 10000 10.0 32 10 \n", - "177 Graph NN 0.200 0 0.0 0 20000 \n", - "178 GraphTM 0.200 10000 10.0 23 10 \n", - "179 TMClassifier 0.200 10000 10.0 32 10 \n", - "\n", - " Platform Total_Time Accuracy \n", - "0 CPU 418.925047 75.628418 \n", - "1 CUDA 110.356832 98.688525 \n", - "2 CPU_sparse 1059.163476 76.810109 \n", - "3 CPU 550.698057 94.508195 \n", - "4 CUDA 114.062763 98.415301 \n", - ".. ... ... ... \n", - "175 CUDA 150.679511 90.273224 \n", - "176 CPU_sparse 967.589722 49.590164 \n", - "177 CPU 379.849724 64.153004 \n", - "178 CUDA 169.712628 77.814208 \n", - "179 CPU_sparse 955.942775 20.116120 \n", - "\n", - "[180 rows x 9 columns]\n", "\n", "\\begin{table}[h!]\n", "\\centering\n", @@ -128,7 +101,7 @@ "# This assumes each algorithm data spans three consecutive rows\n", "start_index = 0\n", "range_records = data.iloc[start_index:len(data)]\n", - "print(range_records)\n", + "# print(range_records)\n", "# Create a dictionary to store the accuracy values\n", "noise_accuracies = {}\n", "\n", diff --git a/examples/recomm_system/tm_classifier.py b/examples/recomm_system/tm_classifier.py index 876f8c4f..cb6cb458 100644 --- a/examples/recomm_system/tm_classifier.py +++ b/examples/recomm_system/tm_classifier.py @@ -34,6 +34,7 @@ def main(args): # Append results for each epoch results.append({ + "Exp_id": args.exp_id, "Algorithm": "TMClassifier", "Noise_Ratio": args.dataset_noise_ratio, "T": args.T, @@ -64,6 +65,7 @@ def default_args(**kwargs): parser.add_argument("--weighted_clauses", default=True, type=bool) parser.add_argument("--epochs", default=10, type=int) parser.add_argument("--dataset_noise_ratio", default=0.01, type=float) + parser.add_argument("--exp_id", default="", type=str) args = parser.parse_args() for key, value in kwargs.items(): if key in args.__dict__: From f057bbe0829815da8fc7a8edff522031161a1272 Mon Sep 17 00:00:00 2001 From: Ahmed Khalid Date: Fri, 28 Feb 2025 07:39:30 +0000 
Subject: [PATCH 25/29] update --- examples/recomm_system/experiment_results.csv | 94 +++++++++++++++++++ examples/recomm_system/test.ipynb | 25 +++-- 2 files changed, 106 insertions(+), 13 deletions(-) diff --git a/examples/recomm_system/experiment_results.csv b/examples/recomm_system/experiment_results.csv index 09e4f8e7..a2902234 100644 --- a/examples/recomm_system/experiment_results.csv +++ b/examples/recomm_system/experiment_results.csv @@ -1,2 +1,96 @@ Exp_id,Algorithm,Noise_Ratio,T,s,Max_Included_Literals,Epochs,Platform,Total_Time,Accuracy 20250225083536,Graph NN,0.005,0,0,0,2000,CPU,44.10563063621521,80.87431788444519 +20250225090119,Graph NN,0.005,0,0,0,2000,CPU,49.34887194633484,84.45355296134949 +20250225090119,GraphTM,0.005,10000,10.0,23,10,CUDA,109.94287848472595,98.82513661202185 +20250225090119,TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,944.8344459533691,76.63934426229508 +20250225090119,Graph NN,0.01,0,0,0,2000,CPU,36.30448269844055,81.99453353881836 +20250225090119,GraphTM,0.01,10000,10.0,23,10,CUDA,113.23237609863281,98.4153005464481 +20250225090119,TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,936.9574060440063,74.93169398907104 +20250225090119,Graph NN,0.02,0,0,0,2000,CPU,38.477863073349,87.54098415374756 +20250225090119,GraphTM,0.02,10000,10.0,23,10,CUDA,120.61202812194824,97.73224043715847 +20250225090119,TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1001.9182693958282,72.1311475409836 +20250225090119,Graph NN,0.05,0,0,0,2000,CPU,48.03118896484375,79.20765280723572 +20250225090119,GraphTM,0.05,10000,10.0,23,10,CUDA,135.30033922195435,95.10928961748634 +20250225090119,TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,962.4823818206787,64.17349726775956 +20250225090119,Graph NN,0.1,0,0,0,2000,CPU,35.70058226585388,73.55191111564636 +20250225090119,GraphTM,0.1,10000,10.0,23,10,CUDA,150.0443034172058,90.19125683060109 +20250225090119,TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,936.1979103088379,49.35109289617486 +20250225090119,Graph 
NN,0.2,0,0,0,2000,CPU,40.174824714660645,63.22404146194458 +20250225090119,GraphTM,0.2,10000,10.0,23,10,CUDA,169.9642357826233,77.95081967213115 +20250225090119,TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,959.6620714664459,20.116120218579233 +20250225090119,Graph NN,0.005,0,0,0,2000,CPU,35.725218534469604,89.75409865379333 +20250225090119,GraphTM,0.005,10000,10.0,23,10,CUDA,110.31502270698547,98.60655737704917 +20250225090119,TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1024.4315028190613,76.74180327868852 +20250225090119,Graph NN,0.01,0,0,0,2000,CPU,32.86650729179382,83.03278684616089 +20250225090119,GraphTM,0.01,10000,10.0,23,10,CUDA,112.99009418487549,98.4153005464481 +20250225090119,TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1040.9654848575592,73.49726775956285 +20250225090119,Graph NN,0.02,0,0,0,2000,CPU,29.535728454589844,88.63387703895569 +20250225090119,GraphTM,0.02,10000,10.0,23,10,CUDA,120.96075296401978,97.78688524590164 +20250225090119,TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1054.357099056244,72.78005464480874 +20250225090119,Graph NN,0.05,0,0,0,2000,CPU,47.468485832214355,75.10929107666016 +20250225090119,GraphTM,0.05,10000,10.0,23,10,CUDA,134.4755368232727,95.08196721311475 +20250225090119,TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,949.9652540683746,63.79781420765027 +20250225090119,Graph NN,0.1,0,0,0,2000,CPU,38.58360719680786,71.967214345932 +20250225090119,GraphTM,0.1,10000,10.0,23,10,CUDA,148.94670748710632,90.1639344262295 +20250225090119,TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1027.843326807022,49.14617486338798 +20250225090119,Graph NN,0.2,0,0,0,2000,CPU,37.01042413711548,66.42076373100281 +20250225090119,GraphTM,0.2,10000,10.0,23,10,CUDA,169.78875064849854,79.94535519125682 +20250225090119,TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,1019.9154229164124,20.116120218579233 +20250225090119,Graph NN,0.005,0,0,0,2000,CPU,46.6854362487793,75.84699392318726 
+20250225090119,GraphTM,0.005,10000,10.0,23,10,CUDA,110.07307553291321,98.82513661202185 +20250225090119,TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1019.9013290405273,76.74180327868852 +20250225090119,Graph NN,0.01,0,0,0,2000,CPU,41.087942600250244,83.77048969268799 +20250225090119,GraphTM,0.01,10000,10.0,23,10,CUDA,112.69408297538757,98.4153005464481 +20250225090119,TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1012.4928967952728,75.10245901639344 +20250225090119,Graph NN,0.02,0,0,0,2000,CPU,42.92523193359375,88.44262361526489 +20250225090119,GraphTM,0.02,10000,10.0,23,10,CUDA,120.11772298812866,97.59562841530055 +20250225090119,TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1006.3243997097015,72.1311475409836 +20250225090119,Graph NN,0.05,0,0,0,2000,CPU,36.374452352523804,85.49180030822754 +20250225090119,GraphTM,0.05,10000,10.0,23,10,CUDA,134.70963144302368,94.15300546448087 +20250225090119,TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,959.455883026123,64.00273224043715 +20250225090119,Graph NN,0.1,0,0,0,2000,CPU,39.68649101257324,78.68852615356445 +20250225090119,GraphTM,0.1,10000,10.0,23,10,CUDA,151.08690643310547,89.31693989071037 +20250225090119,TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1008.2197906970978,49.24863387978142 +20250225090119,Graph NN,0.2,0,0,0,2000,CPU,36.5257625579834,67.73223876953125 +20250225090119,GraphTM,0.2,10000,10.0,23,10,CUDA,170.00959873199463,76.66666666666667 +20250225090119,TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,974.8701865673065,20.116120218579233 +20250225090119,Graph NN,0.005,0,0,0,2000,CPU,42.808833599090576,87.62295246124268 +20250225090119,GraphTM,0.005,10000,10.0,23,10,CUDA,110.3779969215393,98.63387978142076 +20250225090119,TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,959.8772912025452,76.63934426229508 +20250225090119,Graph NN,0.01,0,0,0,2000,CPU,42.59048676490784,82.07650184631348 +20250225090119,GraphTM,0.01,10000,10.0,23,10,CUDA,113.87734937667847,98.4153005464481 
+20250225090119,TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,990.1339011192322,74.93169398907104 +20250225090119,Graph NN,0.02,0,0,0,2000,CPU,35.95067048072815,79.64481115341187 +20250225090119,GraphTM,0.02,10000,10.0,23,10,CUDA,120.58509016036987,97.8415300546448 +20250225090119,TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,976.5062215328217,71.9603825136612 +20250225090119,Graph NN,0.05,0,0,0,2000,CPU,49.44124245643616,76.09289884567261 +20250225090119,GraphTM,0.05,10000,10.0,23,10,CUDA,136.56197214126587,94.89071038251366 +20250225090119,TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,971.7774770259857,63.25136612021858 +20250225090119,Graph NN,0.1,0,0,0,2000,CPU,45.797210931777954,73.5792338848114 +20250225090119,GraphTM,0.1,10000,10.0,23,10,CUDA,149.67395901679993,89.23497267759562 +20250225090119,TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,979.9483859539032,49.59016393442623 +20250225090119,Graph NN,0.2,0,0,0,2000,CPU,41.42583513259888,68.27868819236755 +20250225090119,GraphTM,0.2,10000,10.0,23,10,CUDA,170.87367057800293,79.18032786885246 +20250225090119,TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,961.5906836986542,20.081967213114755 +20250225090119,Graph NN,0.005,0,0,0,2000,CPU,43.93612337112427,79.20765280723572 +20250225090119,GraphTM,0.005,10000,10.0,23,10,CUDA,109.73634815216064,98.63387978142076 +20250225090119,TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,965.0153555870056,76.63934426229508 +20250225090119,Graph NN,0.01,0,0,0,2000,CPU,49.67618227005005,92.45901703834534 +20250225090119,GraphTM,0.01,10000,10.0,23,10,CUDA,113.5588014125824,98.4153005464481 +20250225090119,TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,983.9533984661102,74.4535519125683 +20250225090119,Graph NN,0.02,0,0,0,2000,CPU,36.16115427017212,80.87431788444519 +20250225090119,GraphTM,0.02,10000,10.0,23,10,CUDA,120.58146834373474,97.81420765027322 +20250225090119,TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,944.9977910518646,71.51639344262296 +20250225090119,Graph 
NN,0.05,0,0,0,2000,CPU,48.164318561553955,83.77048969268799 +20250225090119,GraphTM,0.05,10000,10.0,23,10,CUDA,135.97020173072815,94.75409836065573 +20250225090119,TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,937.3424286842346,64.1051912568306 +20250225090119,Graph NN,0.1,0,0,0,2000,CPU,46.862754344940186,70.84699273109436 +20250225090119,GraphTM,0.1,10000,10.0,23,10,CUDA,149.9700825214386,89.72677595628416 +20250225090119,TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,990.8207032680511,49.86338797814208 +20250225090119,Graph NN,0.2,0,0,0,2000,CPU,48.0979220867157,63.66119980812073 +20250225090119,GraphTM,0.2,10000,10.0,23,10,CUDA,170.79332089424133,78.63387978142077 +20250225090119,TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,990.5863327980042,20.116120218579233 +20250225090119,Graph NN,0.005,0,0,0,2000,CPU,39.39827084541321,83.2513689994812 +20250225090119,GraphTM,0.005,10000,10.0,23,10,CUDA,109.96842241287231,98.52459016393442 +20250225090119,TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,941.0268743038177,76.63934426229508 +20250225090119,Graph NN,0.01,0,0,0,2000,CPU,37.69904541969299,93.44262480735779 diff --git a/examples/recomm_system/test.ipynb b/examples/recomm_system/test.ipynb index 9873a435..c5c06961 100644 --- a/examples/recomm_system/test.ipynb +++ b/examples/recomm_system/test.ipynb @@ -67,7 +67,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 10, "metadata": {}, "outputs": [ { @@ -80,12 +80,12 @@ "\\begin{tabular}{|c|c|c|c|}\n", "\\hline\n", "\\textbf{Noise Ratio} & \\textbf{GCN (\\%)} & \\textbf{GTM (\\%)} & \\textbf{TMClassifier (\\%)} \\\\ \\hline\n", - "0.005 & 81.73 & 98.62 & 76.70 \\\\ \\hline\n", - "0.01 & 84.73 & 98.34 & 71.43 \\\\ \\hline\n", - "0.02 & 87.81 & 97.76 & 69.09 \\\\ \\hline\n", - "0.05 & 79.86 & 94.74 & 61.04 \\\\ \\hline\n", - "0.1 & 76.40 & 89.82 & 49.60 \\\\ \\hline\n", - "0.2 & 65.90 & 78.22 & 20.12 \\\\ \\hline\n", + "0.005 & 83.36 & 98.67 & 76.67 \\\\ \\hline\n", + "0.01 & 86.13 & 98.42 & 74.58 
\\\\ \\hline\n", + "0.02 & 85.03 & 97.75 & 72.10 \\\\ \\hline\n", + "0.05 & 79.93 & 94.80 & 63.87 \\\\ \\hline\n", + "0.1 & 73.73 & 89.73 & 49.44 \\\\ \\hline\n", + "0.2 & 65.86 & 78.48 & 20.11 \\\\ \\hline\n", "\\end{tabular}\n", "\\caption{Average accuracy comparison of GCN, GraphTM, and TMClassifier for varying noise ratios.}\n", "\\label{tab:recomm_sys_accuracy}\n", @@ -96,18 +96,17 @@ "source": [ "import pandas as pd\n", "data = pd.read_csv(\"experiment_results.csv\")\n", + "exp_id = \"20250225090119\" \n", + "data['Exp_id'] = data['Exp_id'].astype(str)\n", + "filtered_data = data[data['Exp_id'] == exp_id]\n", + "# print(filtered_data)\n", "\n", - "# Extract records within the specified range, e.g., rows 3 to 5 (0-indexed)\n", - "# This assumes each algorithm data spans three consecutive rows\n", - "start_index = 0\n", - "range_records = data.iloc[start_index:len(data)]\n", - "# print(range_records)\n", "# Create a dictionary to store the accuracy values\n", "noise_accuracies = {}\n", "\n", "# Algorithm,Noise_Ratio,T,s,Max_Included_Literals,Epochs,Platform,Total_Time,Accuracy\n", "# Group the data by Algorithm and Noise Ratio to calculate average accuracies\n", - "grouped_data = data.groupby(['Algorithm', 'Noise_Ratio']).agg({'Accuracy': 'mean'}).reset_index()\n", + "grouped_data = filtered_data.groupby(['Algorithm', 'Noise_Ratio']).agg({'Accuracy': 'mean'}).reset_index()\n", "\n", "# Pivot the data to get a structure suitable for LaTeX table generation\n", "pivot_data = grouped_data.pivot(index='Noise_Ratio', columns='Algorithm', values='Accuracy')\n", From de8eb1b65a68af29572ac6c9c370207845ab9fc1 Mon Sep 17 00:00:00 2001 From: Ahmed Khalid Date: Fri, 28 Feb 2025 07:40:11 +0000 Subject: [PATCH 26/29] update --- examples/recomm_system/test.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/recomm_system/test.ipynb b/examples/recomm_system/test.ipynb index c5c06961..07b30e69 100644 --- a/examples/recomm_system/test.ipynb +++ 
b/examples/recomm_system/test.ipynb @@ -67,7 +67,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 1, "metadata": {}, "outputs": [ { From bfdf40c91faef3868a184e64813dfbb5da077487 Mon Sep 17 00:00:00 2001 From: Ahmed Khalid Date: Thu, 10 Apr 2025 10:19:34 +0000 Subject: [PATCH 27/29] rerun --- examples/recomm_system/experiment_results.csv | 275 ++++++++++++------ .../experiment_results_ensamble.csv | 181 ------------ .../recomm_system/experiment_results_old.csv | 271 ----------------- examples/recomm_system/test.ipynb | 16 +- 4 files changed, 188 insertions(+), 555 deletions(-) delete mode 100644 examples/recomm_system/experiment_results_ensamble.csv delete mode 100644 examples/recomm_system/experiment_results_old.csv diff --git a/examples/recomm_system/experiment_results.csv b/examples/recomm_system/experiment_results.csv index a2902234..45da5c4e 100644 --- a/examples/recomm_system/experiment_results.csv +++ b/examples/recomm_system/experiment_results.csv @@ -1,96 +1,181 @@ Exp_id,Algorithm,Noise_Ratio,T,s,Max_Included_Literals,Epochs,Platform,Total_Time,Accuracy -20250225083536,Graph NN,0.005,0,0,0,2000,CPU,44.10563063621521,80.87431788444519 -20250225090119,Graph NN,0.005,0,0,0,2000,CPU,49.34887194633484,84.45355296134949 -20250225090119,GraphTM,0.005,10000,10.0,23,10,CUDA,109.94287848472595,98.82513661202185 -20250225090119,TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,944.8344459533691,76.63934426229508 -20250225090119,Graph NN,0.01,0,0,0,2000,CPU,36.30448269844055,81.99453353881836 -20250225090119,GraphTM,0.01,10000,10.0,23,10,CUDA,113.23237609863281,98.4153005464481 -20250225090119,TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,936.9574060440063,74.93169398907104 -20250225090119,Graph NN,0.02,0,0,0,2000,CPU,38.477863073349,87.54098415374756 -20250225090119,GraphTM,0.02,10000,10.0,23,10,CUDA,120.61202812194824,97.73224043715847 -20250225090119,TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1001.9182693958282,72.1311475409836 
-20250225090119,Graph NN,0.05,0,0,0,2000,CPU,48.03118896484375,79.20765280723572 -20250225090119,GraphTM,0.05,10000,10.0,23,10,CUDA,135.30033922195435,95.10928961748634 -20250225090119,TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,962.4823818206787,64.17349726775956 -20250225090119,Graph NN,0.1,0,0,0,2000,CPU,35.70058226585388,73.55191111564636 -20250225090119,GraphTM,0.1,10000,10.0,23,10,CUDA,150.0443034172058,90.19125683060109 -20250225090119,TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,936.1979103088379,49.35109289617486 -20250225090119,Graph NN,0.2,0,0,0,2000,CPU,40.174824714660645,63.22404146194458 -20250225090119,GraphTM,0.2,10000,10.0,23,10,CUDA,169.9642357826233,77.95081967213115 -20250225090119,TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,959.6620714664459,20.116120218579233 -20250225090119,Graph NN,0.005,0,0,0,2000,CPU,35.725218534469604,89.75409865379333 -20250225090119,GraphTM,0.005,10000,10.0,23,10,CUDA,110.31502270698547,98.60655737704917 -20250225090119,TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1024.4315028190613,76.74180327868852 -20250225090119,Graph NN,0.01,0,0,0,2000,CPU,32.86650729179382,83.03278684616089 -20250225090119,GraphTM,0.01,10000,10.0,23,10,CUDA,112.99009418487549,98.4153005464481 -20250225090119,TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1040.9654848575592,73.49726775956285 -20250225090119,Graph NN,0.02,0,0,0,2000,CPU,29.535728454589844,88.63387703895569 -20250225090119,GraphTM,0.02,10000,10.0,23,10,CUDA,120.96075296401978,97.78688524590164 -20250225090119,TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1054.357099056244,72.78005464480874 -20250225090119,Graph NN,0.05,0,0,0,2000,CPU,47.468485832214355,75.10929107666016 -20250225090119,GraphTM,0.05,10000,10.0,23,10,CUDA,134.4755368232727,95.08196721311475 -20250225090119,TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,949.9652540683746,63.79781420765027 -20250225090119,Graph NN,0.1,0,0,0,2000,CPU,38.58360719680786,71.967214345932 
-20250225090119,GraphTM,0.1,10000,10.0,23,10,CUDA,148.94670748710632,90.1639344262295 -20250225090119,TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1027.843326807022,49.14617486338798 -20250225090119,Graph NN,0.2,0,0,0,2000,CPU,37.01042413711548,66.42076373100281 -20250225090119,GraphTM,0.2,10000,10.0,23,10,CUDA,169.78875064849854,79.94535519125682 -20250225090119,TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,1019.9154229164124,20.116120218579233 -20250225090119,Graph NN,0.005,0,0,0,2000,CPU,46.6854362487793,75.84699392318726 -20250225090119,GraphTM,0.005,10000,10.0,23,10,CUDA,110.07307553291321,98.82513661202185 -20250225090119,TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1019.9013290405273,76.74180327868852 -20250225090119,Graph NN,0.01,0,0,0,2000,CPU,41.087942600250244,83.77048969268799 -20250225090119,GraphTM,0.01,10000,10.0,23,10,CUDA,112.69408297538757,98.4153005464481 -20250225090119,TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1012.4928967952728,75.10245901639344 -20250225090119,Graph NN,0.02,0,0,0,2000,CPU,42.92523193359375,88.44262361526489 -20250225090119,GraphTM,0.02,10000,10.0,23,10,CUDA,120.11772298812866,97.59562841530055 -20250225090119,TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1006.3243997097015,72.1311475409836 -20250225090119,Graph NN,0.05,0,0,0,2000,CPU,36.374452352523804,85.49180030822754 -20250225090119,GraphTM,0.05,10000,10.0,23,10,CUDA,134.70963144302368,94.15300546448087 -20250225090119,TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,959.455883026123,64.00273224043715 -20250225090119,Graph NN,0.1,0,0,0,2000,CPU,39.68649101257324,78.68852615356445 -20250225090119,GraphTM,0.1,10000,10.0,23,10,CUDA,151.08690643310547,89.31693989071037 -20250225090119,TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1008.2197906970978,49.24863387978142 -20250225090119,Graph NN,0.2,0,0,0,2000,CPU,36.5257625579834,67.73223876953125 -20250225090119,GraphTM,0.2,10000,10.0,23,10,CUDA,170.00959873199463,76.66666666666667 
-20250225090119,TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,974.8701865673065,20.116120218579233 -20250225090119,Graph NN,0.005,0,0,0,2000,CPU,42.808833599090576,87.62295246124268 -20250225090119,GraphTM,0.005,10000,10.0,23,10,CUDA,110.3779969215393,98.63387978142076 -20250225090119,TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,959.8772912025452,76.63934426229508 -20250225090119,Graph NN,0.01,0,0,0,2000,CPU,42.59048676490784,82.07650184631348 -20250225090119,GraphTM,0.01,10000,10.0,23,10,CUDA,113.87734937667847,98.4153005464481 -20250225090119,TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,990.1339011192322,74.93169398907104 -20250225090119,Graph NN,0.02,0,0,0,2000,CPU,35.95067048072815,79.64481115341187 -20250225090119,GraphTM,0.02,10000,10.0,23,10,CUDA,120.58509016036987,97.8415300546448 -20250225090119,TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,976.5062215328217,71.9603825136612 -20250225090119,Graph NN,0.05,0,0,0,2000,CPU,49.44124245643616,76.09289884567261 -20250225090119,GraphTM,0.05,10000,10.0,23,10,CUDA,136.56197214126587,94.89071038251366 -20250225090119,TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,971.7774770259857,63.25136612021858 -20250225090119,Graph NN,0.1,0,0,0,2000,CPU,45.797210931777954,73.5792338848114 -20250225090119,GraphTM,0.1,10000,10.0,23,10,CUDA,149.67395901679993,89.23497267759562 -20250225090119,TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,979.9483859539032,49.59016393442623 -20250225090119,Graph NN,0.2,0,0,0,2000,CPU,41.42583513259888,68.27868819236755 -20250225090119,GraphTM,0.2,10000,10.0,23,10,CUDA,170.87367057800293,79.18032786885246 -20250225090119,TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,961.5906836986542,20.081967213114755 -20250225090119,Graph NN,0.005,0,0,0,2000,CPU,43.93612337112427,79.20765280723572 -20250225090119,GraphTM,0.005,10000,10.0,23,10,CUDA,109.73634815216064,98.63387978142076 -20250225090119,TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,965.0153555870056,76.63934426229508 -20250225090119,Graph 
NN,0.01,0,0,0,2000,CPU,49.67618227005005,92.45901703834534 -20250225090119,GraphTM,0.01,10000,10.0,23,10,CUDA,113.5588014125824,98.4153005464481 -20250225090119,TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,983.9533984661102,74.4535519125683 -20250225090119,Graph NN,0.02,0,0,0,2000,CPU,36.16115427017212,80.87431788444519 -20250225090119,GraphTM,0.02,10000,10.0,23,10,CUDA,120.58146834373474,97.81420765027322 -20250225090119,TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,944.9977910518646,71.51639344262296 -20250225090119,Graph NN,0.05,0,0,0,2000,CPU,48.164318561553955,83.77048969268799 -20250225090119,GraphTM,0.05,10000,10.0,23,10,CUDA,135.97020173072815,94.75409836065573 -20250225090119,TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,937.3424286842346,64.1051912568306 -20250225090119,Graph NN,0.1,0,0,0,2000,CPU,46.862754344940186,70.84699273109436 -20250225090119,GraphTM,0.1,10000,10.0,23,10,CUDA,149.9700825214386,89.72677595628416 -20250225090119,TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,990.8207032680511,49.86338797814208 -20250225090119,Graph NN,0.2,0,0,0,2000,CPU,48.0979220867157,63.66119980812073 -20250225090119,GraphTM,0.2,10000,10.0,23,10,CUDA,170.79332089424133,78.63387978142077 -20250225090119,TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,990.5863327980042,20.116120218579233 -20250225090119,Graph NN,0.005,0,0,0,2000,CPU,39.39827084541321,83.2513689994812 -20250225090119,GraphTM,0.005,10000,10.0,23,10,CUDA,109.96842241287231,98.52459016393442 -20250225090119,TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,941.0268743038177,76.63934426229508 -20250225090119,Graph NN,0.01,0,0,0,2000,CPU,37.69904541969299,93.44262480735779 +20250409090514,Graph NN,0.005,0,0,0,2000,CPU,47.380565881729126,84.23497080802917 +20250409090514,GraphTM,0.005,10000,10.0,23,10,CUDA,110.12741780281067,98.63387978142076 +20250409090514,TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1190.9095215797424,77.11748633879782 +20250409090514,Graph 
NN,0.01,0,0,0,2000,CPU,49.00558853149414,92.65027046203613 +20250409090514,GraphTM,0.01,10000,10.0,23,10,CUDA,113.65191793441772,98.44262295081967 +20250409090514,TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1020.6083555221558,74.86338797814209 +20250409090514,Graph NN,0.02,0,0,0,2000,CPU,44.6860625743866,77.13114619255066 +20250409090514,GraphTM,0.02,10000,10.0,23,10,CUDA,121.2872724533081,97.78688524590164 +20250409090514,TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1246.0178999900818,72.40437158469946 +20250409090514,Graph NN,0.05,0,0,0,2000,CPU,82.58793544769287,88.46994638442993 +20250409090514,GraphTM,0.05,10000,10.0,23,10,CUDA,137.15939092636108,94.39890710382514 +20250409090514,TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1317.742176771164,63.25136612021858 +20250409090514,Graph NN,0.1,0,0,0,2000,CPU,54.852065563201904,76.4207661151886 +20250409090514,GraphTM,0.1,10000,10.0,23,10,CUDA,151.09674072265625,89.89071038251366 +20250409090514,TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1123.5956239700317,49.59016393442623 +20250409090514,Graph NN,0.2,0,0,0,2000,CPU,51.210848808288574,68.93442869186401 +20250409090514,GraphTM,0.2,10000,10.0,23,10,CUDA,170.72992277145386,78.5792349726776 +20250409090514,TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,1148.6567842960358,20.116120218579233 +20250409090514,Graph NN,0.005,0,0,0,2000,CPU,48.660605907440186,86.63934469223022 +20250409090514,GraphTM,0.005,10000,10.0,23,10,CUDA,110.17098808288574,98.82513661202185 +20250409090514,TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1061.7185904979706,76.63934426229508 +20250409090514,Graph NN,0.01,0,0,0,2000,CPU,49.778627157211304,95.76502442359924 +20250409090514,GraphTM,0.01,10000,10.0,23,10,CUDA,113.88378477096558,98.4153005464481 +20250409090514,TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1058.3029556274414,74.93169398907104 +20250409090514,Graph NN,0.02,0,0,0,2000,CPU,39.869826555252075,76.5573799610138 
+20250409090514,GraphTM,0.02,10000,10.0,23,10,CUDA,120.86488842964172,97.6775956284153 +20250409090514,TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1157.85533452034,72.40437158469946 +20250409090514,Graph NN,0.05,0,0,0,2000,CPU,39.27051615715027,80.21857738494873 +20250409090514,GraphTM,0.05,10000,10.0,23,10,CUDA,137.07859206199646,94.42622950819673 +20250409090514,TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1060.4789934158325,64.1051912568306 +20250409090514,Graph NN,0.1,0,0,0,2000,CPU,41.18854546546936,78.032785654068 +20250409090514,GraphTM,0.1,10000,10.0,23,10,CUDA,150.01649594306946,89.86338797814207 +20250409090514,TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1074.8029758930206,49.21448087431694 +20250409090514,Graph NN,0.2,0,0,0,2000,CPU,42.942272901535034,68.22404265403748 +20250409090514,GraphTM,0.2,10000,10.0,23,10,CUDA,170.39786314964294,78.0327868852459 +20250409090514,TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,1051.4041996002197,20.081967213114755 +20250409090514,Graph NN,0.005,0,0,0,2000,CPU,48.943641662597656,80.43715953826904 +20250409090514,GraphTM,0.005,10000,10.0,23,10,CUDA,111.18853044509888,98.82513661202185 +20250409090514,TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1000.6668944358826,76.74180327868852 +20250409090514,Graph NN,0.01,0,0,0,2000,CPU,34.4648540019989,84.59016680717468 +20250409090514,GraphTM,0.01,10000,10.0,23,10,CUDA,113.77461814880371,98.27868852459017 +20250409090514,TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1045.2479929924011,74.93169398907104 +20250409090514,Graph NN,0.02,0,0,0,2000,CPU,40.32768535614014,77.40437388420105 +20250409090514,GraphTM,0.02,10000,10.0,23,10,CUDA,120.75347566604614,97.8415300546448 +20250409090514,TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1042.6038060188293,72.1311475409836 +20250409090514,Graph NN,0.05,0,0,0,2000,CPU,49.051427602767944,76.85792446136475 +20250409090514,GraphTM,0.05,10000,10.0,23,10,CUDA,135.81657576560974,94.89071038251366 
+20250409090514,TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1049.5465006828308,63.69535519125683 +20250409090514,Graph NN,0.1,0,0,0,2000,CPU,50.19066071510315,74.07103776931763 +20250409090514,GraphTM,0.1,10000,10.0,23,10,CUDA,150.23873829841614,89.69945355191257 +20250409090514,TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1161.7163217067719,48.80464480874317 +20250409090514,Graph NN,0.2,0,0,0,2000,CPU,42.93249225616455,63.06011080741882 +20250409090514,GraphTM,0.2,10000,10.0,23,10,CUDA,169.8643877506256,79.20765027322403 +20250409090514,TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,968.4304020404816,20.116120218579233 +20250409090514,Graph NN,0.005,0,0,0,2000,CPU,46.011924266815186,80.24590015411377 +20250409090514,GraphTM,0.005,10000,10.0,23,10,CUDA,109.72403120994568,98.66120218579235 +20250409090514,TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1174.494342327118,76.74180327868852 +20250409090514,Graph NN,0.01,0,0,0,2000,CPU,41.743159532547,80.02732396125793 +20250409090514,GraphTM,0.01,10000,10.0,23,10,CUDA,114.41021490097046,98.4153005464481 +20250409090514,TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1171.6064977645874,74.93169398907104 +20250409090514,Graph NN,0.02,0,0,0,2000,CPU,44.349541664123535,87.45901584625244 +20250409090514,GraphTM,0.02,10000,10.0,23,10,CUDA,121.4791738986969,97.45901639344262 +20250409090514,TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,952.0120975971222,71.65300546448088 +20250409090514,Graph NN,0.05,0,0,0,2000,CPU,48.69317936897278,75.92896223068237 +20250409090514,GraphTM,0.05,10000,10.0,23,10,CUDA,136.3904469013214,94.4535519125683 +20250409090514,TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,969.868058681488,64.00273224043715 +20250409090514,Graph NN,0.1,0,0,0,2000,CPU,44.044572591781616,70.8743155002594 +20250409090514,GraphTM,0.1,10000,10.0,23,10,CUDA,149.6289074420929,89.8360655737705 +20250409090514,TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,953.6086061000824,50.10245901639344 +20250409090514,Graph 
NN,0.2,0,0,0,2000,CPU,44.549598932266235,61.284154653549194 +20250409090514,GraphTM,0.2,10000,10.0,23,10,CUDA,170.53832936286926,79.53551912568307 +20250409090514,TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,972.7086639404297,20.116120218579233 +20250409090514,Graph NN,0.005,0,0,0,2000,CPU,47.114877223968506,81.69398903846741 +20250409090514,GraphTM,0.005,10000,10.0,23,10,CUDA,109.53987145423889,98.68852459016394 +20250409090514,TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,957.2526223659515,76.63934426229508 +20250409090514,Graph NN,0.01,0,0,0,2000,CPU,37.89606070518494,85.65573692321777 +20250409090514,GraphTM,0.01,10000,10.0,23,10,CUDA,114.25655388832092,98.30601092896175 +20250409090514,TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1173.4506571292877,74.93169398907104 +20250409090514,Graph NN,0.02,0,0,0,2000,CPU,47.68080997467041,83.36065411567688 +20250409090514,GraphTM,0.02,10000,10.0,23,10,CUDA,120.15364933013916,97.8688524590164 +20250409090514,TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1153.5412156581879,72.1311475409836 +20250409090514,Graph NN,0.05,0,0,0,2000,CPU,41.10796904563904,83.41529965400696 +20250409090514,GraphTM,0.05,10000,10.0,23,10,CUDA,136.6818916797638,95.0 +20250409090514,TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,967.7902429103851,63.25136612021858 +20250409090514,Graph NN,0.1,0,0,0,2000,CPU,36.63528251647949,82.81420469284058 +20250409090514,GraphTM,0.1,10000,10.0,23,10,CUDA,150.54849863052368,89.31693989071037 +20250409090514,TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,965.3704278469086,49.62431693989071 +20250409090514,Graph NN,0.2,0,0,0,2000,CPU,40.28898596763611,64.61748480796814 +20250409090514,GraphTM,0.2,10000,10.0,23,10,CUDA,169.49659419059753,79.97267759562841 +20250409090514,TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,1158.38462972641,20.21857923497268 +20250409090514,Graph NN,0.005,0,0,0,2000,CPU,43.29892086982727,77.95081734657288 
+20250409090514,GraphTM,0.005,10000,10.0,23,10,CUDA,110.77093839645386,98.68852459016394 +20250409090514,TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,944.0426867008209,76.63934426229508 +20250409090514,Graph NN,0.01,0,0,0,2000,CPU,40.48178577423096,91.17486476898193 +20250409090514,GraphTM,0.01,10000,10.0,23,10,CUDA,114.66628408432007,98.3879781420765 +20250409090514,TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1151.3295328617096,74.93169398907104 +20250409090514,Graph NN,0.02,0,0,0,2000,CPU,46.342252254486084,91.42076373100281 +20250409090514,GraphTM,0.02,10000,10.0,23,10,CUDA,121.12805104255676,97.70491803278688 +20250409090514,TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,956.9201290607452,72.37021857923497 +20250409090514,Graph NN,0.05,0,0,0,2000,CPU,48.09459686279297,90.16393423080444 +20250409090514,GraphTM,0.05,10000,10.0,23,10,CUDA,136.35990571975708,94.31693989071039 +20250409090514,TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,951.3514447212219,64.00273224043715 +20250409090514,Graph NN,0.1,0,0,0,2000,CPU,47.61181974411011,77.04917788505554 +20250409090514,GraphTM,0.1,10000,10.0,23,10,CUDA,149.66685557365417,90.40983606557377 +20250409090514,TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1159.4669754505157,49.62431693989071 +20250409090514,Graph NN,0.2,0,0,0,2000,CPU,40.52361035346985,61.666667461395264 +20250409090514,GraphTM,0.2,10000,10.0,23,10,CUDA,170.45302724838257,79.09836065573771 +20250409090514,TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,942.1310601234436,20.116120218579233 +20250409090514,Graph NN,0.005,0,0,0,2000,CPU,42.33190155029297,80.79234957695007 +20250409090514,GraphTM,0.005,10000,10.0,23,10,CUDA,110.67640900611877,98.82513661202185 +20250409090514,TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1151.425032377243,76.63934426229508 +20250409090514,Graph NN,0.01,0,0,0,2000,CPU,46.83778142929077,79.94535565376282 +20250409090514,GraphTM,0.01,10000,10.0,23,10,CUDA,113.82480311393738,98.25136612021858 
+20250409090514,TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,959.6910009384155,74.86338797814209 +20250409090514,Graph NN,0.02,0,0,0,2000,CPU,46.91451978683472,79.26229238510132 +20250409090514,GraphTM,0.02,10000,10.0,23,10,CUDA,121.25436019897461,97.81420765027322 +20250409090514,TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,973.5784142017365,72.1311475409836 +20250409090514,Graph NN,0.05,0,0,0,2000,CPU,45.216925859451294,79.56284284591675 +20250409090514,GraphTM,0.05,10000,10.0,23,10,CUDA,136.08299708366394,94.89071038251366 +20250409090514,TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,941.2294843196869,64.1051912568306 +20250409090514,Graph NN,0.1,0,0,0,2000,CPU,35.09868001937866,70.24590373039246 +20250409090514,GraphTM,0.1,10000,10.0,23,10,CUDA,150.008531332016,89.97267759562841 +20250409090514,TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,967.8004837036133,49.62431693989071 +20250409090514,Graph NN,0.2,0,0,0,2000,CPU,40.60944890975952,60.76502799987793 +20250409090514,GraphTM,0.2,10000,10.0,23,10,CUDA,170.61232328414917,78.52459016393442 +20250409090514,TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,1174.2148485183716,20.116120218579233 +20250409090514,Graph NN,0.005,0,0,0,2000,CPU,44.02885293960571,86.72131299972534 +20250409090514,GraphTM,0.005,10000,10.0,23,10,CUDA,110.33011960983276,98.82513661202185 +20250409090514,TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1164.813972711563,76.74180327868852 +20250409090514,Graph NN,0.01,0,0,0,2000,CPU,34.82557439804077,91.83059930801392 +20250409090514,GraphTM,0.01,10000,10.0,23,10,CUDA,113.68903136253357,98.30601092896175 +20250409090514,TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1142.874398946762,74.86338797814209 +20250409090514,Graph NN,0.02,0,0,0,2000,CPU,38.12274146080017,84.09836292266846 +20250409090514,GraphTM,0.02,10000,10.0,23,10,CUDA,120.88822174072266,97.89617486338797 +20250409090514,TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,958.9832980632782,72.60928961748634 +20250409090514,Graph 
NN,0.05,0,0,0,2000,CPU,47.38658022880554,83.63388180732727 +20250409090514,GraphTM,0.05,10000,10.0,23,10,CUDA,136.52869582176208,95.0 +20250409090514,TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,982.7437946796417,64.00273224043715 +20250409090514,Graph NN,0.1,0,0,0,2000,CPU,50.3098578453064,78.49726676940918 +20250409090514,GraphTM,0.1,10000,10.0,23,10,CUDA,150.58712220191956,90.10928961748634 +20250409090514,TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,952.3902399539948,48.97540983606557 +20250409090514,Graph NN,0.2,0,0,0,2000,CPU,47.68881940841675,67.54098534584045 +20250409090514,GraphTM,0.2,10000,10.0,23,10,CUDA,170.5669903755188,78.44262295081967 +20250409090514,TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,1160.643584728241,20.116120218579233 +20250409090514,Graph NN,0.005,0,0,0,2000,CPU,42.35506534576416,80.71038126945496 +20250409090514,GraphTM,0.005,10000,10.0,23,10,CUDA,110.72827911376953,98.46994535519126 +20250409090514,TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1161.2603483200073,76.70765027322405 +20250409090514,Graph NN,0.01,0,0,0,2000,CPU,44.48380947113037,75.95628499984741 +20250409090514,GraphTM,0.01,10000,10.0,23,10,CUDA,113.78427290916443,98.4153005464481 +20250409090514,TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1164.732885837555,74.93169398907104 +20250409090514,Graph NN,0.02,0,0,0,2000,CPU,41.45829200744629,88.27868700027466 +20250409090514,GraphTM,0.02,10000,10.0,23,10,CUDA,121.14582562446594,97.62295081967213 +20250409090514,TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,971.4570569992065,72.40437158469946 +20250409090514,Graph NN,0.05,0,0,0,2000,CPU,44.6593804359436,75.7377028465271 +20250409090514,GraphTM,0.05,10000,10.0,23,10,CUDA,136.09871077537537,94.72677595628414 +20250409090514,TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1170.4177556037903,64.1051912568306 +20250409090514,Graph NN,0.1,0,0,0,2000,CPU,41.33125162124634,78.38797569274902 +20250409090514,GraphTM,0.1,10000,10.0,23,10,CUDA,150.5243456363678,89.61748633879782 
+20250409090514,TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,940.6334030628204,49.62431693989071 +20250409090514,Graph NN,0.2,0,0,0,2000,CPU,43.79690456390381,63.387978076934814 +20250409090514,GraphTM,0.2,10000,10.0,23,10,CUDA,170.47909784317017,77.34972677595628 +20250409090514,TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,951.3798985481262,20.116120218579233 +20250409090514,Graph NN,0.005,0,0,0,2000,CPU,44.20913028717041,94.4535493850708 +20250409090514,GraphTM,0.005,10000,10.0,23,10,CUDA,110.41194748878479,98.82513661202185 +20250409090514,TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1164.4012093544006,76.74180327868852 +20250409090514,Graph NN,0.01,0,0,0,2000,CPU,43.56287693977356,77.86885499954224 +20250409090514,GraphTM,0.01,10000,10.0,23,10,CUDA,113.86108899116516,98.25136612021858 +20250409090514,TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1165.0554220676422,74.55601092896174 +20250409090514,Graph NN,0.02,0,0,0,2000,CPU,43.18827676773071,90.76502919197083 +20250409090514,GraphTM,0.02,10000,10.0,23,10,CUDA,121.40065360069275,97.6775956284153 +20250409090514,TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1142.509984254837,72.1311475409836 +20250409090514,Graph NN,0.05,0,0,0,2000,CPU,46.11475706100464,87.2950792312622 +20250409090514,GraphTM,0.05,10000,10.0,23,10,CUDA,136.23513627052307,93.98907103825137 +20250409090514,TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1018.888409614563,64.1051912568306 +20250409090514,Graph NN,0.1,0,0,0,2000,CPU,46.72879457473755,72.92349934577942 +20250409090514,GraphTM,0.1,10000,10.0,23,10,CUDA,150.53106451034546,89.75409836065575 +20250409090514,TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1152.1242747306824,49.62431693989071 +20250409090514,Graph NN,0.2,0,0,0,2000,CPU,42.78840351104736,61.72131299972534 +20250409090514,GraphTM,0.2,10000,10.0,23,10,CUDA,170.45607113838196,78.5792349726776 +20250409090514,TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,1150.6887817382812,20.184426229508194 diff --git 
a/examples/recomm_system/experiment_results_ensamble.csv b/examples/recomm_system/experiment_results_ensamble.csv deleted file mode 100644 index b394dad6..00000000 --- a/examples/recomm_system/experiment_results_ensamble.csv +++ /dev/null @@ -1,181 +0,0 @@ -Algorithm,Noise_Ratio,T,s,Max_Included_Literals,Epochs,Platform,Total_Time,Accuracy -Graph NN,0.005,0,0,0,20000,CPU,418.9250466823578,75.62841773033142 -GraphTM,0.005,10000,10.0,23,10,CUDA,110.35683226585388,98.68852459016394 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1059.1634759902954,76.81010928961749 -Graph NN,0.01,0,0,0,20000,CPU,550.6980571746826,94.50819492340088 -GraphTM,0.01,10000,10.0,23,10,CUDA,114.06276345252991,98.4153005464481 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1051.916612625122,75.3415300546448 -Graph NN,0.02,0,0,0,20000,CPU,475.44024682044983,75.30054450035095 -GraphTM,0.02,10000,10.0,23,10,CUDA,121.55624794960022,97.8415300546448 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1043.9487817287445,72.1311475409836 -Graph NN,0.05,0,0,0,20000,CPU,411.8552327156067,80.98360896110535 -GraphTM,0.05,10000,10.0,23,10,CUDA,136.7814338207245,94.20765027322405 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1044.2656917572021,64.00273224043715 -Graph NN,0.1,0,0,0,20000,CPU,484.6550889015198,68.7158465385437 -GraphTM,0.1,10000,10.0,23,10,CUDA,150.34457921981812,89.72677595628416 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1061.191523551941,49.62431693989071 -Graph NN,0.2,0,0,0,20000,CPU,483.8463816642761,71.28415107727051 -GraphTM,0.2,10000,10.0,23,10,CUDA,169.18810439109802,78.49726775956285 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,1071.927158355713,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,473.5806052684784,86.36612296104431 -GraphTM,0.005,10000,10.0,23,10,CUDA,110.18979954719543,98.60655737704917 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,979.0509588718414,76.74180327868852 -Graph NN,0.01,0,0,0,20000,CPU,444.6897065639496,93.55190992355347 
-GraphTM,0.01,10000,10.0,23,10,CUDA,112.48035550117493,98.4153005464481 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1007.9654748439789,74.86338797814209 -Graph NN,0.02,0,0,0,20000,CPU,386.32835030555725,93.22404265403748 -GraphTM,0.02,10000,10.0,23,10,CUDA,120.46316766738892,97.73224043715847 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1017.5866801738739,73.25819672131148 -Graph NN,0.05,0,0,0,20000,CPU,417.78410935401917,73.1693983078003 -GraphTM,0.05,10000,10.0,23,10,CUDA,135.64952206611633,95.08196721311475 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,945.0465729236603,64.00273224043715 -Graph NN,0.1,0,0,0,20000,CPU,481.6537721157074,77.18579173088074 -GraphTM,0.1,10000,10.0,23,10,CUDA,149.57958960533142,90.08196721311475 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,938.0212676525116,49.59016393442623 -Graph NN,0.2,0,0,0,20000,CPU,391.36059975624084,64.15300369262695 -GraphTM,0.2,10000,10.0,23,10,CUDA,169.49347591400146,77.65027322404372 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,940.9758951663971,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,480.5005066394806,75.68305730819702 -GraphTM,0.005,10000,10.0,23,10,CUDA,109.65927052497864,98.19672131147541 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,947.7581994533539,76.63934426229508 -Graph NN,0.01,0,0,0,20000,CPU,449.22584795951843,76.36612057685852 -GraphTM,0.01,10000,10.0,23,10,CUDA,113.07226181030273,98.27868852459017 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1010.8711988925934,74.93169398907104 -Graph NN,0.02,0,0,0,20000,CPU,403.96647000312805,96.85792326927185 -GraphTM,0.02,10000,10.0,23,10,CUDA,120.02044725418091,97.78688524590164 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1011.7896072864532,72.40437158469946 -Graph NN,0.05,0,0,0,20000,CPU,460.688773393631,85.00000238418579 -GraphTM,0.05,10000,10.0,23,10,CUDA,136.04891228675842,94.69945355191257 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1014.1492829322815,64.00273224043715 -Graph 
NN,0.1,0,0,0,20000,CPU,407.9346880912781,74.1256833076477 -GraphTM,0.1,10000,10.0,23,10,CUDA,149.6586093902588,90.08196721311475 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,990.8282098770142,49.59016393442623 -Graph NN,0.2,0,0,0,20000,CPU,437.8108870983124,65.60109257698059 -GraphTM,0.2,10000,10.0,23,10,CUDA,168.44772601127625,78.93442622950819 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,1022.1848647594452,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,430.3925087451935,89.20764923095703 -GraphTM,0.005,10000,10.0,23,10,CUDA,109.6658935546875,98.68852459016394 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1016.199923992157,76.63934426229508 -Graph NN,0.01,0,0,0,20000,CPU,396.3338620662689,84.23497080802917 -GraphTM,0.01,10000,10.0,23,10,CUDA,113.67849016189575,98.27868852459017 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,944.4602844715118,74.86338797814209 -Graph NN,0.02,0,0,0,20000,CPU,434.91951632499695,93.25136542320251 -GraphTM,0.02,10000,10.0,23,10,CUDA,120.31921482086182,97.8415300546448 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,933.2245874404907,72.40437158469946 -Graph NN,0.05,0,0,0,20000,CPU,483.2671537399292,80.32786846160889 -GraphTM,0.05,10000,10.0,23,10,CUDA,135.68922591209412,94.78142076502732 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,994.6384744644165,64.13934426229508 -Graph NN,0.1,0,0,0,20000,CPU,424.9935986995697,81.33879899978638 -GraphTM,0.1,10000,10.0,23,10,CUDA,149.08107113838196,89.59016393442623 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,944.0273253917694,49.62431693989071 -Graph NN,0.2,0,0,0,20000,CPU,333.49274706840515,61.50273084640503 -GraphTM,0.2,10000,10.0,23,10,CUDA,170.906751871109,78.98907103825137 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,965.9725024700165,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,377.28471970558167,75.68305730819702 -GraphTM,0.005,10000,10.0,23,10,CUDA,109.61631536483765,98.82513661202185 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,976.8008840084076,76.67349726775956 
-Graph NN,0.01,0,0,0,20000,CPU,473.2922372817993,76.06557607650757 -GraphTM,0.01,10000,10.0,23,10,CUDA,112.87212014198303,98.27868852459017 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,942.7254059314728,74.86338797814209 -Graph NN,0.02,0,0,0,20000,CPU,357.36573815345764,75.40983557701111 -GraphTM,0.02,10000,10.0,23,10,CUDA,119.41612005233765,97.8415300546448 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,985.81947016716,72.40437158469946 -Graph NN,0.05,0,0,0,20000,CPU,440.75843334198,73.08743000030518 -GraphTM,0.05,10000,10.0,23,10,CUDA,136.8215868473053,94.91803278688525 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,997.739678144455,64.1051912568306 -Graph NN,0.1,0,0,0,20000,CPU,426.73446226119995,88.55191469192505 -GraphTM,0.1,10000,10.0,23,10,CUDA,149.54467248916626,89.94535519125682 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,980.096907377243,49.59016393442623 -Graph NN,0.2,0,0,0,20000,CPU,387.20843958854675,75.71038007736206 -GraphTM,0.2,10000,10.0,23,10,CUDA,169.7962884902954,77.56830601092896 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,987.0616261959076,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,455.586905002594,83.41529965400696 -GraphTM,0.005,10000,10.0,23,10,CUDA,109.7424705028534,98.5792349726776 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,998.4698519706726,76.81010928961749 -Graph NN,0.01,0,0,0,20000,CPU,466.44022035598755,98.52458834648132 -GraphTM,0.01,10000,10.0,23,10,CUDA,112.78495740890503,98.27868852459017 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,932.3163437843323,74.93169398907104 -Graph NN,0.02,0,0,0,20000,CPU,455.35024762153625,88.96175026893616 -GraphTM,0.02,10000,10.0,23,10,CUDA,120.741384267807,97.75956284153006 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,974.3740100860596,72.1311475409836 -Graph NN,0.05,0,0,0,20000,CPU,399.9565739631653,73.60655665397644 -GraphTM,0.05,10000,10.0,23,10,CUDA,136.17181992530823,94.67213114754098 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,971.1499485969543,64.1051912568306 -Graph 
NN,0.1,0,0,0,20000,CPU,447.5498752593994,70.8743155002594 -GraphTM,0.1,10000,10.0,23,10,CUDA,149.6928951740265,89.80874316939891 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,962.4737737178802,49.59016393442623 -Graph NN,0.2,0,0,0,20000,CPU,403.6350507736206,64.15300369262695 -GraphTM,0.2,10000,10.0,23,10,CUDA,170.02189421653748,78.16939890710383 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,947.2696743011475,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,470.0121097564697,81.20218515396118 -GraphTM,0.005,10000,10.0,23,10,CUDA,109.51706099510193,98.82513661202185 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,974.2360310554504,76.74180327868852 -Graph NN,0.01,0,0,0,20000,CPU,466.69573068618774,76.06557607650757 -GraphTM,0.01,10000,10.0,23,10,CUDA,112.95063591003418,98.4153005464481 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,948.407201051712,74.86338797814209 -Graph NN,0.02,0,0,0,20000,CPU,288.0073969364166,92.92349815368652 -GraphTM,0.02,10000,10.0,23,10,CUDA,119.34772634506226,97.48633879781421 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,971.228814125061,39.51502732240437 -Graph NN,0.05,0,0,0,20000,CPU,477.7228500843048,89.86338973045349 -GraphTM,0.05,10000,10.0,23,10,CUDA,135.2427453994751,94.86338797814207 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,964.5819866657257,34.56284153005464 -Graph NN,0.1,0,0,0,20000,CPU,459.15181946754456,71.22950553894043 -GraphTM,0.1,10000,10.0,23,10,CUDA,148.52941298484802,89.67213114754098 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,981.4810082912445,49.59016393442623 -Graph NN,0.2,0,0,0,20000,CPU,356.59899377822876,64.15300369262695 -GraphTM,0.2,10000,10.0,23,10,CUDA,169.7598683834076,76.85792349726775 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,959.9282560348511,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,378.94336581230164,80.32786846160889 -GraphTM,0.005,10000,10.0,23,10,CUDA,109.55144882202148,98.44262295081967 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,947.1284465789795,76.63934426229508 -Graph 
NN,0.01,0,0,0,20000,CPU,407.1111581325531,94.31694149971008 -GraphTM,0.01,10000,10.0,23,10,CUDA,112.06348276138306,98.27868852459017 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,941.711061000824,39.65163934426229 -Graph NN,0.02,0,0,0,20000,CPU,402.2970163822174,79.80874180793762 -GraphTM,0.02,10000,10.0,23,10,CUDA,120.20444130897522,97.8415300546448 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,998.2885782718658,72.1311475409836 -Graph NN,0.05,0,0,0,20000,CPU,400.97751235961914,85.30054688453674 -GraphTM,0.05,10000,10.0,23,10,CUDA,136.81029963493347,94.78142076502732 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1003.2194263935089,64.1051912568306 -Graph NN,0.1,0,0,0,20000,CPU,413.25741934776306,74.59016442298889 -GraphTM,0.1,10000,10.0,23,10,CUDA,148.70455861091614,89.89071038251366 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,974.4099938869476,49.62431693989071 -Graph NN,0.2,0,0,0,20000,CPU,369.36416029930115,64.15300369262695 -GraphTM,0.2,10000,10.0,23,10,CUDA,170.01750564575195,78.55191256830601 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,990.2080008983612,20.184426229508194 -Graph NN,0.005,0,0,0,20000,CPU,440.5256702899933,90.4644787311554 -GraphTM,0.005,10000,10.0,23,10,CUDA,109.76434278488159,98.55191256830601 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1004.704318523407,76.63934426229508 -Graph NN,0.01,0,0,0,20000,CPU,385.76011848449707,77.62295007705688 -GraphTM,0.01,10000,10.0,23,10,CUDA,113.28425002098083,98.44262295081967 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,953.8945541381836,74.93169398907104 -Graph NN,0.02,0,0,0,20000,CPU,422.2995481491089,90.71038365364075 -GraphTM,0.02,10000,10.0,23,10,CUDA,121.29091334342957,97.6775956284153 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1002.099497795105,72.1311475409836 -Graph NN,0.05,0,0,0,20000,CPU,383.33483958244324,81.8306028842926 -GraphTM,0.05,10000,10.0,23,10,CUDA,134.72863698005676,94.53551912568307 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,936.831921339035,64.1051912568306 
-Graph NN,0.1,0,0,0,20000,CPU,320.32143545150757,83.60655903816223 -GraphTM,0.1,10000,10.0,23,10,CUDA,150.56500816345215,89.15300546448087 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,955.8687121868134,49.62431693989071 -Graph NN,0.2,0,0,0,20000,CPU,432.34014868736267,64.15300369262695 -GraphTM,0.2,10000,10.0,23,10,CUDA,169.61127710342407,79.12568306010928 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,945.0617082118988,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,458.87039852142334,79.37158346176147 -GraphTM,0.005,10000,10.0,23,10,CUDA,110.9952290058136,98.82513661202185 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,985.8775904178619,76.63934426229508 -Graph NN,0.01,0,0,0,20000,CPU,453.55728340148926,76.06557607650757 -GraphTM,0.01,10000,10.0,23,10,CUDA,113.85269451141357,98.27868852459017 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,941.0662143230438,75.06830601092896 -Graph NN,0.02,0,0,0,20000,CPU,416.2407822608948,91.66666865348816 -GraphTM,0.02,10000,10.0,23,10,CUDA,120.69959592819214,97.78688524590164 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,973.9127674102783,72.40437158469946 -Graph NN,0.05,0,0,0,20000,CPU,311.2621831893921,75.46448111534119 -GraphTM,0.05,10000,10.0,23,10,CUDA,134.66055345535278,94.89071038251366 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,953.3380017280579,63.25136612021858 -Graph NN,0.1,0,0,0,20000,CPU,425.43416261672974,73.79781603813171 -GraphTM,0.1,10000,10.0,23,10,CUDA,150.67951107025146,90.27322404371586 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,967.5897221565247,49.59016393442623 -Graph NN,0.2,0,0,0,20000,CPU,379.8497235774994,64.15300369262695 -GraphTM,0.2,10000,10.0,23,10,CUDA,169.7126281261444,77.81420765027323 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,955.9427745342255,20.116120218579233 diff --git a/examples/recomm_system/experiment_results_old.csv b/examples/recomm_system/experiment_results_old.csv deleted file mode 100644 index f715ba6a..00000000 --- 
a/examples/recomm_system/experiment_results_old.csv +++ /dev/null @@ -1,271 +0,0 @@ -Algorithm,Noise_Ratio,T,s,Max_Included_Literals,Epochs,Platform,Total_Time,Accuracy -Graph NN,0.005,0,0,0,1000,CPU,0.03006434440612793,76.72131061553955 -GraphTM,0.005,10000,10.0,23,10,CUDA,34.547648191452026,98.46994535519126 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,89.6943154335022,76.63934426229508 -Graph NN,0.01,0,0,0,1000,CPU,0.01817464828491211,75.95628499984741 -GraphTM,0.01,10000,10.0,23,10,CUDA,34.95576763153076,98.44262295081967 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,96.10501098632812,74.93169398907104 -Graph NN,0.02,0,0,0,1000,CPU,0.03073263168334961,81.22950792312622 -GraphTM,0.02,10000,10.0,23,10,CUDA,36.0724892616272,97.43169398907104 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,95.67133641242981,72.40437158469946 -Graph NN,0.05,0,0,0,1000,CPU,0.014258623123168945,83.52459073066711 -GraphTM,0.05,10000,10.0,23,10,CUDA,38.86628317832947,95.0 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,96.7427487373352,64.65163934426229 -Graph NN,0.1,0,0,0,1000,CPU,0.022305965423583984,73.33333492279053 -GraphTM,0.1,10000,10.0,23,10,CUDA,37.45086216926575,90.08196721311475 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,90.45554423332214,49.8292349726776 -Graph NN,0.2,0,0,0,1000,CPU,0.03204679489135742,59.863388538360596 -GraphTM,0.2,10000,10.0,23,10,CUDA,16.268279790878296,78.77049180327869 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,96.16712856292725,20.184426229508194 -Graph NN,0.005,0,0,0,1000,CPU,0.0168764591217041,76.85792446136475 -GraphTM,0.005,10000,10.0,23,10,CUDA,31.40691065788269,98.82513661202185 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,88.05298614501953,76.74180327868852 -Graph NN,0.01,0,0,0,1000,CPU,0.01720118522644043,87.4316930770874 -GraphTM,0.01,10000,10.0,23,10,CUDA,31.529547214508057,98.4153005464481 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,89.19472336769104,74.93169398907104 -Graph 
NN,0.02,0,0,0,1000,CPU,0.014032602310180664,78.36065292358398 -GraphTM,0.02,10000,10.0,23,10,CUDA,32.8007595539093,97.62295081967213 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,94.56675243377686,72.6775956284153 -Graph NN,0.05,0,0,0,1000,CPU,0.016784191131591797,76.88524723052979 -GraphTM,0.05,10000,10.0,23,10,CUDA,34.84256434440613,94.75409836065573 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,96.4975814819336,64.1051912568306 -Graph NN,0.1,0,0,0,1000,CPU,0.014883041381835938,70.54644823074341 -GraphTM,0.1,10000,10.0,23,10,CUDA,36.750433683395386,89.97267759562841 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,96.35110449790955,50.341530054644814 -Graph NN,0.2,0,0,0,1000,CPU,0.03427433967590332,61.50273084640503 -GraphTM,0.2,10000,10.0,23,10,CUDA,39.63756251335144,79.01639344262294 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,97.00698733329773,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,370.7295939922333,87.5683069229126 -GraphTM,0.005,10000,10.0,23,10,CUDA,342.7878243923187,98.82513661202185 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,954.4101324081421,76.63934426229508 -Graph NN,0.01,0,0,0,20000,CPU,304.6031119823456,86.74863576889038 -GraphTM,0.01,10000,10.0,23,10,CUDA,346.8704605102539,98.25136612021858 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,978.3629264831543,74.93169398907104 -Graph NN,0.02,0,0,0,20000,CPU,403.2585175037384,75.30054450035095 -GraphTM,0.02,10000,10.0,23,10,CUDA,353.39254236221313,97.65027322404372 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,971.3300836086273,72.1311475409836 -Graph NN,0.05,0,0,0,20000,CPU,398.8085067272186,93.8524603843689 -GraphTM,0.05,10000,10.0,23,10,CUDA,368.16111874580383,94.59016393442623 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,960.4506890773773,63.661202185792355 -Graph NN,0.1,0,0,0,20000,CPU,388.4886665344238,75.43715834617615 -GraphTM,0.1,10000,10.0,23,10,CUDA,340.63327074050903,90.43715846994536 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,972.1077370643616,49.35109289617486 -Graph 
NN,0.2,0,0,0,20000,CPU,438.5506749153137,64.04371857643127 -GraphTM,0.2,10000,10.0,23,10,CUDA,357.2651107311249,77.89617486338798 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,948.7157049179077,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,335.8319003582001,94.97267603874207 -GraphTM,0.005,10000,10.0,23,10,CUDA,343.08735728263855,98.63387978142076 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,947.0340785980225,76.74180327868852 -Graph NN,0.01,0,0,0,20000,CPU,380.5575759410858,94.37158703804016 -GraphTM,0.01,10000,10.0,23,10,CUDA,346.9574134349823,98.4153005464481 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,948.3826260566711,74.93169398907104 -Graph NN,0.02,0,0,0,20000,CPU,317.0974416732788,80.6010901927948 -GraphTM,0.02,10000,10.0,23,10,CUDA,352.5908226966858,97.5136612021858 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,966.0719907283783,72.40437158469946 -Graph NN,0.05,0,0,0,20000,CPU,472.30924010276794,73.08743000030518 -GraphTM,0.05,10000,10.0,23,10,CUDA,352.63378834724426,94.18032786885246 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,959.5826976299286,64.31010928961749 -Graph NN,0.1,0,0,0,20000,CPU,461.1769962310791,82.45901465415955 -GraphTM,0.1,10000,10.0,23,10,CUDA,384.25392842292786,89.80874316939891 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,968.517664194107,49.62431693989071 -Graph NN,0.2,0,0,0,20000,CPU,338.83801436424255,61.39343976974487 -GraphTM,0.2,10000,10.0,23,10,CUDA,406.0366141796112,79.37158469945356 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,956.5074710845947,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,449.974244594574,99.07103776931763 -GraphTM,0.005,10000,10.0,23,10,CUDA,110.82642030715942,98.63387978142076 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,958.8415122032166,76.63934426229508 -Graph NN,0.01,0,0,0,20000,CPU,340.71677923202515,91.557377576828 -GraphTM,0.01,10000,10.0,23,10,CUDA,113.30413746833801,98.4153005464481 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,954.5596807003021,74.86338797814209 -Graph 
NN,0.02,0,0,0,20000,CPU,395.9958527088165,90.95628261566162 -GraphTM,0.02,10000,10.0,23,10,CUDA,120.9222981929779,97.8415300546448 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,969.4929764270782,72.40437158469946 -Graph NN,0.05,0,0,0,20000,CPU,480.05427837371826,84.83606576919556 -GraphTM,0.05,10000,10.0,23,10,CUDA,135.44805693626404,94.67213114754098 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,960.4112854003906,64.00273224043715 -Graph NN,0.1,0,0,0,20000,CPU,383.12051796913147,70.8743155002594 -GraphTM,0.1,10000,10.0,23,10,CUDA,149.93119883537292,89.86338797814207 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,951.469316482544,49.35109289617486 -Graph NN,0.2,0,0,0,20000,CPU,463.9883725643158,66.22951030731201 -GraphTM,0.2,10000,10.0,23,10,CUDA,170.47470378875732,78.16939890710383 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,960.5258178710938,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,475.9830324649811,82.54098296165466 -GraphTM,0.005,10000,10.0,23,10,CUDA,109.21395993232727,98.7431693989071 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1007.2876415252686,76.74180327868852 -Graph NN,0.01,0,0,0,20000,CPU,383.468213558197,84.89071130752563 -GraphTM,0.01,10000,10.0,23,10,CUDA,112.81892561912537,98.16939890710383 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1012.9538216590881,75.0 -Graph NN,0.02,0,0,0,20000,CPU,420.129834651947,78.87977957725525 -GraphTM,0.02,10000,10.0,23,10,CUDA,120.55745768547058,97.75956284153006 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1015.7262468338013,72.43852459016394 -Graph NN,0.05,0,0,0,20000,CPU,402.9082715511322,88.90710473060608 -GraphTM,0.05,10000,10.0,23,10,CUDA,136.1779272556305,94.69945355191257 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1003.5450174808502,64.1051912568306 -Graph NN,0.1,0,0,0,20000,CPU,465.9741690158844,71.61202430725098 -GraphTM,0.1,10000,10.0,23,10,CUDA,150.92307353019714,90.21857923497268 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,993.3001370429993,49.62431693989071 -Graph 
NN,0.2,0,0,0,20000,CPU,477.5556457042694,61.967211961746216 -GraphTM,0.2,10000,10.0,23,10,CUDA,170.91576671600342,78.71584699453553 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,968.9711816310883,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,432.9368100166321,87.9781424999237 -GraphTM,0.005,10000,10.0,23,10,CUDA,110.05442261695862,98.4153005464481 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,996.241945028305,77.11748633879782 -Graph NN,0.01,0,0,0,20000,CPU,487.0275945663452,76.06557607650757 -GraphTM,0.01,10000,10.0,23,10,CUDA,114.20750546455383,98.27868852459017 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,965.2801012992859,74.89754098360656 -Graph NN,0.02,0,0,0,20000,CPU,469.96120142936707,84.61748361587524 -GraphTM,0.02,10000,10.0,23,10,CUDA,120.18490934371948,97.62295081967213 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,996.0747539997101,73.05327868852459 -Graph NN,0.05,0,0,0,20000,CPU,391.52739334106445,94.4535493850708 -GraphTM,0.05,10000,10.0,23,10,CUDA,135.62234830856323,94.89071038251366 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1008.111634016037,64.00273224043715 -Graph NN,0.1,0,0,0,20000,CPU,393.4089164733887,82.24043846130371 -GraphTM,0.1,10000,10.0,23,10,CUDA,150.06821942329407,90.21857923497268 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1029.8733656406403,46.89207650273224 -Graph NN,0.2,0,0,0,20000,CPU,457.90059518814087,64.50819969177246 -GraphTM,0.2,10000,10.0,23,10,CUDA,169.8122251033783,78.5792349726776 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,994.4631915092468,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,468.43779039382935,93.66120100021362 -GraphTM,0.005,10000,10.0,23,10,CUDA,791.0080873966217,98.66120218579235 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1003.8278872966766,76.63934426229508 -Graph NN,0.01,0,0,0,20000,CPU,432.6524693965912,76.06557607650757 -GraphTM,0.01,10000,10.0,23,10,CUDA,114.20011568069458,98.4153005464481 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1002.5212485790253,74.55601092896174 
-Graph NN,0.02,0,0,0,20000,CPU,369.3357195854187,77.92349457740784 -GraphTM,0.02,10000,10.0,23,10,CUDA,120.16606998443604,97.78688524590164 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1012.7241668701172,72.40437158469946 -Graph NN,0.05,0,0,0,20000,CPU,425.18350195884705,73.49726557731628 -GraphTM,0.05,10000,10.0,23,10,CUDA,134.74739480018616,94.53551912568307 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,989.5920696258545,64.00273224043715 -Graph NN,0.1,0,0,0,20000,CPU,490.52463579177856,74.23497438430786 -GraphTM,0.1,10000,10.0,23,10,CUDA,150.663067817688,90.05464480874316 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1006.5979704856873,49.86338797814208 -Graph NN,0.2,0,0,0,20000,CPU,430.0901610851288,55.51912784576416 -GraphTM,0.2,10000,10.0,23,10,CUDA,169.53561758995056,78.52459016393442 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,978.9952318668365,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,348.87419414520264,88.87978196144104 -GraphTM,0.005,10000,10.0,23,10,CUDA,110.35069704055786,98.49726775956285 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1027.553718328476,76.63934426229508 -Graph NN,0.01,0,0,0,20000,CPU,459.8675227165222,94.97267603874207 -GraphTM,0.01,10000,10.0,23,10,CUDA,113.4369592666626,98.4153005464481 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1020.3086180686951,74.93169398907104 -Graph NN,0.02,0,0,0,20000,CPU,402.3793728351593,98.08743000030518 -GraphTM,0.02,10000,10.0,23,10,CUDA,121.04798412322998,97.78688524590164 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1001.1654710769653,72.1311475409836 -Graph NN,0.05,0,0,0,20000,CPU,372.8648886680603,77.81420946121216 -GraphTM,0.05,10000,10.0,23,10,CUDA,135.42569255828857,94.78142076502732 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1007.9532639980316,64.31010928961749 -Graph NN,0.1,0,0,0,20000,CPU,379.2149317264557,88.55191469192505 -GraphTM,0.1,10000,10.0,23,10,CUDA,149.3813440799713,89.50819672131148 
-TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1043.259267091751,49.65846994535519 -Graph NN,0.2,0,0,0,20000,CPU,327.1461730003357,64.15300369262695 -GraphTM,0.2,10000,10.0,23,10,CUDA,169.63331365585327,77.75956284153006 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,1010.9232707023621,20.081967213114755 -Graph NN,0.005,0,0,0,20000,CPU,365.3540139198303,84.56284403800964 -GraphTM,0.005,10000,10.0,23,10,CUDA,109.86703443527222,98.55191256830601 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,985.2240512371063,76.84426229508196 -Graph NN,0.01,0,0,0,20000,CPU,419.19047832489014,90.65573811531067 -GraphTM,0.01,10000,10.0,23,10,CUDA,113.7970187664032,98.19672131147541 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1064.9949398040771,75.27322404371584 -Graph NN,0.02,0,0,0,20000,CPU,331.5898778438568,82.13114738464355 -GraphTM,0.02,10000,10.0,23,10,CUDA,120.9221625328064,97.8415300546448 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,995.9801988601685,72.1311475409836 -Graph NN,0.05,0,0,0,20000,CPU,471.61706471443176,76.4207661151886 -GraphTM,0.05,10000,10.0,23,10,CUDA,135.71256685256958,94.31693989071039 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1049.4032156467438,64.00273224043715 -Graph NN,0.1,0,0,0,20000,CPU,408.78746509552,75.76502561569214 -GraphTM,0.1,10000,10.0,23,10,CUDA,150.7326798439026,89.86338797814207 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1033.6956369876862,49.59016393442623 -Graph NN,0.2,0,0,0,20000,CPU,606.6176900863647,75.84699392318726 -GraphTM,0.2,10000,10.0,23,10,CUDA,767.3086304664612,79.18032786885246 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,1133.7219278812408,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,581.8730342388153,75.68305730819702 -GraphTM,0.005,10000,10.0,23,10,CUDA,331.4337913990021,98.68852459016394 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1139.0209171772003,76.63934426229508 -Graph NN,0.01,0,0,0,20000,CPU,625.7649390697479,79.91803288459778 
-GraphTM,0.01,10000,10.0,23,10,CUDA,390.8302972316742,98.27868852459017 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1126.1463103294373,74.86338797814209 -Graph NN,0.02,0,0,0,20000,CPU,400.4486656188965,88.87978196144104 -GraphTM,0.02,10000,10.0,23,10,CUDA,1433.5869204998016,97.73224043715847 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,950.7241444587708,72.43852459016394 -Graph NN,0.05,0,0,0,20000,CPU,425.54064321517944,88.22404146194458 -GraphTM,0.05,10000,10.0,23,10,CUDA,135.85678553581238,95.0 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,980.2039513587952,40.26639344262295 -Graph NN,0.1,0,0,0,20000,CPU,452.5277452468872,75.38251280784607 -GraphTM,0.1,10000,10.0,23,10,CUDA,149.88782930374146,89.80874316939891 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1026.7852320671082,49.59016393442623 -Graph NN,0.2,0,0,0,20000,CPU,470.88474774360657,69.67213153839111 -GraphTM,0.2,10000,10.0,23,10,CUDA,169.65682435035706,78.38797814207649 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,1025.9789564609528,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,415.4326367378235,75.68305730819702 -GraphTM,0.005,10000,10.0,23,10,CUDA,109.78167200088501,98.82513661202185 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1003.7237763404846,76.63934426229508 -Graph NN,0.01,0,0,0,20000,CPU,444.45101857185364,92.65027046203613 -GraphTM,0.01,10000,10.0,23,10,CUDA,112.90761637687683,98.14207650273225 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,952.2491714954376,74.86338797814209 -Graph NN,0.02,0,0,0,20000,CPU,320.37370920181274,93.90710592269897 -GraphTM,0.02,10000,10.0,23,10,CUDA,119.93352174758911,97.78688524590164 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,943.684113740921,72.40437158469946 -Graph NN,0.05,0,0,0,20000,CPU,481.5506682395935,73.08743000030518 -GraphTM,0.05,10000,10.0,23,10,CUDA,135.72563362121582,94.75409836065573 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1023.0283312797546,63.661202185792355 -Graph NN,0.1,0,0,0,20000,CPU,493.5546169281006,70.92896103858948 
-GraphTM,0.1,10000,10.0,23,10,CUDA,149.45619106292725,89.80874316939891 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1015.6581709384918,49.62431693989071 -Graph NN,0.2,0,0,0,20000,CPU,413.9959945678711,64.15300369262695 -GraphTM,0.2,10000,10.0,23,10,CUDA,170.9294879436493,78.77049180327869 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,986.7937209606171,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,378.53097796440125,75.68305730819702 -GraphTM,0.005,10000,10.0,23,10,CUDA,110.98681783676147,98.30601092896175 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1019.9160070419312,76.74180327868852 -Graph NN,0.01,0,0,0,20000,CPU,474.00093841552734,91.0109281539917 -GraphTM,0.01,10000,10.0,23,10,CUDA,111.94242978096008,98.4153005464481 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,999.8850407600403,75.03415300546447 -Graph NN,0.02,0,0,0,20000,CPU,346.5858099460602,79.3169379234314 -GraphTM,0.02,10000,10.0,23,10,CUDA,120.32013273239136,97.81420765027322 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,937.4906117916107,71.92622950819673 -Graph NN,0.05,0,0,0,20000,CPU,408.48123002052307,79.61748838424683 -GraphTM,0.05,10000,10.0,23,10,CUDA,134.27622246742249,94.72677595628414 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,998.1966772079468,64.1051912568306 -Graph NN,0.1,0,0,0,20000,CPU,373.10851979255676,70.8743155002594 -GraphTM,0.1,10000,10.0,23,10,CUDA,148.95248794555664,89.86338797814207 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,993.9887461662292,49.62431693989071 -Graph NN,0.2,0,0,0,20000,CPU,388.21142077445984,64.15300369262695 -GraphTM,0.2,10000,10.0,23,10,CUDA,168.77049660682678,76.93989071038251 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,947.7270972728729,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,370.7274992465973,75.79234838485718 -GraphTM,0.005,10000,10.0,23,10,CUDA,109.92479467391968,98.27868852459017 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,944.8954434394836,76.74180327868852 -Graph 
NN,0.01,0,0,0,20000,CPU,382.68008041381836,90.8196747303009 -GraphTM,0.01,10000,10.0,23,10,CUDA,113.02455401420593,98.4153005464481 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,958.4739623069763,74.86338797814209 -Graph NN,0.02,0,0,0,20000,CPU,466.3325071334839,96.22950553894043 -GraphTM,0.02,10000,10.0,23,10,CUDA,121.06816530227661,97.6775956284153 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,952.7009084224701,72.1311475409836 -Graph NN,0.05,0,0,0,20000,CPU,462.6835868358612,75.79234838485718 -GraphTM,0.05,10000,10.0,23,10,CUDA,136.21898555755615,94.53551912568307 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,974.2475302219391,64.00273224043715 -Graph NN,0.1,0,0,0,20000,CPU,425.79654932022095,87.18579411506653 -GraphTM,0.1,10000,10.0,23,10,CUDA,149.70053339004517,90.0 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1005.1148529052734,49.59016393442623 -Graph NN,0.2,0,0,0,20000,CPU,454.7309219837189,60.10928750038147 -GraphTM,0.2,10000,10.0,23,10,CUDA,170.75228261947632,78.68852459016394 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,949.2937788963318,20.116120218579233 -Graph NN,0.005,0,0,0,20000,CPU,570.247394323349,75.68305730819702 -GraphTM,0.005,10000,10.0,23,10,CUDA,478.04068207740784,98.5792349726776 -TMClassifier,0.005,10000,10.0,32,10,CPU_sparse,1043.9900722503662,76.22950819672131 -Graph NN,0.01,0,0,0,20000,CPU,428.5804445743561,98.68852496147156 -GraphTM,0.01,10000,10.0,23,10,CUDA,522.4638862609863,98.44262295081967 -TMClassifier,0.01,10000,10.0,32,10,CPU_sparse,1060.4919381141663,74.93169398907104 -Graph NN,0.02,0,0,0,20000,CPU,432.5051038265228,76.25682950019836 -GraphTM,0.02,10000,10.0,23,10,CUDA,465.56538343429565,97.73224043715847 -TMClassifier,0.02,10000,10.0,32,10,CPU_sparse,1074.2418582439423,72.40437158469946 -Graph NN,0.05,0,0,0,20000,CPU,492.7251534461975,85.16393303871155 -GraphTM,0.05,10000,10.0,23,10,CUDA,688.4105927944183,94.91803278688525 -TMClassifier,0.05,10000,10.0,32,10,CPU_sparse,1055.8136265277863,64.00273224043715 -Graph 
NN,0.1,0,0,0,20000,CPU,584.2829260826111,78.22404503822327 -GraphTM,0.1,10000,10.0,23,10,CUDA,625.4286091327667,90.13661202185791 -TMClassifier,0.1,10000,10.0,32,10,CPU_sparse,1055.7545056343079,48.83879781420765 -Graph NN,0.2,0,0,0,20000,CPU,318.2997555732727,67.40437150001526 -GraphTM,0.2,10000,10.0,23,10,CUDA,1264.404123544693,77.62295081967213 -TMClassifier,0.2,10000,10.0,32,10,CPU_sparse,1000.3779845237732,20.081967213114755 diff --git a/examples/recomm_system/test.ipynb b/examples/recomm_system/test.ipynb index 07b30e69..bf5c1fac 100644 --- a/examples/recomm_system/test.ipynb +++ b/examples/recomm_system/test.ipynb @@ -67,7 +67,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 4, "metadata": {}, "outputs": [ { @@ -80,12 +80,12 @@ "\\begin{tabular}{|c|c|c|c|}\n", "\\hline\n", "\\textbf{Noise Ratio} & \\textbf{GCN (\\%)} & \\textbf{GTM (\\%)} & \\textbf{TMClassifier (\\%)} \\\\ \\hline\n", - "0.005 & 83.36 & 98.67 & 76.67 \\\\ \\hline\n", - "0.01 & 86.13 & 98.42 & 74.58 \\\\ \\hline\n", - "0.02 & 85.03 & 97.75 & 72.10 \\\\ \\hline\n", - "0.05 & 79.93 & 94.80 & 63.87 \\\\ \\hline\n", - "0.1 & 73.73 & 89.73 & 49.44 \\\\ \\hline\n", - "0.2 & 65.86 & 78.48 & 20.11 \\\\ \\hline\n", + "0.005 & 83.39 & 98.73 & 76.73 \\\\ \\hline\n", + "0.01 & 85.55 & 98.35 & 74.87 \\\\ \\hline\n", + "0.02 & 83.57 & 97.73 & 72.24 \\\\ \\hline\n", + "0.05 & 82.13 & 94.61 & 63.86 \\\\ \\hline\n", + "0.1 & 75.93 & 89.85 & 49.48 \\\\ \\hline\n", + "0.2 & 64.12 & 78.73 & 20.13 \\\\ \\hline\n", "\\end{tabular}\n", "\\caption{Average accuracy comparison of GCN, GraphTM, and TMClassifier for varying noise ratios.}\n", "\\label{tab:recomm_sys_accuracy}\n", @@ -96,7 +96,7 @@ "source": [ "import pandas as pd\n", "data = pd.read_csv(\"experiment_results.csv\")\n", - "exp_id = \"20250225090119\" \n", + "exp_id = \"20250409090514\" \n", "data['Exp_id'] = data['Exp_id'].astype(str)\n", "filtered_data = data[data['Exp_id'] == exp_id]\n", "# print(filtered_data)\n", From 
cedabda4f099e8a2ceaf0870b511647cdf7dfb17 Mon Sep 17 00:00:00 2001 From: Ahmed Khalid Date: Sat, 10 May 2025 08:46:21 +0000 Subject: [PATCH 28/29] calc total time --- examples/recomm_system/test.ipynb | 45 +++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/examples/recomm_system/test.ipynb b/examples/recomm_system/test.ipynb index bf5c1fac..320a30bf 100644 --- a/examples/recomm_system/test.ipynb +++ b/examples/recomm_system/test.ipynb @@ -132,6 +132,51 @@ "\n", "print(latex_table)" ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Averages across all noise ratios:\n", + "Algorithm: Graph NN, Average Accuracy: 79.11%, Average Total Time: 44.80s\n", + "Algorithm: GraphTM, Average Accuracy: 93.00%, Average Total Time: 133.75s\n", + "Algorithm: TMClassifier, Average Accuracy: 59.55%, Average Total Time: 1068.99s\n" + ] + } + ], + "source": [ + "import pandas as pd\n", + "\n", + "# Read the CSV file\n", + "data = pd.read_csv(\"experiment_results.csv\")\n", + "\n", + "# Define the experiment ID you want to filter\n", + "exp_id = \"20250409090514\"\n", + "\n", + "# Ensure that Exp_id is treated as a string\n", + "data['Exp_id'] = data['Exp_id'].astype(str)\n", + "\n", + "# Filter the data based on the experiment ID\n", + "filtered_data = data[data['Exp_id'] == exp_id]\n", + "\n", + "# Group the data by Algorithm to calculate average accuracies and total time across all noise ratios\n", + "grouped_data = filtered_data.groupby('Algorithm').agg({'Accuracy': 'mean', 'Total_Time': 'mean'}).reset_index()\n", + "\n", + "# Print the average results for each algorithm across all noise ratios\n", + "print(\"Averages across all noise ratios:\")\n", + "for _, row in grouped_data.iterrows():\n", + " algorithm = row['Algorithm']\n", + " average_accuracy = row['Accuracy']\n", + " average_total_time = row['Total_Time']\n", + " \n", + " # Print the 
results\n", + " print(f\"Algorithm: {algorithm}, Average Accuracy: {average_accuracy:.2f}%, Average Total Time: {average_total_time:.2f}s\")\n" + ] } ], "metadata": { From 14b0b689ac6559ec51f5dd91ecc8d1f06d13fe7a Mon Sep 17 00:00:00 2001 From: Ahmed Khalid Date: Tue, 20 May 2025 11:10:28 +0000 Subject: [PATCH 29/29] prepare as sup. mat. --- examples/recomm_system/README.md | 17 +++- .../recomm_system/experiment_results.xlsx | Bin 0 -> 33687 bytes examples/recomm_system/main.py | 36 ++++++++ examples/recomm_system/test.ipynb | 82 ++++++++++++++++++ 4 files changed, 134 insertions(+), 1 deletion(-) create mode 100644 examples/recomm_system/experiment_results.xlsx create mode 100644 examples/recomm_system/main.py diff --git a/examples/recomm_system/README.md b/examples/recomm_system/README.md index e7fa211a..c03a4deb 100644 --- a/examples/recomm_system/README.md +++ b/examples/recomm_system/README.md @@ -1,2 +1,17 @@ +# Recommender System Experiments + +**How to run:** +```sh cd examples/recomm_system/ -bash main.sh \ No newline at end of file +python3 main.py +``` + +**Files:** +- `main.py` — Runs all experiments, calls each model script for various noise ratios, saves results to `experiment_results.csv`. +- `graph_nn.py` — Graph Neural Network (GCN) experiment. +- `graph_tm.py` — Graph Tsetlin Machine experiment. +- `tm_classifier.py` — Tsetlin Machine Classifier experiment. +- `prepare_dataset.py` — Dataset download, noise injection, preprocessing. +- `experiment_results.csv` — Results log (auto-generated). +- `test.ipynb` — Summarizes results, generates LaTeX tables. 
+ diff --git a/examples/recomm_system/experiment_results.xlsx b/examples/recomm_system/experiment_results.xlsx new file mode 100644 index 0000000000000000000000000000000000000000..54f854981d2bdf3ad205a300b352aca0698f15f7 GIT binary patch literal 33687 zcmeFY<9}sMv@ROkRwo^GY}>YN+xCiW+qTV)jZQkYZRh5F_rCY;bI$$`_L(1Q)tdA9 zt(sMHJkJ=b<{0u)pkQb~5I|5sKtO~*3T5QZe}RF3GQPi}0zrXj3E9~?o7g(*DSOzP zIO)*3+gSfB00SY<0|NQR|KH<(@Cr1g$k?vYBLzQ!dqM`R2^K7fD4?Y@iqD(iHeCW^ zIa(th60kQJZS@dKN<{KECu#moYx?c6wP4SFfBR}K2Eo!5#CW;#=_=(+35c3})nFc1(gtsMue#JeR~mB@-Hf7 z#H3Z-957!T{4gZ+MWP?*Yui;)Mj1NV6H$rXKmxok^ZVS$M&MKDY1gQ~RPSGN=<$i} zVmM_iBB)?^{)UoqBa>iPxt^KpJHvgCwv4~xZ@wDtLt?X#xwc?7TwhGZ8;i27Ys{_H zgu`F#@4)k7!FFb~kWubZ&YJ#5GZWRu>Bv7z5`nWi%^Q!g+L4hLXVA)PCvCUwE+`J_ zFc6bMY7Fn*Wnc)YVpkLtxLdGyKK31nyJw-)6vt6+xhIEz!z14Ya5`Oa7K0 zwT0InMaL$}zQBe|2H5E2f-0c)#`4PJ_yjEzRFu9|MAL#o3rOSS*j43Ee@PcrUhgWw zU6@qzcc;)irFC^Y8d3>7Hzrq6neSLuf$^S2kfU4VL0`jd0Ti=9-{Z<4$M-yu& zI@*8Q|EmQ5gX#ZYre2jGBL&KU6mkXlBAo2zTB9VIx26+0mNEVSW=Q|gXG@PO>iBpP z6>!dP!S~Fnb<6Qce=uz*bT35maz#lIfd*umE_A96NPV=mK_VwQibkJ{4+IcdSDaNo zFhx@yS(X zb^%sWSq+mzUVboq&%bIB*fXB19~u{lf>&d8%}P3>a2~(R!!1$_a1%w(Y63K3^%JNP z`>PI0g8lX?&G3tu64~1cIXdqVSuQYn3B+(Ibu>Ou&h!|Eml7*OlxjD_q*GRrrOF9h zf`4Pf;@Cr-ZXK8W8=~6Y4jRYhVLx;UZ_5oFC1IO}vRJ1M1B%zP|DbR%RLU~hFesEMJ3&sL-BxkzJuz0`VcTLc|xPVirDY%qd zCpvpgkaNe$+;r4Qc6{+2jEN@?+ZQmC%&8`4Df8-)-vs#IvNF)eKjg4Esf~8CO?vmp zn#aHtClg_G`4T&)6bKNy+&7r4Inp|7cOSF(VEFYCSh5??8q(Kzd_p*sVJH$*iQV=J zk$|84_9m~gh}ps0WxFRF{-x|BsF+V?kA}RNYdKt`uE(AAJYi$hSzv+bR}Z;o%4ew8 zIgQ=CsLFKJgP@q@>#=b%+M|c=?dxpAkU1G}qqp6hJC7S`!>!oD(%!9lGm(yGqoT(Y za35{GeKj%|{fmzPd)rX9{(+c5`w-CGHtz25(p9-&m2)$rdQ#Q+ot2nPrX_?sU8;Z*)BN&a641N^brw)qv!{$|hcDcF{bJz~o!GGrZU3iGrNO-V|e zb7s=%!sztzU|Adp~>}!USY)ZX~|B(m<)}i41C(` zih-lVYZ^ox#~_fTH+Ehe98vGE|DDz{ z&V7lV5CQ?=76SoceOt+YYORyGiHWll-M=3U|A=l@(pt-%L^Kc) zD6eK&tkN24wI|(26r;38J*%?SZ4{~$+*CjX%z87)b~h8ntjypvOnC%uZ}(kU4zIs1}glNQPf@ zSCk|z>+iKZI1>e?$x1AHCx=4Olh;j5i7WTQYM|x_Y&X^dZnhwVp|qs;>pqO;odfHH 
zBr99B&O}Ho5Ex2OTgSsoatD~iR%%jqRLMW_o+}+*p!*WNKgt@c-(`vem`IzcWiZEl zGw-V3^ZD*6xDst2wt}JGdH|TVxb6ZXS5g#FGgZeRXWghNSq3fIlxw#{WVMOLDAd0e zEX**3yJh*JkFuTVrMVL6#S__cCJXFeL!-pNM-a4BasZ5EiS%zMC4@I4#B2h5?C1AO zn@r9xaOm`)SfnI5KW2-QaIK!hVnK{qn;@>P*Yz{AqvNN{$fw{cV2JaK$Km_}$e1U_ z6mf`ydEC_cwhI-wu(gID{Kgla_f2WKa2SdF-fVDnhjig?#FgpFmnJj!45Bw+Ad>`KDc!1*QU{z6RmD@=NOWNwmk*muA*}AbR2lv zOHz-eW=$Q=KXFeX#h_|1p)K&95}o~BIuij`(=G67$$g$Z7Z;tnpWy%4;J2B5eI;dE z)snPEhkQjaY$?@LN}{housZGDJcz+{f$l50#xpc@e)$qqbhrpNERvE2pcc6=PGLlP zdU!o-Ecz68HNupz3+I?0HubL|Y3{__BI^)7$J7edXXyxjo~h zc32<4*lU^@aThLBp}<=<(EVdXK>U>xYvKL%OHzQf$jD7tld26HJQt-JCgpa48DXKT zc41mA##C)fa*P3k$x)Z-K-K`rECr$N*jWA(`CR6QE+A))nN zSFBaq2&wWEgYyaTeC^Z?g~D8^H@6JzmIG(Whq|iu?71vO#7p%V-r>v2F#C@+gCIZd z;2Xxjkut;4KDR_yk9nK3-DhOTC(EqwNoDUO2YVJ-BZN^7e_L#Zt*7J^BQDS4qE&Py ztT_2{1`gy5eVgRG9G~EIZ%UOMogZJEeD(1@>0Scnc9xGojZS)bzWf?zugc<|?l-@OYA>K0j;#bk`GV13SSTdW`ohS8R_JmFj79$kpy==0MiTBxq9-7XWTsWaD{My{RFbi$(M>V09QJ5S_*^`k6)GSGVTf&77Ikb77+ z)cf`Od~wFqB_(^f4lHKPL#yC#ga0~H9-0w1>s{h|O>!2-CP9znc68e8Jx-jgQR*1U&GL}|MmbirC-0P1-^JxQ zmffy7a#_O0k-?8;lOss=L101p*RSU|+Uj9;fE;4HH7ix`5&j`GRDI2ydcT~iCk(1B zD3W)OdsOD%>fC7dl;#niK?a*$IDiMCFT<{XkPk$dnDlv=Lu<}(#Ff|jFvYZcse>l# z-NHx7kXoKfRNhBh)yglsET)&OB?5cdSgd_Ux{;1prsrZq-t4&*Z1V01^ODlhLGQ_5 ze#$d@+}3%ICVQ>0Bj=N)+EgG7L)PwFz*_2=;nBX{RoWS`8rKU#`^>pUE6X-RTi1wq zx=p-d(5Mmq7yf|;+D8@#;!C<$BXsz#Q*<|igV#%mJJ4rSDrTX+^Qb-RVX>zVN?!_i<-Z^4#gAiT@p}0qD`b5bF82eqGYdzF(6h(;lCM~yF4e^lyqTR8IyI^B z`NvC7>h+qth88i8e%)j$X^VB(<~kQ~(G7+i?KG{3H3+ey#`DSf{@i^JdaJWHOFfaatFWf&I z*n^da7^;gdTI$4NoOIza9i)qy!ysDSU%BS0Oa`hkpA#qw#Mj4 zB2^kD%#x#(mmrWP{W*}Y=Jz2v zPIP5NouACH0?ZEW7#B z#okSwX=Ymob@t={1ro5yoL&lzh8q75AV!Q827IyVD3#r4uV`=G4UHU$DyKWtVY*|W{=@&Fmcj6zWB5faa zor)>v8k|XgXVT#64#7INhtGFH8oA)vtN~9(x`MLA`p*cyWY}#WyO?HfFm{VdTpix4 z!EC0nY*`x?T?R^$1a~L}$gX0)EfT4bWMl7^(4{Aqh+NFMKYLIO39LvW9etE(bo_}W zwvwc!-u*`!v6H%lS4jnT;-wRe2|W_M*UO5&|HE>sY?D?UF9y?N#L`zCn8Zs`SLGyD z{0LQXj7W%}aYQ=mEHWPC-2PcUKcy80OJAwFd$bMuRbQsqJC6z~s*=NmIG;*x;=emUl)r2o{ZEGcD}ixCK9 
zoT_Q_p?o@k5IKVISTsdcpeTh^_32NK#fSWdLO${k{Z2B)9#F`(6pmo&;OpB+17rp=tad8D&5Exg1eeP`g$| zzgCq+Hd0v2Qg`eHf#EYqK!qCE*h$UyaHn@+}n=B?|wSAGXb&h;>Fd+!DBVq1l&^WD& zK=eZoYl4U+PWkF+o9&>q&3W8?>2F!f@;^ZC7l7~I7(SQI7{H(OfL3rnKq}Pomu-*k zco0{&stZXdp(9(GI?;*~t)Rr$?yM6{=waYWEV1V-X9g!*Kp4;C2n{;21o3ZucNqIZ zR!vD_Yj_Vd`eCdJ!$C|RW~tKU=;X%LtB)_IK< zLfL>4%G*Q(XPKsA>K!{J4%-U=#`iQX{~a=PL&1<(8Bfe2^X`D1REC1xKe5FOPFqUJ zT1TBoqW95VW1q84F%}x*#C4c1&MhCoZX-M4cW`M0MR(vnBf7Hkv)!2>g2Z?DFhPVK ztULu8ZHhD9U+U~G{b#Mb7|O5M?x0=Fibi$4LtiMt*TzMos31-hzI;HS_LqxjL*g() zSuxNZGU6AU-$avgRXj;-wq^MIf6)>C|FdNvSWVXOog)8O%bh0Nj#MYzU}8a49M0$<9uKP1}c4rPqC+&ry4bb$fRF%J!yOvzp}qgjEG2tv=n@al8CXV6uX!oJnn z_ieyFe@x@8k{6Ba<0e~fS&FBTPfF<#1SO_wo!({h0-aDzafkMa1^I)&@lnlzmb1Cx+{t+rr<3_>BU>Oi_-@ z@_RFeeB(tpyrrGm&v(_OZuIS$x~@JU>u=|^$Tw_iQ(IJ*$;U<$Bwz<566;s#q>VsQ zg6&1%W+#x?_w_$0^4j-sN2J1JILAPq#$QD&NDKG8#o9Es{5mlVn)jqCnist*|6=Q9 z7CJ4A-nxR0NgU`sV14&)p5N)MXLX3g2((D6{zQosjfg#(ZwSe;UtS<8|GJoEHZdbP^k^xk}rYg6w$75o(tlAKcHJ_vET)XWYMsoaLnLdP}<_{P15YOwvf|$SXq*jWqjI5Zt5uu`2tAI zSBUaa*g{|)$RwNdwZaS*Tu6T-Oy6(%rIg&EKBA3N6(y=1=CM+)G_5D%HwJVi1VjH+ za2q?UB%o;G~YgTjec$+EXDUsL{mj-jC3E;2(kuH?K2S!4sqYl z^Ak!=rEHmEG3Tg!F3w|U^S&)RQwO8Gtf&%)asC&D&r6N7v_?@L2B>n8K-Lkr(*+o3 zN1WidOK{f^RyLh%G=kfEEoAh!f4F9G;Mt6H;m+9*&g%4~XHo49`V7i$6mv{=+>>a_9M)%ojpHG` z9dXJPp^)jKO94NAZq~0z(2ycuajB5QGbS%y%>65EKQ@a`#3d)fsmvh_A$=ILf@&;sjh)HD`W00MK3-u7pHQ!;=|0zu)plTM9I7$dMWF49ZVWwr=M+`w( z1s!wq=;0Jm6<{S!K5>X==1w{Pk#X{EzW7P1w&A#2CY1Gs7IUlT&lZ~LRmj)&cd}2$2HuZbM$W7b*(tkmYd}mwsd!7 z6ppCQEnIx}?;<515o07+VJ&;8s+GOKi8t91Ryf6Bt zZntN87b@-A6_IShW5(OB_qR3}n;d>EKRUT~kNy@tF@75pOm zO6X^FY);;#|5)(Rd=Z7^O7@_Axuow>t&m9sVK9L5H_+XgXkwSA6Rpz5`57La0q+_B zCrNSHeargHS$@+qj14HdA);95J0bm;hd{8r`9s7IQ)<;pJ8F~-PdU7tVt@ON@|Ut7 z1(so0_$hMH;~l!1*WdRf@i1zKV8G1Z&;$~7;^6L2xR-RYgydFvO|}jt(E_WC3qp{F zrYENu%C#ffbE1q7|Jf+Lu&j@lrye~=s`sUQW=Tm1kv={`Px>Xn>X41BtRKG>n2euy zZV7&t`x%Q1n%g=BhHP2vPKKnKEPXoXD%Qn2U}j4O#i{%w<`V}ia-v@w{ue5RX=u5p z`L4`Tl46%;Malq>!;sJXbZlkm{KfH4bi2|?Buwq1CHsuPCc9p4P6ypZdN<7pfVH{_ 
zBm0-Q_Y$UWHRqgk#6Y0mGd# zuKU2>N9+PficKUtM#3XAULWAr?!Fm{U*bvdimsN)mCYE*=?tez6NPAYAthpi!OIZ^ zN+tKuR7cV02!Ehh>XoXr5t*dn%=gdDqewe7C8!(ZQw=e*a;AwXH>Y4@s_&Sb;P~BT zPE%Fkp;k^78!gM1G()4tems=B%rv;rzEHNQO6Mvk<-G^2MY#uOLHQKO)}z!nDcZ<{ zTQz=wO)O?Dd5L_w(!SKBarL2flcDZxd!Bl@e@C0O_lh6)QwpTGx$qsA`Eo@gMzF3)eBmQ=vHzsO9uv2lQv|n zd3)w9a}d}%vAO>BBVn0+f~Jq>5aI+p57ya1T^E`SawS~8aS-I{NVbfXI{~&@#Nq=} zaS~0?ne*7fvwIw^645__lOfvSaYw2Mz6wR3tY8X3<_eY}iJ*f;g_ts_q!?bI=G;Hw zw*hjqx0CzE$YVHNryMv%$aKlICX@fXHu!b%g3w_?tu2ZCZuOA%>|f`XbXXMmBOV&q zGA@iHpndJ8n6W7n$R$hyU8)@;B8*ZVh*MET3TKu-i~`}7jT`cGY5W_6wq^W2Xcb`d z-2go3tlg+4S_Emzoj;e>NCI27EXtN>p+_Q^A1~=w9*$US#`e+t(IKXo3uQ|gLT9{^ zvnK%5McjOekTxc5WFOjiklOXZW=7mYFTPDag>?on2a0@Ya~fmBVn;L%;8A2X_U)I>I*-mFL}qO#4e*$ z@d}!ufxCQicUY3l&~%PeeEY6F7zan5>)Cl%x5wA4bKjJR5)Ua|2)@Xq0SMOYgGwaX zm;vdgP~ZYPvm`1aGSR9~M@uY6WZJwa4!a)*pIfIsFiHd`M-;R@XCk_Dv%Z|h`8h(L zv`^DJE6{}}qc;IfC`UPmNA5}P{T+qJ#GfQ@Zioo*52eeMlg9F?=)6D@2J~U%P+Oe* z%~^4`ic8Zhvy`TiXqgvjCxEYowR~CcHX-$T&k_xacWhNsV=Gdc`MnRt-6GTkRkW5J zc*0Vp&X2NK@FZHaR=w#jio@whumDfmd}il#$J8B!fLy6w3!K?k(uLK^ryGAoHuE~c z4e(tzDVKmgd-_rM?)8Cz>k*M|x%dE9$ORwkJfIc`JN4awl3A&GUx0f$of$c@P+^i3 zU9i{IIAWY-^Wi?}deu6t|pA@a*vRvz@9JB~;ZM%r$U@eJ}-Qg|DJwPGPvu-V#m;hskp z(int3J|-Q%E|G}`{U<62Zo?Av$V~$P$JpUIHC#!VpPE4 zg$JkX?VJ9$@s;qDJE|>o&o8fwmgM7Dx^uA2&U#SSNGq~2G0gkuf7H&JAAOky{+6bV z{F;V??=6d+X0{@i`Pt`<6M689o3_$nE*<5lV@p_OdKP>`TX^eAv?jK5==JZVFs-?0 zH(>@VlG`Rl$=o6sZN<%IUYgV$l|lPkcm}Q;bg2|x#daRx#tlyJNhg*RI^ zN+=9D50k8Sd?`wtxyql)(Bg>f^7ui(Jk&XZ9Ui38@f~R>VC~9F zrxQNf^3fd~Q3VLy%2LFL7AHk7={GR?4W&ohjJ1YNnURY=?*&Lmn}uW4PLJoEN*X++ zkFuqOQKB|tx0Wr;{;k=3p5u@;~Wji6uQpq|bEvB;SZQ#$X zZnwpC@p2|DcQW53LtTBX~Rg7oYl-)Ws8pO(6Jo0o=^KVNN=7LH-{)L1fAsD z_nR=^OU>1}=oF$1`wPmu;A~2Hch)$!(CMchty!hPAg`~&roR_1GhCrA)sh1xEvQ<6 z$lU6#w0!mNZ5r#Mzt|~89k1)Bg;*x`W&L1w2|qK~l|>C)E2y;9grGd;KJte$2TzA! 
zS7UN%(?mBss>(z&0rU@;c(Vf(VN(0dodv_d7peV+V^+{7aC;2S<|;D{ysrwEN#^tu zDC$iX6-uOzE%D0(#Ut6+mdi5vSkjr5i#|<7#)~-C&0H!4qhjrynKqJ14YRNv`Z8mz zsb#3>=AkFA+~I6GN@+JEf)xI5N%|UDgkVz>zgg}gs?qck*<<%9dC7NIo?z0XgYtev zutd#a4JZ->QvyTRU~}Q`%>UfmGr}QVUkA<+7vTD~)!>wK$&uRHD?WMOJ>Kr zP1FqOitTak*rs#oZziPgmX}tF7&{0{FoYlAJ>YN9tbQ;&D32YwM_UFb$6Ug~S%7W6 zmxgOMdPW_qpWC{uY4iCKh9=G6>U#^5KH=^aON^+zr&L!0kscZ?PBMlM5>73OmSUxJ zSlJ*&CnDC~AMzlpG+jxBYu{#iFg0K0*Ur~eNRYILSLtp#k3%L5*xRB5m%zY5PJ>FY z<|EwHHiA6#EaV-Tldn|ZE==VrL4s1tl%RuB;jbxhf`d51oD|B7PNh*Azf!8qS1wEw zp@UgIiCcBox)f(Dj}gp%5x-_h8HSdz}&t=&ktA zUu+)q1t$rFmK@=Ad*jWrvEDsaxzCShd_CSTcNcfLxqi=n|Gq8%w_zTYfbpk?V>GLy{{R#rIZ@?)*&v&$9`H~l z+>zws1O;m_jrY4dxN(Ar=XQx;`$S*S*rhx~2y=4(JH-E4flFC0+_FsJ+cvatD#0VtT(i$kt)1TuSPV340qBt z)*`Pa^>I{Qxa!Hq8aV+&n@rLoDATX$CQCyJtn+v7S*Z3D&|kCQraFj+>30**?TYw@ z>XgssK-nLk|9jHL3;=kq%kU@TdCioUlcIgXj%3^wNN_|rdMFPUPv3JnGa zaOFen&bPgMSFRSRyMRre4-tBJPn!qggW2-J=JvfYH>{i2rtrFhsoA-l2e_t6wr~!w zLBQwgqG@@41IR4(s(wFXV;TXO{~V0?C0?!6(8m?o<{q>($n5lkjudPJs#G?6q7G+uyh=xKZe_4XJJ z6K0K^H7YN9oQZbdTcsJsa-(<^n#<%neFV)m!%@{t@B$FxgElFwoa)#-PrG`uLo7p& z{S;4M_`&uZW$?YE4kaXVy?G8}7I_y5w1W88@C>vA_l8-LrYlbgQ$ML5`8~FXk9790 z^x1NB@!(m)=zcmYOac35lgz!+vYGU=^3eSO(Se4(NdaHMG?U>s_hS2VMUH+3D%^Ld z%2AP9YZ_CNb#cn;`vm$JQ758esl#~xBN~nm!jPdu=Qjqe5mrTV&&zp_APzd>(06a7 z9d;MT5((vDU&1o>ws{oiF;vdljWSe>Eq+t6jgHq^2A4Q*rrHCXSvhh;9J>O&iPuT*y< zT`it&rQ_p{Z*cPzQf-0~$l93gZU>ExtrYInS5MR5))YI~j5WYvOP)hFz$Xq+Qf83X zMWM-4a)Wwx@kUK%wg^pCRJ*XufoApE4QKr>`w|duzcV;!aq<7!!SCTsVo-Zhl++X&l z3gr;nmRo@-JE4s_oeb5p5dV~J=3#(p*~DX141l zkpXs6SgOpRn`DY>V=%ix59gG(OFndawg;C|`*IPvOv6ST7@X?I(CcPdqGd-=squ6G z=`qS5cc{o|A*L+XZt(9{p^UPG?nS;&1J=-?bWEfT=wGbmr09z9^-+wGnN^7|WF-+$ zL+0xb_jh)K0i?gejrTe0oc+r81-rL>E%y7)T2jXh1xzc>pXS3_oLyllc%?9=^`f8G z0-H(n0y<|3UPv<Shj6O-z1(C;@d&`uE055o2V2ss!g{T^lyW|*g=&Zhj8a6| zw-m?!atCI@n@EkU+t|o=Oypw2byy9YC|=cFbzSuM*8WMMdK{6hx6rf@vCF))e%*0> zL8&yvY*sgEfAgHauuk{^R^gQC{?y$oT%9&qP=bARXpr4>r;xeoRRty4Dd!Zs(UKXo 
zfMVDA5V-zOw!}45QpX%%hcG>1w&wi`moS1Pi>yY?bzus#m(vvQhanjT$Li&!u}-h2t=E#Dk>|g+DU`Q*}jkR^#F?z_W2eq!GLmeR_Pw>Y;+B zD|rR4(KcAdYEDE;$3}M1rP|i?55h#wYLS5vMcZmKQPUWB$UFzvor=}2Z|mk<+Qm&&w|afS+aJn47^Zu8}V)Dh+j^`VRp2~8*9)4N$Y5f zFYPAV?&t1Qz;ZTrY~{N5o;=GY+ZI3A&ZphazIc8rJn)A)Oa#h6gW0OZL4(G5+a=Fr zffc+Eo|UG|z0D*14*KmpKD?Nb=(>44pqk4koMhcz)-vX`LSJ?S(T9A*ZSS{sKr2?Z zlap3o@s7)t*njKjRdq;qF20M_riFokzUx^2$pdwA_OLc_`X~8wNZZD4R}|?}xBRQW zVKH{-%Aa@}O?*AaSw^ly`9f_3Yj>D#lvT}q>1#9nVdC-@C1t2i(5-*?ZStJEk*Tw> zMJ$(9qRJcbj%a+o-g5o80;NQNHFo|${o`Zo3|^&}{KNTjL<1*g$ws{__ZQzMJ>|F! zp05}5pdxTs6ILaiNqfgq6OlVKYH!<^ssT(dbp!W}6#HbVX8pL3pRRr($scK*E30sV zV#{FrOfaIK;kmsvYTZAjRDH2|#2l1up;IbI`Y(=5G7dhS;3{n4))14T=VoL~64?Yt z|MGB1pzUkK(~RTxEj9cmf*J5}(V1F$_a_3reT#z=Tac-_%6!ijZ%GN=A}*54j>6E; z#E^`uUO*WII~BLH(_1*dS{&Tkw)XjJto&Z+{ba;$RJLl2X6%qcMQRUOLTskU2VF^Z zscNE19_yQ9=C{`j%NM?D^N<-%26goCTLKJjTLQkh+&O{Nx^0;WcIN!?oR?CyzL#f% z<*OD{$4eBHxy5;i#CJ3$i+`RlrYlhATz0#s)tT-(&gyXDsQ%Ogi5X6ZZ^-cIQ$xzx zj1#K6M%c`LgI6=>0KVgj_^3Q;Z!txrJY~7vL{9wS65?sO`7)RU|IF;iP%Wppy}oW= z88WWc(+~6Qog+&G5@IgbF`}_i@S=V4m^BN4$zBxDw6rLFKII_y!#00;FQ1JXJ%PBv zrCPDG<3@O8YI`q>bOLnq={zcY1a-_|*>6WtQ$iQ_v1VF+mY&xqSohQYaz`7G6`YM0JkA$2N!B+>j00mWzD)KyZszx)gZWlk zAvUPQe9QPhitp@Er$;P%nV2rDw*4V$Se)N}kr%895uH}xsOR^1o31w@ znM1#!)PXaoPNb5%gBxfgH_f-AI8Z>_gf%#v+96u|=yRF&YU;RQ*`*PFrHj}c@!(4-G~9X?D7rsX^*rWt^CLA)SX6)O$e z&&x=|d0XJo5<*k2^PQqtc4i#z+R}>$;k}kYwsyXDXPgL%!SlBSwl_WFnNUfizrMc- z=?=FfAa9GFX7h**5)xmHgTuz|@)j{YmQUpZmhtbO zQG0f2rJveO<;apkiF2m;MQ1}&<1_N;03Ek=YMUNGkH$~U;{H>h`MrA4xcT~Do_X=R zwW>Fd4%ytH?P4vv;mnR0XI=N^lDBL;J&}bb89_yD#j5mrcU5qK3G0;nr}HEC^)bgr zi~K~d-oiaT!n`XIEBFNQh`$p9z5R8H#v%CSRAscy&BUsN-?r{lTzt z`W$QKfYy~lAnNYTnSmgSar`;$+<%J=2LuFEBS>$9DwB-%CW(hJK#J(rrH%~HIloX2 zZs_mQuye97;c1UgjtEv&Aq~zV5JzVKA#CmfHQ2II%&=qJ6=MfMcjSRKLj?t#xs#$g z36w;Ijj)4<5dEd}=WI}kOY(&u#-!IAV+|~Vn|vis1#BUp=*R}Cqqik_i=eBUZQmy2 zG1~GEM_M!H&kjl~e(ZRmJU(h$*Rh8)pmn`ZJq=iw|S}mlI9rJYvb5B)SH3nwz1jz%tUoRr-s;QRn z!)R*uy^D61HRDn#Vqbl`XauwU18jdSSExCW#+G$OIt?5opfQ!IHPSR4m9}htRb4U8 
z^Rh#Vaf5jS%B$~^AN~ZQc~6^FWxUEs%C*CSl$itqk~bkBl3dURb$%V=vbP1g0DF}z zoEpnqYpK(tP*S7X4C)Kqa$~|GSyD$m!C?$-I@R-g2a61V{|4BEHrD@yTvktu#-<*P zIywcHv83L7jLEyW5aLd-pfaNSsV+>15qTTdhz|kV;Z2KifSmX%5L|~B^ZOh_YCR7W z$3HzK90=I;3+<{DF^_Y<& z9{YPiG+1Qn)Z@eiWY+!c{r{lWxBx9%(PdpDnE-sfeECavy^$tj!z-BJhGDOE(&Gbk zn1jeCJFVGXDztGl0u}52GR6Gav@xw^jHb)QTsDPNH!P)D5+*)`KMdUF@H3cf<%w>X z+6KuMab78pO=|ex@*{t^Wy%F%D`yxnl;XLO>+>s%t&74?nFb<6< zN^cM$=@s0wWZmTqYGF;F6N$&}bWDEy`EmtG$uq#ep^d-@L`2>%3ESYoA@aZ(%gA8LCJZzsfB@xdgU?U`VxfxRZn;3Ivyctw&V=WB9zjLlc8#b9VE- z7mq#OMKb-mgYPa3S?L~H=8f~RS$zOXbHk^7chTl;=8I8z{ZMmsBV||eSyI#9K@O{z z?DgwkQ#C*v1RxGfrGnT72lb9~#txtMA}$1YeQl?A=kxw}w5D?QtYPZD;&`<7^t)>B zD4+Q(tc|MUyFP`!t$|Yq#(A_s9)zX5?PzSi-8ycsAIDQZO#~+?n>xMB@cEc#xX&>= z5uLew8^m9SKfmW5p}%^2yzjrk6LmYvt}UBo87$Lv4H)NYHAxC`NMhAyfvYzQAYu0| zMAaM&64euCd?EPn>SMU?+B4t3
-A}X~1jEl?-98HXsoEPCBpsS~ZICx@~yhPb$A41%<={Ejv?wamzo6u1{HFksV$L9v-@wONT|m+-a;@Spub2!K=}W$TJ{#M zcFxKMhTowi-T&YJcjstb-Nvqv9qH3|{sVHzO;S9di+n8NP(-WTt)iv7dxkb`h_i1r zjdkwy^Nq&{N+R)c)3voU@s)JjfctLp`DP_?EYv1PBUW2Q5JPFL^Y||Yond;IAy4yH zn;wVIS_Lx|O3D~O?&3%7Ibx%pUwYE6!l~C|+zqq80^I=!m0l2@S%hwBFA=5>|| zX2U*sPm$t*v{Bg~TNTRb1k1tpX{Oe#vtZk4VFNadVUKqnQOAH*^ixEUh2<8pV-X0@GXHpEB8e98}G>iyG1Z^mEIVR%>qy-S4s+NMQ2(>R;Bxyk4g8UCI; z>J_8zB@oKR>Wiq7`^PB1quuXN<@13?2W*M> z3)tfuJO?8Cx-5MzBQ{~eSIR;QSh=cKTCytKo&=?cJ4s_Cg# z5SI{GlfsG`PSo1KNX9PF2cP2@Yz5?p;YKEMGK6+M^f^ip4EkPh+)|80m?pp$adl5TyTp(0^^8H;l_}hW zIkxHVL2PJmX7U1r+tPC)@?1)j$xqmYt!6E=BW%@+8QQ1I!As7Fw@x!9GfP#d0#xnL z9#bpzcR2dFPq}=#JDfu%lCP+1RQtOypHmA(2&!`|{71BAzUo;U*&M}Wh@%o0?ndZX z|Ki49mr$)e(NwFg6UlO3VR~uxM87!9$guH(dE;I55eVmFzQ(Y@5sh5Y93EQ~_GvtH z^_@ZL+95xOnooc|WnSHQuReGgI0f6h#YJ7r?#~qiW9UWwKz-DQZqO(k9vb6AZv2m3 z_y2u2JXY5%iQ>C3eePT0{>naMt-{^8s?ib=iJ*w_e}Dfyl$oSkBoG8)k|O z7Nw-%=B?M4%UAI;nBQW+Y94kP=6MpWi=x+8BikxHcTp7tv`@u02Hd)tci*7Qd;+4t zbhoc9oK+A(IJ0m)w3&nql;_?6Pb5$P>n-W>-nULp!(C|^gMzxxL65dD$?3m+eD=KyBg{VGm=ub-nbA_61CjEn4$QK|@O)2k>?O zhx7-8)CCr9()lJX_USjMLoxyDTKqMf6W1ouT9o`^{ngNi6|(J&Gn z2$M`IKmE`mL0_Z^Ge*@H2_8^ramRN2A|VojDgv&qk@a=@!%P|tFKOQ#&3lCv+%Vie z39XegL+XGE1xM?oF8ngNt4kU>W0y?d<~k;?AM&8amIH^F7fsn!&2zLRDZarF2gzL; zOX{l$?{&qa^@Y2s4O71LjYz%UQ}372tJHFFO;g!hEQO-vmL7`5e@+><)95{-e8GTA zAwBDKQ5=7<<9;GAqIm(wB31TSiMmr5ch)F!V;@Wmp5!p(PLYO>KcyE{i zF0MCk(r?ZR0~i}f4}wi=P$bmTnB@Y(B-r;nghzh>0PHCO21N6sR;v(zS=LxT(;XDJ9)-8bMoAh_TgB{FwtkM9hLF zdV?}`adf0{t(23{NAFv)r7ai4F+!||tP*RHuuZIaYNl{1)JPbLE~U2Wg3p?0Gq=p! 
z>(Hb)Oq_FL3@zclkCL~g)+o;34X$^5Wh1wE+SE_EIWBRcWN9Qu%HhR-0KaQ=IOe3S z&(Bbh2MaAjNr{`u2YWTDhd3fh{p$IU_<05`z}fetnpHrO>0jYwMoPRB3U6^USzYt z8V9Ww$j!5JEumDy(3w!1WJcM=b}OIC9i4r8=DW(Im3U$j@op^!8%bhx)Qf3gMTVGYMTI8+GMKUV8K)k+LGhL}K^!Zpm z6a_UgRY(GpzbRd=LLWJoVMl*}CW~~%YO8JY?G{rLSjU^>Xixrky((fVKwe48tb=tg zt)?5Rf=bAa6$o|p`HM6+tO;7%#>@b*{{(w zrhv&$d&T3VGGy-h&Afy85+hvC`J0Fo=N;(=+T6M7L7{hf+V9_>IGI zz5VcwIU=@D|7o@EV8vyx8(EIhPOG;{bIV#RrPUf=kb;C>9ULinF1*JFXP z_Jv=5>32 zqQyYw>Mdt=5X$0+8AlXl%f>cs&(PAqhJFw5wKxh9yj$vXRo0OA4noe5rs!mYoD53A zF(_p6p>h7sL%AJ+#R?mXQ+df@@@Zr9M&fEzbC*AwC;Ab%jJ;Tc%z#tWm=8;A2k-m` zPkJ=+XLlRo9d6dt!PpgR+fi%VM~>8G(WiC@^2e}#jkt zsaN^-xIrfPSvl}=`cPL3&Oi9^v zeA48t=Jqas>}KfVWlj_Itlm0WyftNuiB4+>=7cb>a|<(dHOjMhu1r6qVQ;4FU0I$E zT?6pmg_D~jkm*R>4_y}zA7{0iqC~$?hZjpVA*kK-399(-#QAO$tFUf2b0^WaE4Pe_ zELbaRwcB)i$_-Y`{`kSQ zU$*9Ndi1zmQ&-)p1K3VcFs}-!*a|_k}ddoScki?Ys zanFUR>)EOv`l&q-Xvxv_aLLJ44~&q_+vCdsJA!DH5SW)m%8iOxx#5w8)bR?=MPK-J zhVxci%@boHz10>E>=(sr{YIE^FA*m(3QWiF;qn1u-A7s2gZm@#{%1rtA}mrK=Xx#j z0H=^hiX1QnXvt=^Omx}cxt1eTFYe8lLohcp9^?UJYPGLR!wD)+*Ao~`-zRL&^8??a zrwTCWln0O>jGV4H>n>ZaPlV1witw2%sK8Ry9HU*lH=H+pa|!2ouH>fSmZf}QN&>%a ztPh>u2QhOR=G`MK?y!OsVbzT${y1+dO{7aHYJxK*N8;rEmH*us_hSI$gjWKo)WoOf zYfUY8X_NyoTOiy%gHryPEQ!h9nk_8iUh?`HS%4sBiq z0*hPCUaIqBsJlLK%$8!zhnq>=`U-P{Kxi_9V*{y>mNR(v7+~f-oVgx^ZPD!6Y!~~3 zGnzGkeSI@FlBsd$t}V?r>MkKpPlO+?b;8O{&Jf;t^YiBzPi_D0Plo4b>vAqg1#_Hb zh~cK)VwwrD{M4MIOo6m_98BaW!9`0XvQqd)up#YGAP_-%OCp2xcYGlX{c?dvM}x`? z68d?8J_hQIi!>Mt@s=yDnjY3XZAPE6)Zt3ovor+g`M1dj6w0#Z37l`cxhK;!JQl>9 zdGJ>x96`TAms%?8-<<|-`ZHJ?E4W>0a|czb*>@W*Ri}hJ>$H6IOt%&L;5**Ttpi=! 
z8yH)+dP9T6X{A0P5>XN`#>Lc{!w*|IS^DFF0bkfT{cy`Hbgx--?J{C{l~=^sp+~~~ z?Cg`+B`z?1)_h+=B(i9(#B7ENTDOqpx_%iT29Xw1V46yPcjEr>1+v4s~JqP-LBQV+edTTU_(_oqkdpbftSAcsgR5k`YjG7=kUL2ZxsXL$@+TH82DTi#$q$KVET+XPBNl$dO}T5 z|AZJJQ5@%ckXN9mGnPygJKBYVis7n{JiJ^adxC$!!7^Cc^>Pn_KJ!!zz=CyQ4=be1M#7n?V#u&s3{?LmDE*ezI|Q9$ii+#Bf|xi;4FI}{#cPc5Ux_GEk52Cy&gEY=EceX05xh3F2h9h zjw=R2PKq;*)IL$9K+VzzACTz!*5UA7V1qm0Qz zRxKLK56-&M7lyEjDhpC&?D_n%j|O_BfkOblN-L+X)Q>taQ+Bxw-$4j%qk|&lA`KjW z1BvtZk}_6z)}dh`aL-gTcu$Wz0Vf zT@xoKQ>WkaY}e3TGT7gNkI zls%HxY)wjR&^;f#Qzw?~^%AfZ;+u6_Td8LtKe`SsH z{FOD%rUg{c_<+uMy1ek6r^GT_woEn$&e8FZL8xR*qxEIP<6TWdOND_d`S_+D!{m1e zR7jjOgdDF1_m?v;Nf>Z3yEo?RqaZCO&z29v%yC>KF>={dq=8;Vu~(rKj+=hM1HD;} z{)re=tYdGz2JQHI_vXP_uO8~Kl^K71m1k_1!zMI zpcTqrTKVmxvHLr%;O{d(JJnC!_(pWWbpPe@@;;TfZCH61ILL(%ysmNuESO-?M@wsg z=Jw^#0<=qSFynOM{jts#VLEEILupnlIMk;t?2$D8r60B5yaS<|y1E!moRxgt+O(80 zuo-r87BmS&@~vkl3$~pkMP_CK6}aS5*~?(x=D+iobzyNj5-$t+l+zsx{3DcdJj~zP zQW9~#VPfoMvD~68D~CBEisPwJewA07>!bUrc$vWi+Mw$jJW@DCSrEROyiu;q?~-Fz zhX{a%{`Y`_7rt6+1zaKh-9i3s+x^u&{N;;4yyYYWs9^TP_U4P?I-p7se+GG2e_91^nb7 zVs)ey8n6Xdb_tXF3btR&=N)pgYM3LE%pZ}mMm>qh2>sKuAt>kJDxycOXbP+4b#{i? 
z|FQ9&ILltsfGZJz#sl1-f3d&*o=tu={kQ$`?~&0Z+51;AKtRKHEoY7dgxOYn2#-|e zA)yXx;ZpRwr4b*l^D(MI#3Gn`UflTX-nr!!@Ukw8?5n*7&+`gp(vt@ zpi>lurG`oMe`wjl5q+;I9xpG}%LkLP%+iY$DDQ5q>FUTxS-*^Ki)_NEqAo5@8_&ErKboGo4 zet9u}9ts;6npm3Hn^;-?92vc;ngD+$s=JQK3+QHANEcXObg1ni)i;y6ZV^Lai}g$6LPp-GXH}f8Yfg06`#tIN<8M`xB^Vu}2%6#1{AiLE!d5!w z*+1-ZeM3};)_~!1L{M%aAnSNxGhN7Gu~SsG7if@X0ENf3Cc9cF3pMc<$kdHSIUezc zNnDid3-ZeP2Hva?8{3>cd&U*mZH;K%?fd15LD}k@s6kV8(vL<%){m1*p_U9is9Y^) zV%7N^-6^#&p#B_Y2*=uH!qgBxO=*6k&$q#j!6;0B#iItZFCLA%N zQiMbPDoTA!TMSN}({Zj?_)NMX*qdtSh{3S{tsw8xNW>I^T~RgkMwKQIn1(~*vUehf zX2=HDzyz~J7I+$4!<`cnjoA5oiQSfr7q7#}Mcc5`C)-bXN2|V@UsfN;yu!cC?+&ex zJ;WG4??Y|k1W#y;AN!b568nG@ZhSjbTdHx?QjGw+Bh+AL!LMCS9`IUx+|#I|=C zszAK!A^xzan=}%Tk#=mX{d6nr%8WQ`a8NT}MO(V`QLkDvcpZ9-wM`2+bm1ouxmw;r zL4u9A)#&CO2F9;h*MM+2CY1+f?+nXZ>R>=j%X_dDQv((H*O;iHoCBF^(K^v~6hs2^ z%t&H3b1(9*(&t)kv?ySb62aRwBKs1h4L$>+Gwa|TG8TinYF8!tq``yExPo{$>7o~^ z9qL`&Pu%6kc zsQz%ba4u&UM`eyEFIl^nrgEB6Lf^-*lOG@IgE1eRfm&eUg<|*A;YC*I{V?;~gWtx& zPorjaRe?Wx9Oa8lQ7PiCVmN_4Q3%};hRKH_$_g19&m)gB^MxRzB0xaZW5w>?N|arR z?+*o@jEPw^x6DvMXnw{foiSk1{>(WZ6;Si#`&c&C^_gva!L1>F0M@f3=LO;8Hp7p6 zX1PSjyYox2cA-|Tgqo{mSS z>Q2|Tj3_kPY;t4^MK~Is@Z6-@(XYAQ{MfjhOBlO6aeuhI)lTz?Nzm#xlb;@h;T-CJ1>#?AHE*sI( zFrV?^Lwb8#)8hGX&#~nKuLZu#&erwa#+^x(%~fmDQ{_YLV(UBCc1B6Rlz^p& zfx~mWcDM8k{G|wj(^ED+^HX>Cg?H_tRu`>zt`FTeRS)(rwcM>sJu9&dUmD$>Zzkl{ zmMsXXp4c(FHQ zCETW*sS)YIJw3I=h<9s5a)A8l<@nmdkE}>yw6ssb2`0?zU_FC(gOO^Ig zSq~L=v%7Dn&%t7v?me>4H>WSpA67FT4KW~7Vi9;r@!&krZea^?-F+hYMUQyOJ7F;Z6E zDI=qq997Sx?Pz&d>geU+@lvreb9`wzL&IF+MZO<)T{^Vu>Pp!!aJWCM+Z01!uySy3 zLBDeFj$&ug$f|Axx43y`@l6*sN*t`{Vx@k;;l=op$%v%=S3pFEdxPe2Ds;boMf5mt zLV}^i4_`FdHB*CGMCIz$FFlU)VU9gEF+0-57X`9vMl1WDm*XvKaRgzgFbUFRbYDt` zf#dCr>l_7)w%TBdA9ZWW?Z4oRH%^Npkcps#U4BK@N!G**NCX*(O{kvMy?9&N@VOhi zz^cwhSKc>$R7Y}73T13PwnpJ$TX0t}PKm*+#B5))ac<~M`mIr$9i#qC?+Ht+2zW{( zJB_QUacNac*ocvN?attxc~A%$qQ>_yN1rjulEOF{{U+S)JazWZ-7p1KHFK%lM)q$Q zzxNbTCy*_4Ia>%#=IW&_W6m2{*bk-d<(kN8or{8>`mjnCFg}MlalMItR7<~3He+^i 
zaVP;O2WAz2GEvB$3cE)#`fHl33CDt!5k~PRblI8sC>{$5-w^>aNc4x>UW;rat z_0!4X=?-JfqLoLQ1h{L!rUI1o9uY-1(r%svTag39t#uYaVWM~$1T#Z z+0C{a+f)Qv8dgR53RjYji?0LjDmJfO5$5W=7}Qn_)Mb)HElu&gAI9Jw3cr zag9PnJGBj{h;$qYv)*GTAsuemG+rsL;<{>fLiW94c%3-WR?6c_HgaN*77~s`nm~FRD%=Yg!nQ2&~?q&kKe* z?>*ZWjZF>Z;Lo+pKH2+ppi#cN!|YmH%xlb?{2tJuLP7yju^f3*>04^f& zz4E&01byA-4J(l>$%S2Lbu`mwl&ckF16Zo><@-2=%uqR)ZkrOeR1rzoR~EI~31ALc zLK=}p6QXP5Zx(^gh=himNxGU_BO&xC?xBxm)nU?#NMFn{tt2rV(WvO{-Lv%wvDufd42SR16(M2^zl`Zvju3fqzm`J>cGi+x{;J^!VSU&zH%*xFa5zO+P9-0`6a zS@b;b*P4bs*bD5}11ZD0;@`(nU8jmzk*t6@>vj^*`qqW?vn8yzR9OLVGdb&5*PtBg z3$igH8$x(*ETv*AxKo~fvvMU_YZr|5wk~Mm!0P!z)sN*!9|-p4t=Y+AN`yaV(7{|A zwLeALpG_&PK&mJRLp_@dnwjHZSTjMbecEtFdZ{?9rj(E}DRTrj6$zL>ZsG^+O0Dd$ zc-dUI9gMq_gfS2fqr>PE^x0G-Y@JZ8`pmfcjxVhEki&tNw1cTxQw$E>=Nr)?C%#&aMXBeR zqxfB=tfDY?n7;x&OQvQqwT(Dt!~&O2*^v=@mj2ZC{u_1=kY=%jp~9Pca5~@gOq^59 zs;JDj%|&d@aqt|#VL&x}pdO_w!nw*_8jFg1daFeu?F7~t`h)s`1HE#WZX#{_C!S?s zXR>SL2y5Y-B8>-}s z*7M*?kWUE|6)ve?@M(RAw4oVgLuSKUS|!)+L|Tdjc1Ok}7%bqMe}#>gP9^yj8Dy|_ zBzVvsaaJF0MffbcaR=lKG^Gt2pCj+BKN!j)5EnimtB9sdm%KiIvCun1HP5}ZLw83M zXEie@9)Bozn6?x%nLDlwfd#3h-me_rUFaL7{{}A|W}B3^i!GBvHDe z8@N+U>pJQ5`gQQ900vv9RfIQvKXK$S^xg`l(9ljoU8;Vq$;U8#9X)gJ{*@>)o z`YdpDa>U++C5hH^d12pM8}}94*{YYHbeC<10Ys@rb#_!W2S4&zI7Rd+gXZF9uPw+8 zI;48HwQmwi{Sw7aE*&S!1u@Svl_uV7rcck3WdGUO>1_~&3iW;Omd>}7Ug+dX zTp9+vtIGsuG2$-A%)(s&kt*PO)7GM_Xiyy5AxymwL1D6dKrUm>!2-pO_pPL8EHh7$ zGUt|SA>2R`A<;+dMx0HM(t^l#JQL)e=WaCe^ofM*hx6qH$x5WG*2+mXs9j*sFZNKI z0!g%@P7VqDv{M~WEhHs^5v4)kmD;$@kkt1g%PaQb^2%7lz>k?#wDJeNRAuH(NplO7Vnrmnc4*kh3CN@7EE!ZZU&1ZhProJYz#WM@ z+QFcyPzDR66r3lf!VWcmvTB;abTn3CP4+)fnzeJOPJz+TYt8Y@SLAgF8;Z-a3;5jC z<2jB|$)&h5xKkV3`R(>TKNNRisZzB33mfj?A;URV0cj_8L91|PGN!6`8>MFFGWrW% zh)D1k^L;C@#uCpM8+@5LUh5B%835m|2Y)%EH?qJN&I4*mss1h zI;fyP_#PM-bpSF^w|OKnyG_d4OQe$eS#;`*LBE=0Hq0<*zSC~z6OfG2I}!AUvE?u} zl=xNU?`oyck-(RS2k!eg#2|Biocx1r4mtBXr~Z98W?^K?iE_hO%FppOt>FXWqNBE^ccH*3#d|#CEw!i^d9kIJ+7c z`v476fC6&jYs0U${C(5-32$Y_nko?`zVQ7MB1gV*W;mvfmR_xXxkTN8{12V4hQwRj 
zx$n{IfNa)OXKP>{!(5~x=GVV=d0GofGtB2+eL-e#V$Xj60nsA_T}C=~xR@+)F3al3 z6M8vt4H>N?jK>C?&Ps#2NJW2I%OBQk(&=lb?d)5w)*_3^;ynzFr%Co%?sc3)Odk@{ z^baWmzS$a~ah;;BxQBwPY>Zw?qSjpIp=H;CFXlvK{HxRBjB}AHGLTP?q#S{n=mY0^ zQ>m}ef{E^q_jAgBvh4gjw4eiBqb|yQ1?D?t4wg{TOmxoR)$Zt z$7dXBSoOhGLL1V&#z`2T+Ri09S4on-&V!x6K1eZ`T7RjsT>p}Vl)cgW_2WK7H?dT< zoFcn!+0sKO0*W+)xSd;8?{MYDT|;9|57<%xW2Sj*v8oPA%2o8zxgG*a&)*`yIwBv{z?L4h7;2#E7S8CzTgt>3GL{ z_DG{+>J|1E+4LMj%mhnPDsOpH%%gT|G=hzy#lY+@BH05fgix4)(+{24R@l(61%@(8 zPYk8NtpwKTuRP1Yr?SzLYwehLT>yVxJl{kx8c&pB5cm$gAKkFR)Sl1`ho2#~Aa;x# z(Fo1ZG8f$wZX5~=YIv|7264t6@y#_^x>?Y*JCp2`As zT1PiZOP553UJ@+){UO3Dv`~a?*{!N=#TZR}C>3s{@Tkn`1_(&BcOSp7iA_gs#`EI# z^@7q#N!hT|$w!aRqN%cco%sNQLEmP^^fp#hCH?B?-OAKQ?~1T~WU5m1 z$g+_S#AbTdI1|C}&aKl0cG4s+6IQoJ9F0rrSQpfwJI2;g(9L6hi-$39uz;dxGMGEq z3oxc=x0Lgi5vF(MD>FWxGTK+@TeF)&&`zP|CGMn{jF(^%VCF)ncextigT^WMBu!3- z(aXQ{o+L}1xx-mO_`)nDCQMC*`384Px7HZS!J9C{j4_4vAhnJxPr(u+B=mQlE#udg zt-36e!#Von6W@+j_UUb5npgtqFE&`__IqiVl9=554>Jq8H2u|0u}^Z1I=s{B+^pe) zk%&S1HImF1E$b&&FRDI4mhTB~ICHp)8$dJeY*9%GM^?P7@^vGwJnrfVul=)wP4=9w zkPl#1k^=C(kpNYe^sV&dZLO>UTwE(#gTI2J0oD8duecJxsumihE!RVc>31sghyZs( zTk9|)sDaK`jidc(Gqj%@r*aRI*Kz15-d5tG6fXxJj^ds zp1Zsu@F;x_dn^d&-`N&y&xhGPVFmNmgw~@W!*4og!-ehnB^im2Q_LVeQ86_pKY9WLIm|^85*h20XtD&MK}`t&T!8_2 z9}xfSb-LEp|JUgNz3uP6)aWtU!@r8BzTd-!HZeMdbTrgA}LnF({PTghq?| zLOLhDz$iK}`h!{(&IB%=2d|z(l{SFJ=}YG?C{5lpNVuTZTuIftb#Z-LF&HUL5f%Xq zQw55OalaptT(H1T44GIZ(kVDAJSHt!%UTL2>x}EH*jLNeJ5U>4Rh;T~Xv}T~MIv|c zsMQ@$7%Pv@{zZt9V_f6D%mm9~AV6+JyQfn_n=Q=gD`JP@8g~kDe{gk58FdWZ6W$s=f+QZ+E@{p+wX-|ybc6Ya1!K8;tgrlf+>Y=#;ei-kLC#NIsA-e zY{?My9I7`a=i3D&?>Y0?OM#){^t&dHMyedOT9|o`!L@K$17zw7WxS(v*7jPh z3c14>?#8!^*>u5o3U(hdm+oiI>V90gMrZ7{PAbR^Rmhd{0UpDD+uH+&Bs^yTo!1B8 zR09-0{$t8?u(P+a_`m-9uigs;R2!#lw!{FKG$9`lMCVm#hg8k6$n)+fP`@gO*N3ct zNw}wBnhz~V)-&9%6_{HHLA0Eog$an`^6l;HQ*=B`&2}L*w^thm@6!qZV_Jj^ALZ|l z>;e~1?5DB#m6mx?atTD77!vSpPo!C7*`Zx|B``dp9;cbrFdY5B$6j2!x7c!$Tw5~&!Y%EU9 z&X8(XqvPC2Eb9eP65LZ0Chj63^&MN>6j-kKAU~Ooz>JEy=zJ)5IIwq2UX-~i3pP(0 
z0+Wg;Vojsu4X%eeLoD&BNL`VtUK^{(l;ZIvg05ugwC&KaoSPs2!&jHv{_Bj`Dfwa6 z#yop#ks+w91-XEH98soZ!>C&}7>9SU_t49$A0ix|xbw6bVJHY-?ia=OOzW)<5U92? znXB>roR~P^<9r4VyYg*@r(;3F$pqKcL`YRxHq{cUI_M;oDCQ{0qO-_O>Mk9w6X1v_ z_asOd=n4C66sSrul~y50W5i+L6(c_Nu6??-@B{eE3<@>~*NvCbh?xF2M6<28;(Lz$d_?{)Y?smskDwiC2!~e^D$< zdl(RfPyL>K7QWDO2gnES;39)GLF|E|n^=S_Xk@7fKHf|*X8GX<=&p{|whS;3-&hH>oH!RCg^Avkmz5x-sOi-MP6{oAUIAk93>@*iO7Zla8D@rZgc7ug3x1w z-}-I7NktzO$yX4oeUB?)_5E|n3^iyodp;K@=)pF&{1dj+<%>B!gw;Xkb-nzchw}dZ zY3hyIZrYHot>Sv3=rDx4x{OM`&xyv9W8D|Pis{$n01QG42%7%q+UUQ>!mp!$sFW@* z^`8L$SxWg&0Av8K^OsJ4s_wPm>w>bsMMVJQs=rGf{;Ry~YvKPaxA|KX2uL454*&@N z`r-d7zWEyGb*07MNL6qCeG`B4k$y9gUZcED$oCs%lI|zU>-2oD0bVEi`3=y-_7mWD z+Mm~=uVdkVi%tPv2mnpky^f53jqo}?{5Qf2|4)S9T%-Sr7JrTPIvDde)~d))tbfyY z{v__aMtL1`_Zx*r{3pu4Sap8|;{9ZKyheDlC&d3oc6$x@ni=*RutMV};Ga+GkLm1Zr2lW8*lXC=EUVwJ z&6+=9|NYQ@VgC8({&8r(W-k2(Y|#2scfa6X@t9uU-D}>@Z?Jyt-;MrXSU|5qU$YQ? zgNEt;zd`@xB)mra&#mciJRqQ9JHV{;dZYSU{6F^?e~R-v{vrPNj^nlTe=hR=lzwvl wLt5|OOTO0tuiuQnA)sAf>*J4(^-qMWzk38YV7&