# SingleTable.py
# Rewritten with numpy to achieve faster speed.
# The PWL-Lattice model is a 1-input model.
# The PWL-Lattice-Copula model is a 2-input model.
# Query Phase:
## calculate_query_cardinality: numpy version
# Generation Phase:
## generate_by_row / generate_by_col
# No plotting.
import argparse
import os
import numpy as np
from tqdm import tqdm
from dataset import *
from generator import *
from model import *
from preprocessing import *
from util import *
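# Fix the global numpy seed for reproducibility; a dedicated RandomState below
# drives query generation.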
np.random.seed(42)
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, default="1-input", help="model type")
parser.add_argument("--dataset", type=str, default="wine3", help="Dataset.")
parser.add_argument("--query-size", type=int, default=10000, help="query size")
parser.add_argument("--min-conditions", type=int, default=1, help="min num of query conditions")
parser.add_argument("--max-conditions", type=int, default=2, help="max num of query conditions")
parser.add_argument("--cdf", type=str, default="res", help="joint cdf model, lattice or res")
parser.add_argument("--lattice-size", type=int, default=2, help="Lattice size for each column.")
parser.add_argument(
"--last-lattice-size", type=int, default=2, help="Lattice size for Joint CDF model."
)
# Boolean flags use store_true: argparse's type=bool would parse any non-empty
# string (including "False") as True.
parser.add_argument(
    "--use-last-pwl", action="store_true", help="whether to use a PWL layer after the model output."
)
parser.add_argument(
    "--boundary", action="store_true", help="whether to add boundary points to the train set."
)
parser.add_argument(
    "--unique-train", action="store_true", help="whether to deduplicate the train set."
)
parser.add_argument("--pwl-n", type=int, default=1, help="Number of PWL layers for each column.")
parser.add_argument(
    "--pwl-tanh", action="store_true", help="whether to add a tanh activation after each PWL layer."
)
parser.add_argument("--epochs", type=int, default=2000, help="Number of train epochs.")
parser.add_argument("--bs", type=int, default=1000, help="Batch size.")
parser.add_argument("--loss", type=str, default="MSE", help="Loss.")
parser.add_argument("--opt", type=str, default="adamax", help="Optimizer.")
parser.add_argument("--lr", type=float, default=1e-2, help="learning rate")
try:
    args = parser.parse_args()
except SystemExit:
    # Tolerate unrecognized arguments (e.g. the extra flags a notebook kernel
    # injects) instead of exiting.
    args, unknown = parser.parse_known_args()
FilePath = (
f"{args.dataset}_{args.query_size}_{args.min_conditions}_{args.max_conditions}_{args.model}"
)
def make_directory(directory):
    os.makedirs(directory, exist_ok=True)
resultsPath = f"results/{FilePath}"
modelPath = f"models/{FilePath}"
make_directory(resultsPath)
make_directory(modelPath)
OPS = {
">": np.greater,
"<": np.less,
">=": np.greater_equal,
"<=": np.less_equal,
"=": np.equal,
}
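# A minimal sketch of the vectorized cardinality check that OPS enables.
# Illustrative only: the real calculate_query_cardinality is imported from the
# project modules above, and the (col_idxs, ops, vals) query layout assumed
# here is hypothetical.
def sketch_query_cardinality(data, col_idxs, ops, vals):
    mask = np.ones(data.shape[0], dtype=bool)  # start with every row selected
    for i, op, v in zip(col_idxs, ops, vals):
        mask &= OPS[op](data[:, i], v)  # AND in one vectorized comparison per predicate
    return int(mask.sum())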
print("\nBegin Loading Data ...")
table, original_table_columns, sorted_table_columns, max_decimal_places = load_and_process_dataset(
args.dataset, resultsPath
)
table_size = table.shape
print(f"{args.dataset}.csv, shape: {table_size}")
print("Done.\n")
print("Begin Generating Queries Set ...")
rng = np.random.RandomState(42)
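# One random query per entry; each constrains between --min-conditions and
# --max-conditions columns (the operator set is expected to match OPS above).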
query_set = [generate_random_query(table, args, rng) for _ in tqdm(range(args.query_size))]
print("Done.\n")
print("Begin Intervalization ...")
unique_intervals = column_intervalization(query_set, table_size)
column_interval_number = count_column_unique_interval(unique_intervals)
print(f"{column_interval_number=}")
print("Done.\n")
print("Begin Building Train set and Model ...")
X, y, m, values = setup_train_set_and_model(
args, query_set, unique_intervals, modelPath, table_size
)
# m.show_all_attributes()
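# Build the joint-CDF head selected by --cdf ('lattice' or 'res').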
m.build_model(args.cdf)
print("Done.\n")
m.fit(X, y, args)
m.load()
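# Sample the synthetic table row by row from the fitted model, in batches of 10000.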
Table_Generated = m.generate_table_by_row(values, batch_size=10000)
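# Q-error per query: max(card_true, card_generated) / min(card_true, card_generated),
# comparing each query's cardinality on the generated table with the original.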
Q_error = calculate_Q_error(Table_Generated, query_set)
print_Q_error(Q_error, args, resultsPath)
print(f"\n Original table shape : {table_size}")
print(f"Generated table shape : {Table_Generated.shape}")
recovered_Table_Generated = recover_table_as_original(
Table_Generated, original_table_columns, sorted_table_columns, max_decimal_places
)
recovered_Table_Generated.to_csv(f"{resultsPath}/generated_table.csv", index=False, header=False)