-
Notifications
You must be signed in to change notification settings - Fork 7
/
Copy pathconfig.py
171 lines (150 loc) · 9.76 KB
/
config.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
import configargparse
import numpy as np
import os
def config_parser():
    """Build the configargparse parser holding every experiment option.

    Options are grouped into: experiment setup, training-data sampling,
    preprocessing/multiprocessing, data splitting, generation, network
    architecture, and training hyper-parameters. Values may come from the
    command line or from the config file given via --config.

    Returns:
        configargparse.ArgumentParser: the fully configured parser.
    """
    parser = configargparse.ArgumentParser()

    # Experiment Setup
    parser.add_argument('--config', is_config_file=True, default='configs/syn.txt',
                        help='config file path')
    parser.add_argument("--exp_name", type=str, default=None,
                        help='Experiment name, used as folder name for the experiment. If left blank, a \
                        name will be auto generated based on the configuration settings.')
    parser.add_argument("--log_dir", type=str, default=None,
                        help='Directory for experiment logs.')
    parser.add_argument("--data_dir", type=str,
                        help='input data directory')
    parser.add_argument("--raw_data_dir", type=str,
                        help='raw data directory')
    parser.add_argument("--input_data_glob", type=str,
                        help='glob expression to find raw input files')
    parser.add_argument("--split_file", type=str,
                        help='Path to read and write the data split file. Needs to end with ".npz"')

    # Training Data Parameters
    parser.add_argument("--sample_std_dev", action='append', type=float,
                        help='Standard deviations of gaussian samples. \
                        Used for displacing surface points to sample the distance field.')
    parser.add_argument("--sample_ratio", action='append', type=float,
                        help='Ratio of standard deviations for samples used for training. \
                        Needs to have the same len as sample_std with floats between 0-1 \
                        and summing to 1.')
    parser.add_argument("--bb_min", default=-0.5, type=float,
                        help='Training and testing shapes are normalized to be in a common bounding box.\
                        This value defines the min value in x,y and z for the bounding box.')
    parser.add_argument("--bb_max", default=0.5, type=float,
                        help='Training and testing shapes are normalized to be in a common bounding box.\
                        This value defines the max value in x,y and z for the bounding box.')
    parser.add_argument("--input_res", type=int, default=256,
                        help='Resolution of the discretized input grid within the bounding box.')
    parser.add_argument("--num_points", type=int, default=10000,
                        help='Number of points sampled from each ground truth shape.')

    # Preprocessing - Multiprocessing
    parser.add_argument("--num_chunks", type=int, default=1,
                        help='The preprocessing can be distributed over num_chunks multiple machines.\
                        For this the raw files are split into num_chunks chunks. \
                        Default is preprocessing on a single machine.')
    parser.add_argument("--current_chunk", type=int, default=0,
                        help='Tells the script which chunk it should process. \
                        Value between 0 till num_chunks-1.')
    parser.add_argument("--num_cpus", type=int, default=-1,
                        help='Number of cpu cores to use for running the script. \
                        Default is -1, that is, using all available cpus.')
    parser.add_argument("--res", type=int, default=128)

    # Creating a data test/train/validation split
    parser.add_argument('--class_folders', type=str, default=None,
                        help='If set to None, the split is created by creating a random sample from all input files. '
                        'If not None, the split is created per class of objects. Objects of the same class need to '
                        'be in a common parent folder for this. Variable class_folder is interpreted as glob '
                        'pattern, suffix of data_dir - i.e. data_dir + class_folder, e.g. class_folder="/*/".')
    # n_val/r_val (and n_test/r_test) are mutually exclusive: give the split
    # size either as an absolute count or as a fraction, never both.
    parser_nval = parser.add_mutually_exclusive_group()
    parser_nval.add_argument('--n_val', type=int,
                             help='Size of validation set.')
    parser_nval.add_argument('--r_val', type=float, default=0.1,
                             help='Relative size of validation set.')
    parser_ntest = parser.add_mutually_exclusive_group()
    parser_ntest.add_argument('--n_test', type=int,
                              help='Size of test set.')
    parser_ntest.add_argument('--r_test', type=float, default=0.2,
                              help='Relative size of test set.')

    # Generation
    parser.add_argument("--num_sample_points_generation", type=int, default=50000,
                        help='Number of point samples per object provided to the RangeUDF network during generation.\
                        Influences generation speed (larger batches result in faster generation) but also GPU \
                        memory usage (higher values need more memory). Tip: choose largest possible value on GPU.')

    # Network
    parser.add_argument("--label_mode", type=str, default='full',
                        help='loss join type')
    parser.add_argument("--joint_mode", type=str, default='naive')
    parser.add_argument("--in_dim", type=int, default=3,
                        help='Dimension of the input point features.')
    parser.add_argument("--concat", type=int, default=3,
                        help='Feature concatenation setting used by the decoder.')
    parser.add_argument("--rotate", type=int, default=1)
    parser.add_argument("--num_layers", type=int, default=4,
                        help='Number of layers')
    parser.add_argument("--sub_sampling_ratio", nargs='+', type=int)
    parser.add_argument("--d_out", nargs='+', type=int, help="encoder dims")
    parser.add_argument("--num_neighbors", type=int, default=8,
                        help='Number of neighbors for encoder')
    parser.add_argument("--num_interp", type=int, default=8,
                        help='Number of neighbors for interpolation')
    parser.add_argument("--dropout", action='store_true')
    parser.add_argument("--fixed_input", action='store_true')
    parser.add_argument("--fixed_random_seed", action='store_true')
    parser.add_argument("--fixed_cudnn", action='store_true')
    parser.add_argument("--rec_loss", type=int, default=1)
    parser.add_argument("--sem_loss", type=str, default='ori')
    parser.add_argument("--reg_term", type=str, default='on')
    parser.add_argument("--reg_coef", type=float, default=0)
    parser.add_argument("--sem_term", type=str, default='off')
    parser.add_argument("--sem_coef", type=float, default=0)
    parser.add_argument("--hidden_dim", type=int, default=512)
    parser.add_argument("--rec_hidden_dims", nargs='+', type=int)
    parser.add_argument("--sem_hidden_dims", nargs='+', type=int)
    parser.add_argument("--rec_hidden_layers", type=int, default=1)
    parser.add_argument("--sem_hidden_layers", type=int, default=1)
    parser.add_argument("--task", type=str, default='rec')  # one of: rec, sem, joint
    parser.add_argument("--distance", action='store_false')  # attention pooling with or without distance

    # Training
    parser.add_argument("--ckpt", type=str, default=None,
                        help='Checkpoint to resume training from (default: train from scratch).')
    parser.add_argument("--num_sample_points_training", type=int, default=50000,
                        help='Number of point samples per object provided to the RangeUDF network during training.\
                        Influences training speed (larger batches result in shorter epochs) but also GPU \
                        memory usage (higher values need more memory). Needs to be balanced with batch_size.')
    parser.add_argument("--batch_size", type=int, default=4,
                        help='Number of objects provided to the RangeUDF network in one batch during training.\
                        Influences training speed (larger batches result in shorter epochs) but also GPU \
                        memory usage (higher values need more memory). Needs to be balanced with \
                        num_sample_points_training')
    parser.add_argument("--num_epochs", type=int, default=1000,
                        help='Stopping criterion for duration of training. Model converges much earlier: model convergence\
                        can be checked via tensorboard and is logged within the experiment folder.')
    parser.add_argument("--lr", type=float, default=1e-3,
                        help='Learning rate used during training.')
    parser.add_argument("--gamma", type=float, default=1,
                        help='Multiplicative learning-rate decay factor.')
    parser.add_argument("--optimizer", type=str, default='Adam',
                        help='Optimizer used during training.')
    parser.add_argument("--max_dist", type=float, default=0.1,
                        help='max_distance for calculate rec loss')
    return parser
def get_config():
    """Parse all options and post-process them into the final configuration.

    Prints every parsed option (for reproducibility in the run log),
    converts the appended sample ratio / std-dev lists to numpy arrays,
    validates them, and auto-generates an experiment name when none was
    provided.

    Returns:
        argparse.Namespace: the validated configuration object.

    Raises:
        ValueError: if sample_ratio does not sum to 1, contains negative
            values, or has a different length than sample_std_dev.
    """
    parser = config_parser()
    cfg = parser.parse_args()

    # Echo every option so the exact configuration is captured in the log.
    print('------------ Options -------------')
    for k, v in sorted(vars(cfg).items()):
        print('%s: %s' % (str(k), str(v)))
    print('-------------- End ---------------')

    cfg.sample_ratio = np.array(cfg.sample_ratio)
    cfg.sample_std_dev = np.array(cfg.sample_std_dev)

    # Validate with explicit exceptions rather than `assert` (asserts are
    # stripped under `python -O`), and with a tolerant float comparison:
    # an exact `== 1` check fails for valid ratios such as ten 0.1 values
    # due to floating-point rounding.
    if not np.isclose(np.sum(cfg.sample_ratio), 1.0):
        raise ValueError('sample_ratio values must sum to 1.')
    if np.any(cfg.sample_ratio < 0):
        raise ValueError('sample_ratio values must be non-negative.')
    if len(cfg.sample_ratio) != len(cfg.sample_std_dev):
        raise ValueError('sample_ratio and sample_std_dev must have the same length.')

    if cfg.exp_name is None:
        # Auto-generate a descriptive experiment name from the data settings.
        cfg.exp_name = 'data-{}dist-{}sigmas-{}res-{}'.format(
            os.path.basename(cfg.data_dir),
            ''.join(str(e) + '_' for e in cfg.sample_ratio),
            ''.join(str(e) + '_' for e in cfg.sample_std_dev),
            cfg.input_res)
    return cfg