# execute_eval_few.sh
# Batch-submits LIBERO_10 skill-policy evaluation jobs to Slurm via sbatch.
# Forked from Lifelong-Robot-Learning/LIBERO.
# sbatch slurm/eval.sbatch python libero/lifelong/skill_policy_eval.py \
# pretrain_model_path="/storage/home/hcoda1/0/amete7/p-agarg35-0/diff-skill/LIBERO/experiments_finetune_clip/LIBERO_10/Multitask/SkillGPT_Model/ResnetEncoder/m4op_32_f4_k3s4_tt_n6d384_off0_5shot/run_001/multitask_model_ep10.pth" \
# exp_name="eval40_lib10_m4op_32_f4_k3s4_tt_n6d384_off0_5shot_ep10" \
# benchmark_name="LIBERO_10" \
# sbatch slurm/eval.sbatch python libero/lifelong/skill_policy_eval.py \
# pretrain_model_path="/storage/home/hcoda1/0/amete7/p-agarg35-0/diff-skill/LIBERO/experiments_finetune_clip/LIBERO_10/Multitask/SkillGPT_Model/ResnetEncoder/m4op_8_f4_k3s4_tt_n6d384_off100_5shot/run_001/multitask_model_ep10.pth" \
# exp_name="eval40_lib10_m4op_8_f4_k3s4_tt_n6d384_off100_5shot_ep10" \
# data.seq_len=8 \
# policy.skill_block_size=8 \
# policy.prior.block_size=2 \
# policy.offset_loss_scale=100 \
# benchmark_name="LIBERO_10" \
# sbatch slurm/eval.sbatch python libero/lifelong/skill_policy_eval.py \
# pretrain_model_path="/storage/home/hcoda1/0/amete7/p-agarg35-0/diff-skill/LIBERO/experiments_finetune_clip/LIBERO_10/Multitask/SkillGPT_Model/ResnetEncoder/m4op_32_f5_k3s4_tt_n6d384_5shot/run_001/multitask_model_ep10.pth" \
# exp_name="eval40_lib10_m4op_32_f5_k3s4_tt_n6d384_5shot_ep10" \
# policy.fsq_level=[7,5,5,5,5] \
# policy.offset_loss_scale=1 \
# policy.prior.vocab_size=4380 \
# policy.prior.output_dim=4375 \
# policy.prior.start_token=4376 \
# benchmark_name="LIBERO_10" \
# --- Epoch 20 checkpoints ---
# NOTE: each sbatch call is a separate job submission. The final argument line
# must NOT end with a line-continuation backslash — otherwise the next sbatch
# command is swallowed as extra arguments of the previous submission and only
# one job is ever queued.

# m4op_32_f4, offset loss scale 0 (default seq_len / block sizes).
sbatch slurm/eval.sbatch python libero/lifelong/skill_policy_eval.py \
pretrain_model_path="/storage/home/hcoda1/0/amete7/p-agarg35-0/diff-skill/LIBERO/experiments_finetune_clip/LIBERO_10/Multitask/SkillGPT_Model/ResnetEncoder/m4op_32_f4_k3s4_tt_n6d384_off0_5shot/run_001/multitask_model_ep20.pth" \
exp_name="eval40_lib10_m4op_32_f4_k3s4_tt_n6d384_off0_5shot_ep20" \
benchmark_name="LIBERO_10"

# m4op_8_f4, offset loss scale 100, skill block size 8.
sbatch slurm/eval.sbatch python libero/lifelong/skill_policy_eval.py \
pretrain_model_path="/storage/home/hcoda1/0/amete7/p-agarg35-0/diff-skill/LIBERO/experiments_finetune_clip/LIBERO_10/Multitask/SkillGPT_Model/ResnetEncoder/m4op_8_f4_k3s4_tt_n6d384_off100_5shot/run_001/multitask_model_ep20.pth" \
exp_name="eval40_lib10_m4op_8_f4_k3s4_tt_n6d384_off100_5shot_ep20" \
data.seq_len=8 \
policy.skill_block_size=8 \
policy.prior.block_size=2 \
policy.offset_loss_scale=100 \
benchmark_name="LIBERO_10"

# m4op_32_f5, FSQ levels [7,5,5,5,5] (7*5*5*5*5 = 4375 codes; vocab_size 4380
# presumably leaves room for special tokens — confirm against the model config).
sbatch slurm/eval.sbatch python libero/lifelong/skill_policy_eval.py \
pretrain_model_path="/storage/home/hcoda1/0/amete7/p-agarg35-0/diff-skill/LIBERO/experiments_finetune_clip/LIBERO_10/Multitask/SkillGPT_Model/ResnetEncoder/m4op_32_f5_k3s4_tt_n6d384_5shot/run_001/multitask_model_ep20.pth" \
exp_name="eval40_lib10_m4op_32_f5_k3s4_tt_n6d384_5shot_ep20" \
policy.fsq_level=[7,5,5,5,5] \
policy.offset_loss_scale=1 \
policy.prior.vocab_size=4380 \
policy.prior.output_dim=4375 \
policy.prior.start_token=4376 \
benchmark_name="LIBERO_10"
# --- Epoch 50 checkpoints ---
# Trailing backslash removed from each command's last line so each sbatch is
# submitted as its own job instead of being chained into the previous command.

# m4op_32_f4, offset loss scale 0 (default seq_len / block sizes).
sbatch slurm/eval.sbatch python libero/lifelong/skill_policy_eval.py \
pretrain_model_path="/storage/home/hcoda1/0/amete7/p-agarg35-0/diff-skill/LIBERO/experiments_finetune_clip/LIBERO_10/Multitask/SkillGPT_Model/ResnetEncoder/m4op_32_f4_k3s4_tt_n6d384_off0_5shot/run_001/multitask_model_ep50.pth" \
exp_name="eval40_lib10_m4op_32_f4_k3s4_tt_n6d384_off0_5shot_ep50" \
benchmark_name="LIBERO_10"

# m4op_8_f4, offset loss scale 100, skill block size 8.
sbatch slurm/eval.sbatch python libero/lifelong/skill_policy_eval.py \
pretrain_model_path="/storage/home/hcoda1/0/amete7/p-agarg35-0/diff-skill/LIBERO/experiments_finetune_clip/LIBERO_10/Multitask/SkillGPT_Model/ResnetEncoder/m4op_8_f4_k3s4_tt_n6d384_off100_5shot/run_001/multitask_model_ep50.pth" \
exp_name="eval40_lib10_m4op_8_f4_k3s4_tt_n6d384_off100_5shot_ep50" \
data.seq_len=8 \
policy.skill_block_size=8 \
policy.prior.block_size=2 \
policy.offset_loss_scale=100 \
benchmark_name="LIBERO_10"

# m4op_32_f5, FSQ levels [7,5,5,5,5] with matching prior vocab/output/start-token.
sbatch slurm/eval.sbatch python libero/lifelong/skill_policy_eval.py \
pretrain_model_path="/storage/home/hcoda1/0/amete7/p-agarg35-0/diff-skill/LIBERO/experiments_finetune_clip/LIBERO_10/Multitask/SkillGPT_Model/ResnetEncoder/m4op_32_f5_k3s4_tt_n6d384_5shot/run_001/multitask_model_ep50.pth" \
exp_name="eval40_lib10_m4op_32_f5_k3s4_tt_n6d384_5shot_ep50" \
policy.fsq_level=[7,5,5,5,5] \
policy.offset_loss_scale=1 \
policy.prior.vocab_size=4380 \
policy.prior.output_dim=4375 \
policy.prior.start_token=4376 \
benchmark_name="LIBERO_10"
# --- Epoch 80 checkpoints ---
# Trailing backslash removed from each command's last line so each sbatch is
# submitted as its own job instead of being chained into the previous command.

# m4op_32_f4, offset loss scale 0 (default seq_len / block sizes).
sbatch slurm/eval.sbatch python libero/lifelong/skill_policy_eval.py \
pretrain_model_path="/storage/home/hcoda1/0/amete7/p-agarg35-0/diff-skill/LIBERO/experiments_finetune_clip/LIBERO_10/Multitask/SkillGPT_Model/ResnetEncoder/m4op_32_f4_k3s4_tt_n6d384_off0_5shot/run_001/multitask_model_ep80.pth" \
exp_name="eval40_lib10_m4op_32_f4_k3s4_tt_n6d384_off0_5shot_ep80" \
benchmark_name="LIBERO_10"

# m4op_8_f4, offset loss scale 100, skill block size 8.
sbatch slurm/eval.sbatch python libero/lifelong/skill_policy_eval.py \
pretrain_model_path="/storage/home/hcoda1/0/amete7/p-agarg35-0/diff-skill/LIBERO/experiments_finetune_clip/LIBERO_10/Multitask/SkillGPT_Model/ResnetEncoder/m4op_8_f4_k3s4_tt_n6d384_off100_5shot/run_001/multitask_model_ep80.pth" \
exp_name="eval40_lib10_m4op_8_f4_k3s4_tt_n6d384_off100_5shot_ep80" \
data.seq_len=8 \
policy.skill_block_size=8 \
policy.prior.block_size=2 \
policy.offset_loss_scale=100 \
benchmark_name="LIBERO_10"

# m4op_32_f5, FSQ levels [7,5,5,5,5] with matching prior vocab/output/start-token.
sbatch slurm/eval.sbatch python libero/lifelong/skill_policy_eval.py \
pretrain_model_path="/storage/home/hcoda1/0/amete7/p-agarg35-0/diff-skill/LIBERO/experiments_finetune_clip/LIBERO_10/Multitask/SkillGPT_Model/ResnetEncoder/m4op_32_f5_k3s4_tt_n6d384_5shot/run_001/multitask_model_ep80.pth" \
exp_name="eval40_lib10_m4op_32_f5_k3s4_tt_n6d384_5shot_ep80" \
policy.fsq_level=[7,5,5,5,5] \
policy.offset_loss_scale=1 \
policy.prior.vocab_size=4380 \
policy.prior.output_dim=4375 \
policy.prior.start_token=4376 \
benchmark_name="LIBERO_10"
# --- Epoch 100 checkpoints ---
# Trailing backslash removed from each command's last line so each sbatch is
# submitted as its own job instead of being chained into the previous command.

# m4op_32_f4, offset loss scale 0 (default seq_len / block sizes).
sbatch slurm/eval.sbatch python libero/lifelong/skill_policy_eval.py \
pretrain_model_path="/storage/home/hcoda1/0/amete7/p-agarg35-0/diff-skill/LIBERO/experiments_finetune_clip/LIBERO_10/Multitask/SkillGPT_Model/ResnetEncoder/m4op_32_f4_k3s4_tt_n6d384_off0_5shot/run_001/multitask_model_ep100.pth" \
exp_name="eval40_lib10_m4op_32_f4_k3s4_tt_n6d384_off0_5shot_ep100" \
benchmark_name="LIBERO_10"

# m4op_8_f4, offset loss scale 100, skill block size 8.
sbatch slurm/eval.sbatch python libero/lifelong/skill_policy_eval.py \
pretrain_model_path="/storage/home/hcoda1/0/amete7/p-agarg35-0/diff-skill/LIBERO/experiments_finetune_clip/LIBERO_10/Multitask/SkillGPT_Model/ResnetEncoder/m4op_8_f4_k3s4_tt_n6d384_off100_5shot/run_001/multitask_model_ep100.pth" \
exp_name="eval40_lib10_m4op_8_f4_k3s4_tt_n6d384_off100_5shot_ep100" \
data.seq_len=8 \
policy.skill_block_size=8 \
policy.prior.block_size=2 \
policy.offset_loss_scale=100 \
benchmark_name="LIBERO_10"

# m4op_32_f5, FSQ levels [7,5,5,5,5] with matching prior vocab/output/start-token.
sbatch slurm/eval.sbatch python libero/lifelong/skill_policy_eval.py \
pretrain_model_path="/storage/home/hcoda1/0/amete7/p-agarg35-0/diff-skill/LIBERO/experiments_finetune_clip/LIBERO_10/Multitask/SkillGPT_Model/ResnetEncoder/m4op_32_f5_k3s4_tt_n6d384_5shot/run_001/multitask_model_ep100.pth" \
exp_name="eval40_lib10_m4op_32_f5_k3s4_tt_n6d384_5shot_ep100" \
policy.fsq_level=[7,5,5,5,5] \
policy.offset_loss_scale=1 \
policy.prior.vocab_size=4380 \
policy.prior.output_dim=4375 \
policy.prior.start_token=4376 \
benchmark_name="LIBERO_10"