Skip to content

Latest commit

 

History

History
105 lines (105 loc) · 57.2 KB

PriorityTable.md

File metadata and controls

105 lines (105 loc) · 57.2 KB
Priority\Condition EA-NC-DG-NW EA-NC-EG-NW EA-NC-NG-NW EA-NC-VG-NW EA-SC-DG-NW EA-SC-EG-NW RA-NC-EG-NW RA-NC-NG-NW RA-SC-DG-NW RA-SC-NG-NW OA-NC-DG-NW OA-NC-EG-EW OA-NC-NG-NW OA-SC-DG-NW OA-SC-EG-EW OA-SC-NG-NW XA-NC-NG-NW
1st trainable=True trainable=True trainable=True trainable=True trainable=True trainable=True block type=xception block type=xception learning rate=0.0001 learning rate=0.01 block type=xception num blocks=1 block type=xception block type=xception num blocks=3 block type=xception trainable=True
2nd imagenet size=True block type=xception block type=xception imagenet size=True imagenet size=True learning rate=0.01 trainable=True trainable=True trainable=True trainable=True filters 1 0=256 num layers=2 separable=False learning rate=0.01 block type=xception optimizer=adam weight decay imagenet size=True
3rd block type=xception imagenet size=True imagenet size=True block type=xception block type=xception imagenet size=True imagenet size=True imagenet size=True learning rate=0.001 learning rate=0.1 optimizer=adam weight decay optimizer=sgd optimizer=adam weight decay learning rate=0.001 num layers=2 end learning rate=1e=5 pretrained=True
4th optimizer=adam weight decay version=b7 double step=True momentum=0.1 momentum=0.1 block type=xception augment=False optimizer=adam weight decay block type=xception learning rate=0.001 end learning rate=1e=5 block type=xception end learning rate=1e=5 optimizer=sgd separable=True optimizer=nadam optimizer=adam weight decay
5th optimizer=nadam optimizer=adam weight decay dropout=0.5 momentum=0.5 momentum=0.5 optimizer=adam vertical flip=False dropout=0.5 learning rate=2e=05 optimizer=adam weight decay optimizer=nadam momentum=0.1 dropout=0.5 momentum=0.1 reduction type=global max learning rate=0.01 dropout=0.5
6th end learning rate=1e=5 end learning rate=1e=5 triple step=True learning rate=0.0001 learning rate=0.001 optimizer=adam weight decay pretrained=False end learning rate=1e=5 learning rate=1e=05 end learning rate=1e=5 learning rate=0.001 separable=True optimizer=nadam momentum=0.5 reduction type=global avg learning rate=0.1 end learning rate=1e=5
7th trainable=False optimizer=nadam version=b7 momentum=0.9 momentum=0.9 end learning rate=1e=5 rotation factor=0.0 learning rate=0.1 learning rate=0.01 optimizer=nadam max pooling=True learning rate=0.1 block type=resnet learning rate=0.0001 max pooling=True optimizer=adam learning rate=0.0001
8th double step=True double step=True pretrained=False momentum=0.99 momentum=0.99 dropout=0.5 translation factor=0.0 optimizer=nadam optimizer=sgd block type=xception block type=resnet momentum=0.5 reduction type=global max num layers=1 block type=resnet block type=resnet reduction type=global max
9th triple step=True triple step=True block type=resnet optimizer=sgd optimizer=sgd optimizer=nadam horizontal flip=True pretrained=False momentum=0.1 optimizer=adam dropout=0.5 momentum=0.9 learning rate=0.001 learning rate=2e=05 normalize=True learning rate=0.001 double step=True
10th pretrained=True dropout=0.5 reduction type=global max learning rate=0.001 optimizer=adam weight decay momentum=0.99 zoom factor=0.1 learning rate=0.01 momentum=0.5 learning rate=0.0001 vertical flip=False momentum=0.99 augment=False num blocks=2 normalize=False separable=False reduction type=global avg
11th block type=resnet learning rate=0.001 reduction type=global avg block type=resnet end learning rate=1e=5 learning rate=0.001 normalize=True reduction type=global max trainable=False momentum=0.99 filters 2 0=512 augment=False vertical flip=False momentum=0.9 kernel size=5 learning rate=0.0001 optimizer=adam
12th dropout=0.5 block type=resnet version=b4 optimizer=adam weight decay optimizer=nadam block type=resnet contrast factor=0.0 reduction type=global avg dropout=0.5 imagenet size=True filters 2 0=256 block type=resnet num layers=1 learning rate=1e=05 kernel size=3 reduction type=global max triple step=True
13th version=b2 learning rate=0.0001 dropout=0.25 end learning rate=1e=5 learning rate=0.0001 momentum=0.9 reduction type=global max augment=False momentum=0.9 momentum=0.9 separable=False vertical flip=False filters 1 0=512 momentum=0.99 kernel size=7 augment=contrast factor=0.1 dropout=0.25
14th learning rate=0.1 optimizer=adam rotation factor=0.0 optimizer=nadam block type=resnet learning rate=0.1 reduction type=global avg dropout=0.25 momentum=0.99 learning rate=2e=05 rotation factor=0.0 kernel size=5 filters 1 1=512 separable=False num blocks=2 filters 1 1=64 block type=efficient
15th version=b4 pretrained=False translation factor=0.1 version=b1 learning rate=2e=05 version=b0 contrast factor=0.1 normalize=True optimizer=adam weight decay pretrained=False filters 2 0=128 reduction type=global max filters 2 0=512 filters 2 1=512 filters 0 0=16 contrast factor=0.1 imagenet size=False
16th version=b1 dropout=0.25 augment=False version=b0 learning rate=0.01 reduction type=global max reduction type=flatten momentum=0.99 augment=translation factor=0.0 dropout=0.5 filters 0 0=512 dropout=0.5 num blocks=2 augment=rotation factor=0.0 filters 0 0=128 kernel size=5 block type=resnet
17th imagenet size=False version=b0 horizontal flip=False dropout=0.5 learning rate=1e=05 learning rate=1e=05 normalize=False momentum=0.9 translation factor=0.0 dropout=0.25 filters 0 1=128 max pooling=False rotation factor=0.0 rotation factor=0.0 filters 0 0=32 num layers=1 optimizer=nadam
18th version=b3 freeze=all zoom factor=0.0 learning rate=0.01 version=b6 dropout=0.25 zoom factor=0.0 optimizer=adam augment=vertical flip=True reduction type=global max zoom factor=0.1 rotation factor=0.0 activation=tanh filters 1 1=32 filters 0 0=64 rotation factor=0.1 freeze=all
19th normalize=True freeze=no freeze=all normalize=False reduction type=global max version=b2 horizontal flip=False dropout=0.0 vertical flip=True normalize=True kernel size=5 initial=lecun uniform learning rate=0.01 filters 2 1=64 filters 0 0=256 dropout=0.5 learning rate=0.001
20th freeze=bn freeze=bn freeze=bn reduction type=global max dropout=0.5 normalize=False augment=vertical flip=False normalize=False end learning rate=1e=5 reduction type=global avg activation=selu activation=tanh initial=he uniform augment=horizontal flip=False filters 0 0=512 kernel size=3 pretrained=False
21st freeze=all reduction type=global max normalize=False learning rate=2e=05 version=b3 learning rate=2e=05 translation factor=0.1 augment=translation factor=0.1 optimizer=nadam normalize=False initial=he uniform dropout=0.25 filters 0 0=512 horizontal flip=False filters 0 1=16 filters 1 0=32 freeze=bn
22nd freeze=no version=b1 vertical flip=True version=b6 version=b2 version=b6 rotation factor=0.1 augment=horizontal flip=False augment=contrast factor=0.1 dropout=0.0 reduction type=global max translation factor=0.0 activation=selu num blocks=1 filters 0 1=512 filters 0 0=32 freeze=no
23rd reduction type=global max normalize=True freeze=no version=b2 version=b5 version=b3 augment=rotation factor=0.0 augment=vertical flip=False contrast factor=0.1 reduction type=flatten num layers=2 contrast factor=0.1 filters 1 0=256 block type=resnet filters 0 1=32 num blocks=2 dropout=0.0
24th reduction type=global avg normalize=False augment=rotation factor=0.0 version=b7 reduction type=global avg version=b4 pretrained=True augment=rotation factor=0.1 pretrained=False pretrained=True activation=tanh filters 0 0=512 initial=lecun uniform filters 1 1=256 filters 0 1=64 filters 0 1=16 normalize=True
25th learning rate=0.01 reduction type=global avg augment=translation factor=0.1 reduction type=global avg version=b7 version=b1 vertical flip=True augment=zoom factor=0.1 augment=horizontal flip=False learning rate=1e=05 initial=lecun uniform initial=he uniform filters 2 1=512 optimizer=adam filters 0 1=128 augment=rotation factor=0.1 lr ratio=1.0
26th version=b0 reduction type=flatten contrast factor=0.1 version=b3 optimizer=adam reduction type=global avg augment=zoom factor=0.1 augment=contrast factor=0.0 horizontal flip=False momentum=0.5 augment=translation factor=0.0 kernel size=3 translation factor=0.0 kernel size=5 filters 0 1=256 filters 0 1=512 momentum=0.99
27th reduction type=flatten learning rate=0.01 version=b1 normalize=True version=b4 version=b5 augment=translation factor=0.0 momentum=0.5 reduction type=global max imagenet size=False augment=horizontal flip=True activation=selu filters 1 1=256 max pooling=True filters 1 0=16 augment=zoom factor=0.1 normalize=False
28th lr ratio=1.0 dropout=0.0 augment=horizontal flip=False learning rate=0.1 dropout=0.25 version=b7 augment=horizontal flip=True reduction type=flatten augment=zoom factor=0.0 momentum=0.1 augment=vertical flip=False normalize=True max pooling=False filters 1 0=64 filters 1 0=512 num blocks=3 momentum=0.9
29th normalize=False version=b3 augment=zoom factor=0.0 optimizer=adam normalize=False dropout=0.0 augment=vertical flip=True momentum=0.1 dropout=0.25 optimizer=sgd augment=rotation factor=0.0 horizontal flip=True filters 2 1=256 filters 0 0=16 filters 1 0=32 filters 0 0=16 epoch ratio=0.2
30th momentum=0.99 version=b4 version=b3 dropout=0.25 dropout=0.0 normalize=True augment=rotation factor=0.1 optimizer=sgd zoom factor=0.0 block type=efficient augment=zoom factor=0.1 learning rate=0.01 filters 1 0=128 filters 1 0=16 filters 1 0=64 reduction type=global avg end learning rate=2e=06
31st momentum=0.9 lr ratio=1.0 contrast factor=0.0 version=b5 version=b0 learning rate=0.0001 augment=zoom factor=0.0 learning rate=0.001 optimizer=adam block type=vanilla augment=contrast factor=0.1 filters 0 1=512 kernel size=5 normalize=True filters 1 0=128 contrast factor=0.0 weight decay rate=0.01
32nd learning rate=0.001 version=b2 vertical flip=False reduction type=flatten normalize=True reduction type=flatten augment=contrast factor=0.0 learning rate=0.0001 augment=rotation factor=0.0 / normalize=False zoom factor=0.1 filters 2 0=256 filters 1 0=128 filters 1 0=256 vertical flip=False end learning rate=0.0
33rd dropout=0.25 version=b6 normalize=True learning rate=1e=05 version=b1 imagenet size=False augment=contrast factor=0.1 pretrained=True rotation factor=0.0 / filters 0 0=128 filters 0 0=128 filters 2 1=128 filters 1 1=512 filters 1 1=16 filters 0 0=64 weight decay rate=0.1
34th momentum=0.5 epoch ratio=0.2 version=b2 dropout=0.0 imagenet size=False block type=vanilla augment=horizontal flip=False block type=vanilla normalize=True / dropout=0.25 optimizer=adam weight decay filters 0 0=256 optimizer=adam weight decay filters 1 1=256 augment=zoom factor=0.0 weight decay rate=0.001
35th version=b5 version=b5 lr ratio=1.0 version=b4 reduction type=flatten momentum=0.5 augment=translation factor=0.1 block type=efficient augment=translation factor=0.1 / filters 2 0=64 optimizer=nadam filters 2 0=64 reduction type=global max filters 1 1=32 translation factor=0.0 end learning rate=0.0001
36th epoch ratio=0.2 end learning rate=2e=06 augment=vertical flip=True block type=vanilla learning rate=0.1 momentum=0.1 imagenet size=False imagenet size=False augment=horizontal flip=True / augment=horizontal flip=False end learning rate=1e=5 filters 2 0=128 end learning rate=1e=5 filters 1 1=64 max pooling=True epoch ratio=0.3
37th momentum=0.1 weight decay rate=0.005 reduction type=flatten / block type=vanilla optimizer=sgd block type=efficient learning rate=2e=05 augment=vertical flip=False / augment=translation factor=0.1 zoom factor=0.0 horizontal flip=False optimizer=nadam filters 1 1=128 augment=translation factor=0.0 epoch ratio=0.1
38th optimizer=sgd end learning rate=1e=06 augment=contrast factor=0.1 / / / block type=vanilla learning rate=1e=05 augment=rotation factor=0.1 / kernel size=7 filters 0 1=32 filters 0 1=512 reduction type=global avg block type=efficient filters 0 1=64 weight decay rate=0.05
39th weight decay rate=0.01 momentum=0.99 zoom factor=0.1 / / / / trainable=False augment=zoom factor=0.1 / contrast factor=0.1 augment=vertical flip=False filters 1 0=64 kernel size=3 filters 1 1=512 horizontal flip=False end learning rate=1e-06
40th end learning rate=1e=06 end learning rate=0.0 horizontal flip=True / / / / / augment=contrast factor=0.0 / reduction type=global avg filters 0 1=128 normalize=False dropout=0.25 max pooling=False augment=False weight decay rate=0.005
41st end learning rate=2e=06 weight decay rate=0.01 augment=translation factor=0.0 / / / / / reduction type=global avg / filters 1 1=32 filters 0 1=64 kernel size=7 dropout=0.0 reduction type=flatten zoom factor=0.1 epoch ratio=0.5
42nd weight decay rate=0.05 epoch ratio=0.3 augment=horizontal flip=True / / / / / reduction type=flatten / num blocks=3 horizontal flip=False dropout=0.25 kernel size=7 separable=False augment=contrast factor=0.0 reduction type=flatten
43rd weight decay rate=0.005 weight decay rate=0.001 augment=vertical flip=False / / / / / normalize=False / filters 1 1=256 normalize=False zoom factor=0.1 filters 0 0=32 num layers=1 augment=vertical flip=False epoch ratio=0.4
44th version=b6 end learning rate=0.0001 augment=rotation factor=0.1 / / / / / augment=False / augment=contrast factor=0.0 filters 0 0=256 filters 1 1=128 filters 0 0=64 num blocks=1 filters 0 1=32 triple step=False
45th weight decay rate=0.1 weight decay rate=0.05 augment=zoom factor=0.1 / / / / / imagenet size=False / translation factor=0.0 dropout=0.0 contrast factor=0.0 filters 0 0=128 / learning rate=1e=05 momentum=0.5
46th epoch ratio=0.3 epoch ratio=0.1 augment=contrast factor=0.0 / / / / / dropout=0.0 / horizontal flip=True translation factor=0.1 zoom factor=0.0 filters 0 0=256 / filters 0 1=128 learning rate=2e=05
47th end learning rate=0.0 weight decay rate=0.1 translation factor=0.0 / / / / / rotation factor=0.1 / filters 0 0=64 contrast factor=0.0 filters 2 1=64 filters 0 0=512 / filters 1 1=16 momentum=0.1
48th weight decay rate=0.001 epoch ratio=0.4 rotation factor=0.1 / / / / / zoom factor=0.1 / learning rate=0.0001 rotation factor=0.1 num blocks=1 filters 0 1=16 / filters 0 0=512 optimizer=sgd
49th dropout=0.0 momentum=0.9 version=b0 / / / / / horizontal flip=True / filters 0 1=256 max pooling=True contrast factor=0.1 filters 0 1=128 / normalize=True lr ratio=0.1
50th learning rate=0.0001 epoch ratio=0.5 epoch ratio=0.2 / / / / / pretrained=True / filters 1 1=64 filters 0 1=16 normalize=True filters 0 1=32 / filters 1 0=64 learning rate=0.01
51st epoch ratio=0.1 pretrained=True version=b6 / / / / / contrast factor=0.0 / horizontal flip=False filters 0 0=32 horizontal flip=True filters 0 1=64 / filters 1 0=128 lr ratio=0.01
52nd epoch ratio=0.5 learning rate=2e=05 weight decay rate=0.01 / / / / / vertical flip=False / translation factor=0.1 filters 0 0=64 filters 1 1=64 filters 0 1=256 / filters 1 0=512 learning rate=1e=05
53rd end learning rate=0.0001 learning rate=1e-05 version=b5 / / / / / translation factor=0.1 / filters 0 1=64 reduction type=global avg filters 0 0=128 filters 0 1=512 / activation=selu learning rate=0.1
54th learning rate=1e-05 momentum=0.5 end learning rate=2e-06 / / / / / block type=vanilla / num blocks=2 reduction type=flatten dropout=0.0 filters 1 0=32 / activation=tanh block type=vanilla
55th learning rate=2e-05 learning rate=0.1 weight decay rate=0.005 / / / / / imagenet size=True / filters 1 0=128 kernel size=7 filters 0 1=128 filters 1 0=256 / initial=he uniform trainable=False
56th optimizer=adam block type=vanilla epoch ratio=0.1 / / / / / block type=efficient / filters 1 1=512 filters 0 1=256 filters 0 1=256 filters 1 0=512 / initial=lecun uniform /
57th epoch ratio=0.4 momentum=0.1 end learning rate=1e=06 / / / / / learning rate=0.1 / filters 0 1=32 augment=translation factor=0.0 filters 0 0=64 filters 2 0=16 / augment=horizontal flip=False /
58th lr ratio=0.1 lr ratio=0.1 weight decay rate=0.001 / / / / / / / augment=zoom factor=0.0 augment=contrast factor=0.1 filters 0 1=64 filters 2 0=512 / filters 1 0=256 /
59th version=b7 optimizer=sgd weight decay rate=0.05 / / / / / / / filters 0 1=16 augment=horizontal flip=True optimizer=adam filters 2 0=32 / filters 1 1=32 /
60th block type=vanilla lr ratio=0.01 end learning rate=0.0 / / / / / / / reduction type=flatten vertical flip=True filters 0 0=32 filters 2 0=64 / filters 1 1=512 /
61st triple step=False imagenet size=False epoch ratio=0.4 / / / / / / / filters 0 0=16 augment=translation factor=0.1 max pooling=True filters 2 0=128 / filters 1 0=16 /
62nd pretrained=False triple step=False epoch ratio=0.5 / / / / / / / contrast factor=0.0 augment=horizontal flip=False filters 0 1=32 filters 2 0=256 / augment=translation factor=0.1 /
63rd lr ratio=0.01 trainable=False weight decay rate=0.1 / / / / / / / kernel size=3 augment=vertical flip=True translation factor=0.1 filters 1 1=16 / filters 1 1=256 /
64th / / end learning rate=0.0001 / / / / / / / augment=rotation factor=0.1 augment=rotation factor=0.0 filters 2 0=16 filters 1 1=128 / normalize=False /
65th / / epoch ratio=0.3 / / / / / / / filters 0 0=32 augment=zoom factor=0.1 kernel size=3 filters 2 1=16 / filters 0 0=256 /
66th / / dropout=0.0 / / / / / / / filters 1 1=128 augment=contrast factor=0.0 filters 0 1=16 filters 2 1=32 / filters 0 0=128 /
67th / / lr ratio=0.1 / / / / / / / normalize=True augment=zoom factor=0.0 filters 2 0=32 filters 2 1=128 / learning rate=2e-05 /
68th / / block type=vanilla / / / / / / / augment=False learning rate=0.001 learning rate=0.0001 filters 2 1=256 / num blocks=1 /
69th / / lr ratio=0.01 / / / / / / / filters 2 0=32 augment=rotation factor=0.1 rotation factor=0.1 augment=False / reduction type=flatten /
70th / / imagenet size=False / / / / / / / num layers=1 block type=efficient filters 1 0=16 augment=translation factor=0.1 / zoom factor=0.0 /
71st / / pretrained=True / / / / / / / filters 1 1=16 filters 0 0=16 filters 1 1=16 augment=translation factor=0.0 / augment=vertical flip=True /
72nd / / triple step=False / / / / / / / filters 2 0=16 learning rate=0.0001 filters 2 1=16 augment=horizontal flip=True / augment=horizontal flip=True /
73rd / / trainable=False / / / / / / / filters 1 0=64 learning rate=1e=05 augment=rotation factor=0.0 augment=vertical flip=True / horizontal flip=True /
74th / / / / / / / / / / augment=vertical flip=True learning rate=2e-05 augment=vertical flip=False augment=vertical flip=False / filters 1 1=128 /
75th / / / / / / / / / / zoom factor=0.0 separable=False filters 1 0=32 augment=rotation factor=0.1 / augment=rotation factor=0.0 /
76th / / / / / / / / / / filters 0 1=512 num blocks=2 reduction type=global avg augment=zoom factor=0.0 / max pooling=False /
77th / / / / / / / / / / filters 0 0=256 num blocks=3 filters 0 0=16 augment=zoom factor=0.1 / dropout=0.25 /
78th / / / / / / / / / / rotation factor=0.1 num layers=1 augment=translation factor=0.0 augment=contrast factor=0.0 / translation factor=0.1 /
79th / / / / / / / / / / optimizer=adam optimizer=adam filters 1 1=32 augment=contrast factor=0.1 / vertical flip=True /
80th / / / / / / / / / / separable=True / num layers=2 translation factor=0.0 / filters 0 1=256 /
81st / / / / / / / / / / dropout=0.0 / vertical flip=True translation factor=0.1 / dropout=0.0 /
82nd / / / / / / / / / / filters 1 0=32 / reduction type=flatten vertical flip=False / kernel size=7 /
83rd / / / / / / / / / / vertical flip=True / num blocks=3 vertical flip=True / rotation factor=0.0 /
84th / / / / / / / / / / filters 1 0=16 / augment=horizontal flip=False zoom factor=0.1 / num layers=2 /
85th / / / / / / / / / / max pooling=False / augment=contrast factor=0.0 zoom factor=0.0 / separable=True /
86th / / / / / / / / / / learning rate=2e-05 / augment=contrast factor=0.1 contrast factor=0.1 / momentum=0.99 /
87th / / / / / / / / / / momentum=0.99 / filters 2 1=32 contrast factor=0.0 / momentum=0.9 /
88th / / / / / / / / / / momentum=0.9 / augment=horizontal flip=True dropout=0.5 / block type=efficient /
89th / / / / / / / / / / num blocks=1 / augment=zoom factor=0.1 activation=selu / momentum=0.5 /
90th / / / / / / / / / / block type=efficient / augment=zoom factor=0.0 activation=tanh / momentum=0.1 /
91st / / / / / / / / / / learning rate=1e-05 / momentum=0.99 initial=he uniform / optimizer=sgd /
92nd / / / / / / / / / / momentum=0.5 / augment=rotation factor=0.1 initial=lecun uniform / / /
93rd / / / / / / / / / / momentum=0.1 / augment=translation factor=0.1 reduction type=flatten / / /
94th / / / / / / / / / / optimizer=sgd / momentum=0.9 filters 1 1=64 / / /
95th / / / / / / / / / / learning rate=0.01 / augment=vertical flip=True normalize=False / / /
96th / / / / / / / / / / filters 1 0=512 / momentum=0.5 max pooling=False / / /
97th / / / / / / / / / / learning rate=0.1 / learning rate=2e-05 horizontal flip=True / / /
98th / / / / / / / / / / / / momentum=0.1 rotation factor=0.1 / / /
99th / / / / / / / / / / / / optimizer=sgd separable=True / / /
100th / / / / / / / / / / / / block type=efficient learning rate=0.1 / / /
101st / / / / / / / / / / / / learning rate=1e-05 block type=efficient / / /
102nd / / / / / / / / / / / / separable=True num blocks=3 / / /
103rd / / / / / / / / / / / / learning rate=0.1 num layers=2 / / /