forked from open-mmlab/OpenUnReID
-
Notifications
You must be signed in to change notification settings - Fork 0
/
config.yaml
137 lines (103 loc) · 2.32 KB
/
config.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
---
# Root paths, relative to the training script's working directory.
DATA_ROOT: '../datasets/'
LOGS_ROOT: '../logs/'

MODEL:
  # architecture
  backbone: 'resnet50'
  pooling: 'gem'
  embed_feat: 0   # NOTE(review): presumably 0 disables the extra embedding layer — confirm against model builder
  dropout: 0.0
  # Domain-Specific Batch Normalization
  dsbn: true
  sync_bn: false
  samples_per_bn: 16
  mean_net: false
  alpha: 0.999    # NOTE(review): looks like a mean-net/EMA momentum — only used when mean_net is enabled; confirm
  # pretraining
  imagenet_pretrained: true
  source_pretrained: null
DATA:
  height: 256
  width: 128
  # ImageNet normalization statistics
  norm_mean: [0.485, 0.456, 0.406]
  norm_std: [0.229, 0.224, 0.225]

  # Train-time augmentation. This nested TRAIN is DATA.TRAIN and is
  # distinct from the file's top-level TRAIN section — keeping it
  # indented under DATA avoids a duplicate top-level key.
  TRAIN:
    # augmentation
    is_autoaug: false
    is_flip: true
    flip_prob: 0.5
    is_pad: true
    pad_size: 10
    is_blur: false
    blur_prob: 0.5
    is_erase: true
    erase_prob: 0.5
    # dual augmentation for MMT
    is_mutual_transform: false
    mutual_times: 2
TRAIN:
  seed: 1
  deterministic: true
  # mixed precision training for PyTorch>=1.6
  # amp: false

  # datasets
  # datasets: {'market1501': 'trainval'}
  datasets: {'market1501': 'trainval', 'dukemtmcreid': 'trainval'}  # joint training
  unsup_dataset_indexes: [1]  # UDA: Duke2Market (dukemtmcreid is treated as unlabeled)

  epochs: 50
  iters: 400  # iterations per epoch

  LOSS:
    losses: {'cross_entropy': 1.0, 'softmax_triplet': 1.0}  # loss name -> weight
    margin: 0.0  # triplet margin; 0 means soft-margin formulation — TODO confirm against loss implementation

  # validate
  val_dataset: 'dukemtmcreid'
  val_freq: 5  # validate every N epochs

  # sampler
  SAMPLER:
    num_instances: 4  # identities-per-batch sampling granularity
    is_shuffle: true

  # data loader
  LOADER:
    samples_per_gpu: 8  # batch size per GPU when forwarding the support dataset
    workers_per_gpu: 2

  # pseudo labels
  PSEUDO_LABELS:
    freq: 1  # re-cluster every N epochs
    use_outliers: false
    norm_feat: true
    norm_center: true
    cluster: 'dbscan'
    eps: [0.6]
    min_samples: 4   # for dbscan
    dist_metric: 'jaccard'
    k1: 30           # for jaccard distance
    k2: 6            # for jaccard distance
    search_type: 0   # 0,1,2 for GPU, 3 for CPU (works for faiss)
    cluster_num: null
    # cluster: 'kmeans'
    # cluster_num: [500]
    # dist_cuda: true

  # optim
  OPTIM:
    optim: 'adam'
    lr: 0.00014  # scale in proportion with the batch size
    weight_decay: 0.0005

  SCHEDULER:
    lr_scheduler: null
TEST:
  # datasets
  # datasets: ['market1501']
  datasets: ['dukemtmcreid']

  # data loader
  LOADER:
    samples_per_gpu: 32
    workers_per_gpu: 2

  # ranking setting
  dist_metric: 'euclidean'
  norm_feat: true
  dist_cuda: true

  # post processing (k-reciprocal re-ranking)
  rerank: false
  search_type: 0  # 0,1,2 for GPU, 3 for CPU (works for faiss)
  k1: 20
  k2: 6
  lambda_value: 0.3