
Commit 534d48f

fix bugs; add wilds
1 parent e5efc5f commit 534d48f

File tree

13 files changed: +849 -17 lines
Lines changed: 0 additions & 1 deletion

```diff
@@ -1,2 +1 @@
-wilds
 timm
```
examples/domain_adaptation/object_detection/d_adapt/d_adapt.py

Lines changed: 1 addition & 1 deletion

```diff
@@ -332,7 +332,7 @@ def main(args, args_cls, args_box):
     )
     args, argv = parser.parse_known_args(argv)
     print("Detection Args:")
-    pprint.pp(args)
+    pprint.pprint(args)
 
     args.sources = utils.build_dataset(args.sources[::2], args.sources[1::2])
     args.targets = utils.build_dataset(args.targets[::2], args.targets[1::2])
```
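This one-line fix replaces `pprint.pp` with `pprint.pprint`: `pprint.pp` was only added in Python 3.8, so the old call raises `AttributeError` on earlier interpreters, while `pprint.pprint` behaves the same on every Python 3 version. A quick illustration (the dict is a stand-in for the parsed argparse Namespace):

```python
import pprint

# Stand-in for the Namespace that d_adapt.py prints after parse_known_args.
args = {"sources": ["VOC2007"], "targets": ["Clipart"], "lr": 0.0025}
pprint.pprint(args)  # available on all Python 3 versions; pprint.pp needs >= 3.8
```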

examples/domain_adaptation/object_detection/d_adapt/d_adapt.sh

Lines changed: 7 additions & 7 deletions

```diff
@@ -1,6 +1,6 @@
 # ResNet101 Based Faster RCNN: Faster RCNN: VOC->Clipart
 # 44.8
-pretrained_models=../source_only/logs/faster_rcnn_R_101_C4/voc2clipart/model_final.pth
+pretrained_models=../logs/source_only/faster_rcnn_R_101_C4/voc2clipart/model_final.pth
 CUDA_VISIBLE_DEVICES=0 python d_adapt.py \
   --config-file config/faster_rcnn_R_101_C4_voc.yaml \
   -s VOC2007 ../datasets/VOC2007 VOC2012 ../datasets/VOC2012 \
@@ -28,7 +28,7 @@ CUDA_VISIBLE_DEVICES=0 python d_adapt.py --confidence-ratio-c 0.2 \
 
 # ResNet101 Based Faster RCNN: Faster RCNN: VOC->WaterColor
 # 54.1
-pretrained_models=../source_only/logs/faster_rcnn_R_101_C4/voc2watercolor_comic/model_final.pth
+pretrained_models=../logs/source_only/faster_rcnn_R_101_C4/voc2watercolor_comic/model_final.pth
 CUDA_VISIBLE_DEVICES=0 python d_adapt.py \
   --config-file config/faster_rcnn_R_101_C4_voc.yaml \
   -s VOC2007Partial ../datasets/VOC2007 VOC2012Partial ../datasets/VOC2012 \
@@ -45,7 +45,7 @@ CUDA_VISIBLE_DEVICES=0 python d_adapt.py --confidence-ratio-c 0.1 \
 
 # ResNet101 Based Faster RCNN: Faster RCNN: VOC->Comic
 # 39.7
-pretrained_models=../source_only/logs/faster_rcnn_R_101_C4/voc2watercolor_comic/model_final.pth
+pretrained_models=../logs/source_only/faster_rcnn_R_101_C4/voc2watercolor_comic/model_final.pth
 CUDA_VISIBLE_DEVICES=0 python d_adapt.py \
   --config-file config/faster_rcnn_R_101_C4_voc.yaml \
   -s VOC2007Partial ../datasets/VOC2007 VOC2012Partial ../datasets/VOC2012 \
@@ -62,7 +62,7 @@ CUDA_VISIBLE_DEVICES=0 python d_adapt.py --confidence-ratio-c 0.1 \
 
 # ResNet101 Based Faster RCNN: Cityscapes -> Foggy Cityscapes
 # 40.1
-pretrained_models=../source_only/logs/faster_rcnn_R_101_C4/cityscapes2foggy/model_final.pth
+pretrained_models=../logs/source_only/faster_rcnn_R_101_C4/cityscapes2foggy/model_final.pth
 CUDA_VISIBLE_DEVICES=0 python d_adapt.py --workers-c 4 --max-train-c 20 --ignored-scores-c 0.05 0.5 \
   --config-file config/faster_rcnn_R_101_C4_cityscapes.yaml \
   -s Cityscapes ../datasets/cityscapes_in_voc -t FoggyCityscapes ../datasets/foggy_cityscapes_in_voc/ \
@@ -80,7 +80,7 @@ CUDA_VISIBLE_DEVICES=0 python d_adapt.py --workers-c 4 --max-train-c 20 --ignore
 
 # VGG Based Faster RCNN: Cityscapes -> Foggy Cityscapes
 # 33.3
-pretrained_models=../source_only/logs/faster_rcnn_vgg_16/cityscapes2foggy/model_final.pth
+pretrained_models=../logs/source_only/faster_rcnn_vgg_16/cityscapes2foggy/model_final.pth
 CUDA_VISIBLE_DEVICES=0 python d_adapt.py --workers-c 4 --max-train-c 20 --ignored-scores-c 0.05 0.5 \
   --config-file config/faster_rcnn_vgg_16_cityscapes.yaml \
   -s Cityscapes ../datasets/cityscapes_in_voc -t FoggyCityscapes ../datasets/foggy_cityscapes_in_voc/ \
@@ -105,7 +105,7 @@ CUDA_VISIBLE_DEVICES=0 python d_adapt.py --workers-c 4 --max-train-c 20 --ignore
 
 # ResNet101 Based Faster RCNN: Sim10k -> Cityscapes Car
 # 51.9
-pretrained_models=../source_only/logs/faster_rcnn_R_101_C4/sim10k2cityscapes_car/model_final.pth
+pretrained_models=../logs/source_only/faster_rcnn_R_101_C4/sim10k2cityscapes_car/model_final.pth
 CUDA_VISIBLE_DEVICES=0 python d_adapt.py --workers-c 8 --ignored-scores-c 0.05 0.5 --bottleneck-dim-c 256 --bottleneck-dim-b 256 \
   --config-file config/faster_rcnn_R_101_C4_cityscapes.yaml \
   -s Sim10kCar ../datasets/sim10k -t CityscapesCar ../datasets/cityscapes_in_voc/ \
@@ -114,7 +114,7 @@ CUDA_VISIBLE_DEVICES=0 python d_adapt.py --workers-c 8 --ignored-scores-c 0.05 0
 
 # VGG Based Faster RCNN: Sim10k -> Cityscapes Car
 # 49.3
-pretrained_models=../source_only/logs/faster_rcnn_vgg_16/sim10k2cityscapes_car/model_final.pth
+pretrained_models=../logs/source_only/faster_rcnn_vgg_16/sim10k2cityscapes_car/model_final.pth
 CUDA_VISIBLE_DEVICES=0 python d_adapt.py --workers-c 8 --ignored-scores-c 0.05 0.5 --bottleneck-dim-c 256 --bottleneck-dim-b 256 \
   --config-file config/faster_rcnn_vgg_16_cityscapes.yaml \
   -s Sim10kCar ../datasets/sim10k -t CityscapesCar ../datasets/cityscapes_in_voc/ \
```
Lines changed: 149 additions & 0 deletions (new file)
# Unsupervised Domain Adaptation for Image Classification

## Installation
It's suggested to use **pytorch==1.9.0** to reproduce the benchmark results.

You need to install **apex** following ``https://github.com/NVIDIA/apex``.
Then run
```
pip install -r requirements.txt
```
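The distributed-training command below passes `--opt-level O1`, an apex `amp` option, so it is worth confirming apex imports cleanly before launching. A minimal check (a sketch, not part of the library):

```python
# Sanity check that NVIDIA apex is importable before running mixed-precision
# training with --opt-level; apex must be built from the repository above.
try:
    from apex import amp  # noqa: F401
    print("apex is available")
except ImportError:
    print("apex is missing: install it from https://github.com/NVIDIA/apex")
```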
## Usage

1.

2. Visualization
```
tensorboard --logdir=logs
```
3. Distributed training (a sketch of the per-worker setup follows this list).

```
CUDA_VISIBLE_DEVICES=0,3 python -m torch.distributed.launch --nproc_per_node=2 --master_port 6666 erm.py data/wilds -d "fmow" --aa "v0" --arch "densenet121" \
  --lr 0.1 --opt-level O1 --deterministic --vflip 0.5 -j 8 --log logs/erm/fmow/lr_0_1_aa_v0_densenet121_bs_128
```
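`torch.distributed.launch` spawns one process per GPU and passes each a `--local_rank` argument; every worker then joins the process group before wrapping its model. A minimal sketch of that per-worker setup, assuming the NCCL backend (`erm.py`'s actual code may differ):

```python
import argparse
import torch

parser = argparse.ArgumentParser()
# torch.distributed.launch passes --local_rank to every worker it spawns.
parser.add_argument("--local_rank", type=int, default=0)
args = parser.parse_args()

torch.cuda.set_device(args.local_rank)
# launch also sets MASTER_ADDR/MASTER_PORT/RANK/WORLD_SIZE for env:// init.
torch.distributed.init_process_group(backend="nccl", init_method="env://")

model = torch.nn.Linear(128, 10).cuda()  # stand-in for the real network
# DistributedDataParallel all-reduces gradients across the two GPUs.
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank])
```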
## Dataset

The following datasets can be downloaded automatically (a download sketch for the WILDS datasets follows this list):
- [DomainNet](http://ai.bu.edu/M3SDA/)
- [iwildcam (WILDS)](https://wilds.stanford.edu/datasets/)
- [camelyon17 (WILDS)](https://wilds.stanford.edu/datasets/)
- [fmow (WILDS)](https://wilds.stanford.edu/datasets/)
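The WILDS downloads go through the `wilds` package. A minimal sketch, assuming the package is installed and using `data/wilds` as the root directory to match the distributed-training command above:

```python
from wilds import get_dataset

# Downloads fmow into data/wilds on first use; later calls reuse the local copy.
dataset = get_dataset(dataset="fmow", root_dir="data/wilds", download=True)
train_data = dataset.get_subset("train")
print(len(train_data))
```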
## Supported Methods

Supported methods include the following (a minimal sketch of DANN's gradient reversal layer follows this list):

- [Domain Adversarial Neural Network (DANN)](https://arxiv.org/abs/1505.07818)
- [Deep Adaptation Network (DAN)](https://arxiv.org/pdf/1502.02791)
- [Joint Adaptation Network (JAN)](https://arxiv.org/abs/1605.06636)
- [Adversarial Discriminative Domain Adaptation (ADDA)](https://arxiv.org/pdf/1702.05464.pdf)
- [Conditional Domain Adversarial Network (CDAN)](https://arxiv.org/abs/1705.10667)
- [Maximum Classifier Discrepancy (MCD)](https://arxiv.org/abs/1712.02560)
- [Adaptive Feature Norm (AFN)](https://arxiv.org/pdf/1811.07456v2.pdf)
- [Batch Spectral Penalization (BSP)](http://ise.thss.tsinghua.edu.cn/~mlong/doc/batch-spectral-penalization-icml19.pdf)
- [Margin Disparity Discrepancy (MDD)](https://arxiv.org/abs/1904.05801)
- [Minimum Class Confusion (MCC)](https://arxiv.org/abs/1912.03699)
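The core trick behind DANN is the gradient reversal layer: an identity map on the forward pass whose backward pass negates (and optionally scales) the gradient, so the feature extractor is trained to fool the domain discriminator. A minimal PyTorch sketch of the idea, independent of how this library implements it:

```python
import torch
import torch.nn as nn

class GradientReverse(torch.autograd.Function):
    """Identity on forward; multiplies the gradient by -coeff on backward."""

    @staticmethod
    def forward(ctx, x, coeff):
        ctx.coeff = coeff
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # Reversed gradient flows back to the feature extractor; None for coeff.
        return -ctx.coeff * grad_output, None

def grad_reverse(x, coeff=1.0):
    return GradientReverse.apply(x, coeff)

# Toy usage: features -> reversed gradient -> domain discriminator.
features = torch.randn(8, 256, requires_grad=True)
discriminator = nn.Linear(256, 2)  # hypothetical 2-way domain classifier
domain_logits = discriminator(grad_reverse(features))
domain_logits.sum().backward()  # features.grad now carries the reversed signal
```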
## Experiment and Results

The shell files provide the scripts to reproduce the [benchmarks](/docs/dalib/benchmarks/image_classification.rst) with the specified hyper-parameters.
For example, if you want to train DANN on Office-31, use the following script:

```shell script
# Train DANN on the Office-31 Amazon -> Webcam task using ResNet-50.
# Assume you have put the datasets under the path `data/office-31`,
# or the datasets will be downloaded automatically from the Internet to this path.
CUDA_VISIBLE_DEVICES=0 python dann.py data/office31 -d Office31 -s A -t W -a resnet50 --epochs 20 --seed 1 --log logs/dann/Office31_A2W
```

For more information, please refer to [Get Started](/docs/get_started/quickstart.rst).
## Citation
If you use these methods in your research, please consider citing.

```
@inproceedings{DANN,
    author = {Ganin, Yaroslav and Lempitsky, Victor},
    title = {Unsupervised domain adaptation by backpropagation},
    booktitle = {ICML},
    year = {2015}
}

@inproceedings{DAN,
    author = {Mingsheng Long and Yue Cao and Jianmin Wang and Michael I. Jordan},
    title = {Learning Transferable Features with Deep Adaptation Networks},
    booktitle = {ICML},
    year = {2015}
}

@inproceedings{JAN,
    author = {Long, Mingsheng and Zhu, Han and Wang, Jianmin and Jordan, Michael I},
    title = {Deep transfer learning with joint adaptation networks},
    booktitle = {ICML},
    year = {2017}
}

@inproceedings{ADDA,
    author = {Tzeng, Eric and Hoffman, Judy and Saenko, Kate and Darrell, Trevor},
    title = {Adversarial discriminative domain adaptation},
    booktitle = {CVPR},
    year = {2017}
}

@inproceedings{CDAN,
    author = {Mingsheng Long and Zhangjie Cao and Jianmin Wang and Michael I. Jordan},
    title = {Conditional Adversarial Domain Adaptation},
    booktitle = {NeurIPS},
    year = {2018}
}

@inproceedings{MCD,
    author = {Saito, Kuniaki and Watanabe, Kohei and Ushiku, Yoshitaka and Harada, Tatsuya},
    title = {Maximum classifier discrepancy for unsupervised domain adaptation},
    booktitle = {CVPR},
    year = {2018}
}

@inproceedings{AFN,
    author = {Xu, Ruijia and Li, Guanbin and Yang, Jihan and Lin, Liang},
    title = {Larger Norm More Transferable: An Adaptive Feature Norm Approach for Unsupervised Domain Adaptation},
    booktitle = {ICCV},
    year = {2019}
}

@inproceedings{MDD,
    author = {Zhang, Yuchen and Liu, Tianle and Long, Mingsheng and Jordan, Michael},
    title = {Bridging theory and algorithm for domain adaptation},
    booktitle = {ICML},
    year = {2019}
}

@inproceedings{BSP,
    author = {Chen, Xinyang and Wang, Sinan and Long, Mingsheng and Wang, Jianmin},
    title = {Transferability vs. discriminability: Batch spectral penalization for adversarial domain adaptation},
    booktitle = {ICML},
    year = {2019}
}

@inproceedings{MCC,
    author = {Ying Jin and Ximei Wang and Mingsheng Long and Jianmin Wang},
    title = {Less Confusion More Transferable: Minimum Class Confusion for Versatile Domain Adaptation},
    booktitle = {ECCV},
    year = {2020}
}
```
