# evaluate our trained open-vocabulary object detectors, {RN50, RN50x4} x {COCO, LVIS}

# RN50, COCO (Generalized: Novel + Base)
python3 ./tools/train_net.py \
--eval-only \
--num-gpus 1 \
--config-file ./configs/COCO-InstanceSegmentation/CLIP_fast_rcnn_R_50_C4_ovd.yaml \
MODEL.WEIGHTS ./pretrained_ckpt/regionclip/regionclip_finetuned-coco_rn50.pth \
MODEL.CLIP.OFFLINE_RPN_CONFIG ./configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x_ovd_FSD.yaml \
MODEL.CLIP.BB_RPN_WEIGHTS ./pretrained_ckpt/rpn/rpn_coco_48.pth \
MODEL.CLIP.TEXT_EMB_PATH ./pretrained_ckpt/concept_emb/coco_48_base_cls_emb.pth \
MODEL.CLIP.OPENSET_TEST_TEXT_EMB_PATH ./pretrained_ckpt/concept_emb/coco_65_cls_emb.pth \
MODEL.ROI_HEADS.SOFT_NMS_ENABLED True \

# # RN50, COCO (only Novel)
# # --config-file ./configs/COCO-InstanceSegmentation/CLIP_fast_rcnn_R_50_C4_ovd_testt.yaml \
# # MODEL.CLIP.OPENSET_TEST_TEXT_EMB_PATH ./pretrained_ckpt/concept_emb/coco_17_target_cls_emb.pth \

# # RN50, COCO (only Base)
# # --config-file ./configs/COCO-InstanceSegmentation/CLIP_fast_rcnn_R_50_C4_ovd_testb.yaml \
# # MODEL.CLIP.OPENSET_TEST_TEXT_EMB_PATH ./pretrained_ckpt/concept_emb/coco_48_base_cls_emb.pth \

# # RN50, LVIS
# python3 ./tools/train_net.py \
# --eval-only \
# --num-gpus 1 \
# --config-file ./configs/LVISv1-InstanceSegmentation/CLIP_fast_rcnn_R_50_C4.yaml \
# MODEL.WEIGHTS ./pretrained_ckpt/regionclip/regionclip_finetuned-lvis_rn50.pth \
# MODEL.CLIP.OFFLINE_RPN_CONFIG ./configs/LVISv1-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \
# MODEL.CLIP.BB_RPN_WEIGHTS ./pretrained_ckpt/rpn/rpn_lvis_866_lsj.pth \
# MODEL.CLIP.TEXT_EMB_PATH ./pretrained_ckpt/concept_emb/lvis_866_base_cls_emb.pth \
# MODEL.CLIP.OPENSET_TEST_TEXT_EMB_PATH ./pretrained_ckpt/concept_emb/lvis_1203_cls_emb.pth \
# MODEL.CLIP.OFFLINE_RPN_LSJ_PRETRAINED True \
# MODEL.ROI_HEADS.SOFT_NMS_ENABLED True \

# # RN50x4, COCO (Generalized: Novel + Base)
# python3 ./tools/train_net.py \
# --eval-only \
# --num-gpus 1 \
# --config-file ./configs/COCO-InstanceSegmentation/CLIP_fast_rcnn_R_50_C4_ovd.yaml \
# MODEL.WEIGHTS ./pretrained_ckpt/regionclip/regionclip_finetuned-coco_rn50x4.pth \
# MODEL.CLIP.OFFLINE_RPN_CONFIG ./configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x_ovd_FSD.yaml \
# MODEL.CLIP.BB_RPN_WEIGHTS ./pretrained_ckpt/rpn/rpn_coco_48.pth \
# MODEL.CLIP.TEXT_EMB_PATH ./pretrained_ckpt/concept_emb/coco_48_base_cls_emb_rn50x4.pth \
# MODEL.CLIP.OPENSET_TEST_TEXT_EMB_PATH ./pretrained_ckpt/concept_emb/coco_65_cls_emb_rn50x4.pth \
# MODEL.CLIP.TEXT_EMB_DIM 640 \
# MODEL.RESNETS.DEPTH 200 \
# MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION 18 \
# MODEL.ROI_HEADS.SOFT_NMS_ENABLED True \

# # RN50x4, COCO (only Novel)
# # --config-file ./configs/COCO-InstanceSegmentation/CLIP_fast_rcnn_R_50_C4_ovd_testt.yaml \
# # MODEL.CLIP.OPENSET_TEST_TEXT_EMB_PATH ./pretrained_ckpt/concept_emb/coco_17_target_cls_emb_rn50x4.pth \

# # RN50x4, COCO (only Base)
# # --config-file ./configs/COCO-InstanceSegmentation/CLIP_fast_rcnn_R_50_C4_ovd_testb.yaml \
# # MODEL.CLIP.OPENSET_TEST_TEXT_EMB_PATH ./pretrained_ckpt/concept_emb/coco_48_base_cls_emb_rn50x4.pth \

# # RN50x4, LVIS
# python3 ./tools/train_net.py \
# --eval-only \
# --num-gpus 1 \
# --config-file ./configs/LVISv1-InstanceSegmentation/CLIP_fast_rcnn_R_50_C4.yaml \
# MODEL.WEIGHTS ./pretrained_ckpt/regionclip/regionclip_finetuned-lvis_rn50x4.pth \
# MODEL.CLIP.OFFLINE_RPN_CONFIG ./configs/LVISv1-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \
# MODEL.CLIP.BB_RPN_WEIGHTS ./pretrained_ckpt/rpn/rpn_lvis_866_lsj.pth \
# MODEL.CLIP.TEXT_EMB_PATH ./pretrained_ckpt/concept_emb/lvis_866_base_cls_emb_rn50x4.pth \
# MODEL.CLIP.OPENSET_TEST_TEXT_EMB_PATH ./pretrained_ckpt/concept_emb/lvis_1203_cls_emb_rn50x4.pth \
# MODEL.CLIP.OFFLINE_RPN_LSJ_PRETRAINED True \
# MODEL.CLIP.TEXT_EMB_DIM 640 \
# MODEL.RESNETS.DEPTH 200 \
# MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION 18 \
# MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION 18 \
# MODEL.RESNETS.RES2_OUT_CHANNELS 320 \
# MODEL.ROI_HEADS.SOFT_NMS_ENABLED True \
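
# # Example: the RN50, COCO (only Novel) evaluation written out as a complete command. It is the
# # generalized RN50, COCO command at the top with its --config-file and
# # MODEL.CLIP.OPENSET_TEST_TEXT_EMB_PATH lines swapped for the (only Novel) replacements; the other
# # checkpoints and Detectron2-style KEY VALUE overrides are assumed unchanged. Kept commented out so
# # the script still runs only the first command; uncomment every line below to use it.
# python3 ./tools/train_net.py \
# --eval-only \
# --num-gpus 1 \
# --config-file ./configs/COCO-InstanceSegmentation/CLIP_fast_rcnn_R_50_C4_ovd_testt.yaml \
# MODEL.WEIGHTS ./pretrained_ckpt/regionclip/regionclip_finetuned-coco_rn50.pth \
# MODEL.CLIP.OFFLINE_RPN_CONFIG ./configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x_ovd_FSD.yaml \
# MODEL.CLIP.BB_RPN_WEIGHTS ./pretrained_ckpt/rpn/rpn_coco_48.pth \
# MODEL.CLIP.TEXT_EMB_PATH ./pretrained_ckpt/concept_emb/coco_48_base_cls_emb.pth \
# MODEL.CLIP.OPENSET_TEST_TEXT_EMB_PATH ./pretrained_ckpt/concept_emb/coco_17_target_cls_emb.pth \
# MODEL.ROI_HEADS.SOFT_NMS_ENABLED True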