zfzhang-thu committed
Commit ac1f6ae · Parent(s): dbdd374

try cpu inference

Files changed (2):
  1. leo/inference.py +2 -2
  2. leo/model.py +1 -1
leo/inference.py CHANGED
@@ -166,8 +166,8 @@ def form_batch(data_dict):
 
 
 def inference(scan_id, task, predict_mode=False):
-    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-    # device = 'cpu' # ok for predict_mode=False, and both for Gradio demo local preview
+    # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+    device = 'cpu' # ok for predict_mode=False, and both for Gradio demo local preview
 
     data_dict = load_data(scan_id)
     data_dict.update(get_lang(task))
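For context, hard-coding `device = 'cpu'` as above works for `predict_mode=False` and for a local Gradio preview, per the inline comment. A more flexible variant would keep the CUDA path and gate the CPU fallback behind a flag; the sketch below is not part of this commit, and the `resolve_device` helper and its `force_cpu` parameter are hypothetical names:

```python
import torch

def resolve_device(force_cpu: bool = False) -> torch.device:
    # Hypothetical helper: fall back to CPU on request (e.g. for a
    # Gradio local preview) or whenever CUDA is unavailable.
    if force_cpu or not torch.cuda.is_available():
        return torch.device('cpu')
    return torch.device('cuda:0')
```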
leo/model.py CHANGED
@@ -14,7 +14,7 @@ from leo.grounding_head import SequentialGroundHead
 from leo.utils import get_mlp_head
 
 
-def maybe_autocast(model, dtype='bf16', enabled=True): ### not-half mode
+def maybe_autocast(model, dtype='float32', enabled=True): ### not-half mode
     # if on cpu, don't use autocast
     # if on gpu, use autocast with dtype if provided, otherwise use torch.float16
     enable_autocast = model.device != torch.device('cpu')
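The diff shows only the signature and first line of `maybe_autocast`; its comments say CPU skips autocast while GPU uses the given dtype, defaulting to `torch.float16`. As a minimal sketch of how such a helper is typically completed (everything after the `enable_autocast` line, including the dtype mapping, is an assumption, not this repository's code):

```python
import contextlib
import torch

def maybe_autocast(model, dtype='float32', enabled=True):
    # If on CPU, don't use autocast; on GPU, autocast with the requested
    # dtype. The 'float32' default keeps full precision ("not-half" mode).
    enable_autocast = model.device != torch.device('cpu')
    # Assumed mapping from the string argument to a torch dtype,
    # falling back to torch.float16 as the original comment describes.
    torch_dtype = {'bf16': torch.bfloat16,
                   'fp16': torch.float16,
                   'float32': torch.float32}.get(dtype, torch.float16)
    if enable_autocast and enabled:
        return torch.cuda.amp.autocast(dtype=torch_dtype)
    return contextlib.nullcontext()
```

Returning `contextlib.nullcontext()` on CPU lets callers write `with maybe_autocast(model):` unconditionally, which matches why changing only the default dtype is enough to support CPU inference here.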