BoyuNLP committed
Commit 1486763 · verified · 1 Parent(s): ecbfda9

Update app.py

Files changed (1)
  1. app.py +5 -5
app.py CHANGED
@@ -22,13 +22,13 @@ MAX_PIXELS = 1806336
 # https://huggingface.co/osunlp/UGround-V1-2B
 model_repo = "osunlp/UGround-V1-2B"
 destination_folder = "./UGround-V1-2B"
-#
+#
 # # Ensure the destination folder exists
 # os.makedirs(destination_folder, exist_ok=True)
-#
+#
 # # List all files in the repository
 # files = list_repo_files(repo_id=model_repo)
-#
+#
 # # Download each file to the destination folder
 # for file in files:
 #     file_path = hf_hub_download(repo_id=model_repo, filename=file, local_dir=destination_folder)
@@ -37,7 +37,7 @@ destination_folder = "./UGround-V1-2B"
 model = Qwen2VLForConditionalGeneration.from_pretrained(
     model_repo,
     torch_dtype=torch.bfloat16,
-    device_map="cpu",
+    device_map="cuda",
 )
 
 # Load the processor
@@ -112,7 +112,7 @@ def run_showui(image, query, session_id, iterations=1):
     ]
 
     global model
-    model = model.to("cuda")
+    # model = model.to("cuda")
 
     text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
     image_inputs, video_inputs = process_vision_info(messages)
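
For context on the first hunk: the download block stays commented out in app.py, but if enabled it would amount to a minimal sketch like the following (assuming the huggingface_hub package is installed; the print call is illustrative and not part of the app):

import os
from huggingface_hub import list_repo_files, hf_hub_download

model_repo = "osunlp/UGround-V1-2B"
destination_folder = "./UGround-V1-2B"

# Ensure the destination folder exists
os.makedirs(destination_folder, exist_ok=True)

# List all files in the repository and download each one into the local folder
for filename in list_repo_files(repo_id=model_repo):
    file_path = hf_hub_download(repo_id=model_repo, filename=filename,
                                local_dir=destination_folder)
    print(f"Downloaded {file_path}")  # illustrative logging, not in the original app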
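
The functional change in the last two hunks moves device placement to load time: the model is created with device_map="cuda" instead of being loaded on the CPU and moved with model.to("cuda") on each call to run_showui. A minimal sketch of the resulting loading path, assuming a CUDA-capable GPU and a transformers version with Qwen2-VL support; the processor arguments are an assumption, since the "Load the processor" step sits outside this diff:

import torch
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration

model_repo = "osunlp/UGround-V1-2B"

# Weights land on the GPU at load time, so the per-request model.to("cuda")
# call that this commit comments out is no longer needed.
model = Qwen2VLForConditionalGeneration.from_pretrained(
    model_repo,
    torch_dtype=torch.bfloat16,
    device_map="cuda",
)

# Assumed processor setup; the actual app may pass extra arguments such as pixel limits.
processor = AutoProcessor.from_pretrained(model_repo)

Setting device_map at load time also avoids repeating a full weight transfer on every request, which is what the removed per-call model.to("cuda") line implied.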