ljy266987 committed on
Commit
852b612
·
1 Parent(s): 2cf54bf
Files changed (1) hide show
  1. app.py +21 -25
app.py CHANGED
@@ -29,20 +29,28 @@ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
29
 
30
  ##
31
  # 检查CUDA是否可用
32
- os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
33
- if torch.cuda.is_available():
34
- print("CUDA is available. Listing available GPUs:")
35
- # 获取并打印GPU数量
36
- num_gpus = torch.cuda.device_count()
37
- for i in range(num_gpus):
38
- print(f"GPU {i}: {torch.cuda.get_device_name(i)}")
39
- # 其他相关信息,例如内存
40
- print(f" Memory Allocated: {torch.cuda.memory_allocated(i) / 1024 ** 2:.0f} MB")
41
- print(f" Memory Reserved: {torch.cuda.memory_reserved(i) / 1024 ** 2:.0f} MB")
42
- else:
43
- print("CUDA is not available.")
44
 
45
  ##
 
 
 
 
 
 
 
 
46
 
47
 
48
  if not torch.cuda.is_available():
@@ -67,19 +75,7 @@ def generate(
67
  repetition_penalty: float = 1.2,
68
  ) -> Iterator[str]:
69
 
70
- ## check inside function
71
- # 检查CUDA是否可用
72
- if torch.cuda.is_available():
73
- print("check inside function: CUDA is available. Listing available GPUs:")
74
- # 获取并打印GPU数量
75
- num_gpus = torch.cuda.device_count()
76
- for i in range(num_gpus):
77
- print(f"check inside function: GPU {i}: {torch.cuda.get_device_name(i)}")
78
- # 其他相关信息,例如内存
79
- print(f"check inside function: Memory Allocated: {torch.cuda.memory_allocated(i) / 1024 ** 2:.0f} MB")
80
- print(f"check inside function: Memory Reserved: {torch.cuda.memory_reserved(i) / 1024 ** 2:.0f} MB")
81
- else:
82
- print("check inside function: CUDA is not available.")
83
 
84
  conversation = []
85
  if system_prompt:
 
29
 
30
  ##
31
  # 检查CUDA是否可用
32
def print_gpu():
    """Print CUDA availability and, for each visible GPU, its name and memory stats."""
    if not torch.cuda.is_available():
        print("CUDA is not available.")
        return
    print("CUDA is available. Listing available GPUs:")
    for gpu_idx in range(torch.cuda.device_count()):
        print(f"GPU {gpu_idx}: {torch.cuda.get_device_name(gpu_idx)}")
        # Current allocated / reserved device memory, reported in MB.
        print(f" Memory Allocated: {torch.cuda.memory_allocated(gpu_idx) / 1024 ** 2:.0f} MB")
        print(f" Memory Reserved: {torch.cuda.memory_reserved(gpu_idx) / 1024 ** 2:.0f} MB")
44
 
45
##
# Debug probe: re-run the GPU report under several CUDA_VISIBLE_DEVICES values.
# NOTE(review): torch is imported before this point, so reassigning
# CUDA_VISIBLE_DEVICES here presumably has no effect once CUDA has been
# initialized — confirm this output is for debugging only.
for _devices in (
    '-1',
    '0,1,2,3,4,5,6,7,8',
    '0,1,2,3,4,5,6,7,8,9,10,11,12,13,14',
):
    os.environ['CUDA_VISIBLE_DEVICES'] = _devices
    print_gpu()
54
 
55
 
56
  if not torch.cuda.is_available():
 
75
  repetition_penalty: float = 1.2,
76
  ) -> Iterator[str]:
77
 
78
+ print_gpu()
 
 
 
 
 
 
 
 
 
 
 
 
79
 
80
  conversation = []
81
  if system_prompt: