Spaces: Running on Zero
Update app.py
Browse files
app.py
CHANGED
@@ -18,13 +18,13 @@ from typing import Tuple
|
|
18 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
19 |
import paramiko
|
20 |
|
21 |
- torch.backends.cuda.matmul.allow_tf32 = […truncated in capture]
|
22 |
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
|
23 |
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
|
24 |
- torch.backends.cudnn.allow_tf32 = […truncated in capture]
|
25 |
torch.backends.cudnn.deterministic = False
|
26 |
torch.backends.cudnn.benchmark = False
|
27 |
- torch.set_float32_matmul_precision("[…truncated in capture]
|
28 |
|
29 |
FTP_HOST = "1ink.us"
|
30 |
FTP_USER = "ford442"
|
|
|
18 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
19 |
import paramiko
|
20 |
|
21 |
+ torch.backends.cuda.matmul.allow_tf32 = True
|
22 |
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
|
23 |
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
|
24 |
+ torch.backends.cudnn.allow_tf32 = True
|
25 |
torch.backends.cudnn.deterministic = False
|
26 |
torch.backends.cudnn.benchmark = False
|
27 |
+ torch.set_float32_matmul_precision("medium")
|
28 |
|
29 |
FTP_HOST = "1ink.us"
|
30 |
FTP_USER = "ford442"
|