thinh111 committed on
Commit
1609f16
·
verified ·
1 Parent(s): 9ade237

Update model.py

Browse files
Files changed (1) hide show
  1. model.py +1 -1
model.py CHANGED
@@ -6,7 +6,7 @@ os.system(str_cmd2)
6
 
7
 
8
  from unsloth import FastLanguageModel
9
- import torch
10
  max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
11
  dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
12
  load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
 
6
 
7
 
8
  from unsloth import FastLanguageModel
9
+ # import torch
10
  max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
11
  dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
12
  load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.