smakamali committed on
Commit
f97caf8
·
1 Parent(s): 1810f49

update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -7
app.py CHANGED
@@ -57,12 +57,12 @@ def summarize_text(title,text):
57
  from transformers import BitsAndBytesConfig
58
  from transformers import AutoTokenizer, AutoModelForCausalLM
59
 
60
- quantization_config = BitsAndBytesConfig(
61
- load_in_4bit=True,
62
- bnb_4bit_compute_dtype=torch.float16,
63
- bnb_4bit_quant_type="nf4",
64
- bnb_4bit_use_double_quant=True,
65
- )
66
 
67
  # model = "nomic-ai/gpt4all-falcon"
68
  model = "tiiuae/falcon-7b-instruct"
@@ -70,7 +70,7 @@ def summarize_text(title,text):
70
  tokenizer = AutoTokenizer.from_pretrained(model,trust_remote_code=True,)
71
  model = AutoModelForCausalLM.from_pretrained(model,
72
  # trust_remote_code=True,
73
- quantization_config=quantization_config,
74
  )
75
 
76
  from langchain import HuggingFacePipeline
 
57
  from transformers import BitsAndBytesConfig
58
  from transformers import AutoTokenizer, AutoModelForCausalLM
59
 
60
+ # quantization_config = BitsAndBytesConfig(
61
+ # load_in_4bit=True,
62
+ # bnb_4bit_compute_dtype=torch.float16,
63
+ # bnb_4bit_quant_type="nf4",
64
+ # bnb_4bit_use_double_quant=True,
65
+ # )
66
 
67
  # model = "nomic-ai/gpt4all-falcon"
68
  model = "tiiuae/falcon-7b-instruct"
 
70
  tokenizer = AutoTokenizer.from_pretrained(model,trust_remote_code=True,)
71
  model = AutoModelForCausalLM.from_pretrained(model,
72
  # trust_remote_code=True,
73
+ # quantization_config=quantization_config,
74
  )
75
 
76
  from langchain import HuggingFacePipeline