---
library_name: transformers
pipeline_tag: text-generation
inference: true
widget:
- text: Hello!
  example_title: Hello world
  group: Python
---

This model is for debugging. It is randomly initialized, using the config from [tiiuae/falcon-mamba-7b](https://huggingface.co/tiiuae/falcon-mamba-7b) but with a much smaller size.

Code:
```python
import os

import torch
from huggingface_hub import create_repo, upload_folder
from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoTokenizer,
    GenerationConfig,
    pipeline,
    set_seed,
)

model_id = "tiiuae/falcon-mamba-7b"
repo_id = "yujiepan/falcon-mamba-tiny-random"
save_path = f"/tmp/{repo_id}"
os.system(f"rm -rf {save_path}")

# Load the original config, then shrink it down to a tiny model.
config = AutoConfig.from_pretrained(model_id)
config.use_cache = True
config.num_hidden_layers = 2
config.hidden_size = 8
config.intermediate_size = 16
config.state_size = 8

# Reuse the original tokenizer unchanged.
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
tokenizer.save_pretrained(save_path)

# Build a randomly initialized model from the shrunken config,
# but keep the original generation config.
model = AutoModelForCausalLM.from_config(
    config,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
)
model.generation_config = GenerationConfig.from_pretrained(
    model_id,
    trust_remote_code=True,
)

# Re-initialize every parameter uniformly in [-0.5, 0.5], seeded for reproducibility.
set_seed(42)
num_params = 0
with torch.no_grad():
    for name, p in sorted(model.named_parameters()):
        print(name, p.shape)
        torch.nn.init.uniform_(p, -0.5, 0.5)
        num_params += p.numel()
print("Total number of parameters:", num_params)
model.save_pretrained(save_path)

# Smoke-test the saved checkpoint with a short generation.
pipe = pipeline(
    "text-generation",
    model=save_path,
    device="cpu",
    trust_remote_code=True,
    max_new_tokens=20,
)
print(pipe("Hello World!"))

# create_repo(repo_id, exist_ok=True)
# upload_folder(repo_id=repo_id, folder_path=save_path, repo_type='model')
```
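
For reference, a minimal sketch of loading the uploaded checkpoint from the Hub, assuming the commented-out `create_repo`/`upload_folder` steps above have been run:

```python
# Minimal usage sketch; assumes the checkpoint was uploaded to the Hub
# under the repo_id used in the script above.
from transformers import pipeline

pipe = pipeline(
    "text-generation",
    model="yujiepan/falcon-mamba-tiny-random",
    device="cpu",
    max_new_tokens=20,
)
# The weights are random, so the output is gibberish by design;
# the model is only useful for testing code paths.
print(pipe("Hello World!"))
```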