Files changed (1)
  1. README.md +15 -9
README.md CHANGED
@@ -61,13 +61,21 @@ Please refer to the [github repository](https://github.com/AI4Bharat/IndicTrans2
 ```python
 import torch
 from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
-from IndicTransTokenizer import IndicProcessor
-
+from IndicTransToolkit import IndicProcessor
+# recommended to run this on a GPU with flash_attn installed
+# don't set attn_implementation if you don't have flash_attn
+DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 
+src_lang, tgt_lang = "eng_Latn", "hin_Deva"
 model_name = "ai4bharat/indictrans2-en-indic-dist-200M"
 tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
 
-model = AutoModelForSeq2SeqLM.from_pretrained(model_name, trust_remote_code=True)
+model = AutoModelForSeq2SeqLM.from_pretrained(
+    model_name,
+    trust_remote_code=True,
+    torch_dtype=torch.float16,  # performance may vary slightly with bfloat16
+    attn_implementation="flash_attention_2",
+).to(DEVICE)
 
 ip = IndicProcessor(inference=True)
 
@@ -78,12 +86,8 @@ input_sentences = [
     "My friend has invited me to his birthday party, and I will give him a gift.",
 ]
 
-src_lang, tgt_lang = "eng_Latn", "hin_Deva"
-
 batch = ip.preprocess_batch(input_sentences, src_lang=src_lang, tgt_lang=tgt_lang)
 
-DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
-
 # Tokenize the sentences and generate input encodings
 inputs = tokenizer(
     batch,
@@ -120,8 +124,10 @@ for input_sentence, translation in zip(input_sentences, translations):
     print(f"{tgt_lang}: {translation}")
 ```
 
-**Note: IndicTrans2 is now compatible with AutoTokenizer, however you need to use IndicProcessor from [IndicTransTokenizer](https://github.com/VarunGumma/IndicTransTokenizer) for preprocessing before tokenization.**
-
+### 📢 Long Context IT2 Models
+- New RoPE-based IndicTrans2 models that can handle sequence lengths of **up to 2048 tokens** are available [here](https://huggingface.co/collections/prajdabre/indictrans2-rope-6742ddac669a05db0804db35).
+- These models can be used by simply changing the `model_name` parameter. Please read the model card of the RoPE-IT2 models for more information about generation.
+- It is recommended to run these models with `flash_attention_2` for efficient generation.
 
 
 ### Citation
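
The new comments warn against setting `attn_implementation` when flash_attn is not installed. A minimal sketch of that guard, assuming only that the `flash_attn` package is importable when it is installed; the `kwargs` dict and the `find_spec` check are illustrative, not part of the README:

```python
import importlib.util

import torch
from transformers import AutoModelForSeq2SeqLM

model_name = "ai4bharat/indictrans2-en-indic-dist-200M"
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Request flash_attention_2 only when flash_attn is actually importable;
# otherwise fall back to the default attention implementation.
kwargs = {"trust_remote_code": True, "torch_dtype": torch.float16}
if DEVICE == "cuda" and importlib.util.find_spec("flash_attn") is not None:
    kwargs["attn_implementation"] = "flash_attention_2"

model = AutoModelForSeq2SeqLM.from_pretrained(model_name, **kwargs).to(DEVICE)
```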
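
The hunks above show only the changed lines; the tokenization arguments and the generation, decoding, and postprocessing steps between them are unchanged and therefore elided. A condensed sketch of how the updated pieces fit together, with generation arguments as plausible placeholders rather than values copied from the README (`postprocess_batch` is IndicProcessor's counterpart to `preprocess_batch`):

```python
batch = ip.preprocess_batch(input_sentences, src_lang=src_lang, tgt_lang=tgt_lang)

# Tokenize the preprocessed sentences and move the tensors to the model's device
inputs = tokenizer(
    batch, padding="longest", truncation=True, return_tensors="pt"
).to(DEVICE)

# Generate translations; beam size and length limit are placeholders
with torch.no_grad():
    generated_tokens = model.generate(**inputs, num_beams=5, max_length=256)

# Decode token ids back to text, then undo the IndicProcessor preprocessing
decoded = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
translations = ip.postprocess_batch(decoded, lang=tgt_lang)

for input_sentence, translation in zip(input_sentences, translations):
    print(f"{src_lang}: {input_sentence}")
    print(f"{tgt_lang}: {translation}")
```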
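
And per the new Long Context note, the RoPE-IT2 checkpoints are meant to be drop-in replacements via `model_name`. A sketch with a hypothetical checkpoint id standing in for whichever model you pick from the linked collection (check the collection page for the real names):

```python
# Hypothetical checkpoint id; substitute a real one from the
# prajdabre IndicTrans2-RoPE collection on the Hugging Face Hub.
model_name = "prajdabre/indictrans2-en-indic-dist-200M-rope"

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForSeq2SeqLM.from_pretrained(
    model_name,
    trust_remote_code=True,
    torch_dtype=torch.float16,
    attn_implementation="flash_attention_2",  # recommended for these models
).to(DEVICE)
```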