WebashalarForML committed on
Commit
5c3e86a
·
verified ·
1 Parent(s): 08f219c

Update utility/utils.py

Browse files
Files changed (1) hide show
  1. utility/utils.py +4 -3
utility/utils.py CHANGED
@@ -173,7 +173,7 @@ def extract_text_from_images(image_paths):
173
  # Function to call the Gemma model and process the output as Json
174
  def Data_Extractor(data, client=client):
175
  text = f'''Act as a Text extractor for the following text given in text: {data}
176
- extract text in the following output JSON string:
177
  {{
178
  "Name": ["Identify and Extract All the person's name from the text."],
179
  "Designation": ["Extract All the designation or job title mentioned in the text."],
@@ -182,8 +182,9 @@ def Data_Extractor(data, client=client):
182
  "Address": ["Extract All the full postal address or location mentioned in the text."],
183
  "Email": ["Identify and Extract All valid email addresses mentioned in the text else 'Not found'."],
184
  "Link": ["Identify and Extract any website URLs or social media links present in the text."]
185
- }}
186
- Output:
 
187
  '''
188
  # Call the API for inference
189
  response = client.text_generation(text, max_new_tokens=1000, temperature=0.4, top_k=50, top_p=0.9, repetition_penalty=1.2)
 
173
  # Function to call the Gemma model and process the output as Json
174
  def Data_Extractor(data, client=client):
175
  text = f'''Act as a Text extractor for the following text given in text: {data}
176
+ Extract text in the following output JSON string:
177
  {{
178
  "Name": ["Identify and Extract All the person's name from the text."],
179
  "Designation": ["Extract All the designation or job title mentioned in the text."],
 
182
  "Address": ["Extract All the full postal address or location mentioned in the text."],
183
  "Email": ["Identify and Extract All valid email addresses mentioned in the text else 'Not found'."],
184
  "Link": ["Identify and Extract any website URLs or social media links present in the text."]
185
+ }}
186
+
187
+ Output:
188
  '''
189
  # Call the API for inference
190
  response = client.text_generation(text, max_new_tokens=1000, temperature=0.4, top_k=50, top_p=0.9, repetition_penalty=1.2)