abhilashnl2006 committed
Commit 0dc6a26 · verified · 1 Parent(s): e8bd372

Update app.py

Files changed (1):
  app.py +37 -45
app.py CHANGED
@@ -1,72 +1,64 @@
 import os
 import gradio as gr
-from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
-import torch
+from huggingface_hub import InferenceClient
 import logging
 
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
 
-# Change to a more suitable model for text generation
-model_name = "meta-llama/Llama-3.2-1B"  # Ensure this model is available for your use
-
-# In Hugging Face Spaces, the token is usually available as an environment variable
+model_name = "meta-llama/Llama-3.2-1B"
 hf_token = os.environ.get("HUGGINGFACE_TOKEN")
 if not hf_token:
-    logger.warning("HUGGINGFACE_TOKEN environment variable is not set. Some features may not work.")
+    logger.error("HUGGINGFACE_TOKEN environment variable is not set")
+    raise ValueError("HUGGINGFACE_TOKEN environment variable is not set")
 
-try:
-    tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=hf_token)
-    model = AutoModelForCausalLM.from_pretrained(model_name, use_auth_token=hf_token)
-    generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
-    logger.info(f"Successfully loaded model and tokenizer: {model_name}")
-except Exception as e:
-    logger.error(f"Failed to load model or tokenizer: {type(e).__name__}: {str(e)}")
-    # Fallback to a smaller, open-access model if the specified model fails to load
-    model_name = "distilgpt2"
-    tokenizer = AutoTokenizer.from_pretrained(model_name)
-    model = AutoModelForCausalLM.from_pretrained(model_name)
-    generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
-    logger.info(f"Fallback: Loaded model and tokenizer: {model_name}")
+client = InferenceClient(model=model_name, token=hf_token)
 
 def generate_text(prompt):
     try:
         logger.info(f"Attempting to generate text for prompt: {prompt[:50]}...")
 
-        response = generator(prompt, max_length=1000, num_return_sequences=1, do_sample=True, temperature=0.7)
-
-        generated_text = response[0]['generated_text']
+        response = client.text_generation(
+            prompt,
+            max_new_tokens=500,
+            temperature=0.7,
+            top_k=50,
+            top_p=0.95,
+            do_sample=True
+        )
 
-        logger.info(f"Generated text: {generated_text[:100]}...")
-        return generated_text
+        logger.info(f"Generated text: {response[:100]}...")
+        return response
     except Exception as e:
         logger.error(f"Error in generate_text: {type(e).__name__}: {str(e)}")
         return f"An error occurred: {type(e).__name__}: {str(e)}"
 
 def generate_email(industry, recipient_role, company_details):
     try:
-        prompt = f"""Generate a professional cold outreach email using the following details:
-Industry: {industry}
-Recipient Role: {recipient_role}
-Company Details: {company_details}
-Format the email as follows:
-Subject: [Insert catchy subject line related to the industry and recipient role]
-Dear [Recipient's Name],
-[Introduction paragraph: Briefly introduce yourself and your company]
-[Value proposition paragraph: Explain how your company can benefit the recipient, citing specific details from the company information]
-[Call to action paragraph: Suggest a meeting or call to discuss further]
-[Closing paragraph: Thank the recipient and provide your contact information]
-Best regards,
-[Your Name]
-[Your Title]
-[Your Company]
-Email:
-"""
+        prompt = f"""Task: Generate a professional cold outreach email.
+
+Context:
+- Industry: {industry}
+- Recipient Role: {recipient_role}
+- Company Details: {company_details}
+
+Instructions:
+1. Create a catchy subject line related to the industry and recipient role.
+2. Write a personalized greeting.
+3. Introduce yourself and your company briefly.
+4. Explain how your company can benefit the recipient, using specific details from the company information.
+5. Suggest a meeting or call to discuss further.
+6. Thank the recipient and provide your contact information.
+7. Use a professional closing.
+
+Now, write the email following these instructions. Be creative and specific, don't use placeholder text:
+
+"""
 
         generated_text = generate_text(prompt)
 
-        # Remove the prompt from the generated text
-        email_content = generated_text[generated_text.find("Subject:"):].strip()
+        # Remove any remaining prompt text if present
+        email_content = generated_text.split("Now, write the email following these instructions.")[-1].strip()
 
         logger.info(f"Generated email for {industry}, {recipient_role}")
         return email_content
@@ -76,7 +68,7 @@ def generate_email(industry, recipient_role, company_details):
 
 def test_model_connection():
     try:
-        test_prompt = "Generate a short, coherent paragraph about artificial intelligence:"
+        test_prompt = "Write a short paragraph about the importance of AI in modern business:"
         response = generate_text(test_prompt)
         logger.info(f"Test model connection successful. Response: {response}")
         return "Model connection test successful. Response: " + response