minko186 committed on
Commit 708f094 · 1 Parent(s): da88846

format updates + search added to RAG instead

Files changed (3)
  1. ai_generate.py +103 -129
  2. app.py +46 -28
  3. plagiarism.py → google_search.py +0 -0
ai_generate.py CHANGED
@@ -11,14 +11,16 @@ import google.generativeai as genai
 import anthropic
 from langchain_community.document_loaders import PyMuPDFLoader
 from langchain_community.document_loaders import TextLoader
+from langchain_core.documents import Document
 from langchain_community.embeddings.sentence_transformer import (
     SentenceTransformerEmbeddings,
 )
+from langchain.schema import StrOutputParser
 from langchain_community.vectorstores import Chroma
 from langchain_text_splitters import CharacterTextSplitter
 from langchain import hub
 from langchain_core.output_parsers import StrOutputParser
-from langchain_core.runnables import RunnablePassthrough
+from langchain_core.runnables import RunnablePassthrough, RunnableMap
 from langchain.chains import RetrievalQA
 from langchain_groq import ChatGroq
 from langchain_openai import ChatOpenAI
@@ -46,147 +48,119 @@ claude_client = anthropic.Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))
 temperature = 1.0
 max_tokens = 2048
 
-rag_llms = {
-    "LLaMA 3": ChatGroq(
-        temperature=temperature,
-        max_tokens=max_tokens,
-        model_name="llama3-70b-8192",
-    ),
-    "OpenAI GPT 4o Mini": ChatOpenAI(
-        temperature=temperature,
-        max_tokens=max_tokens,
-        model_name="gpt-4o-mini",
-    ),
-    "OpenAI GPT 4o": ChatOpenAI(
-        temperature=temperature,
-        max_tokens=max_tokens,
-        model_name="gpt-4o",
-    ),
-    "OpenAI GPT 4": ChatOpenAI(
-        temperature=temperature,
-        max_tokens=max_tokens,
-        model_name="gpt-4-turbo",
-    ),
-    "Gemini 1.5 Pro": ChatGoogleGenerativeAI(temperature=temperature, max_tokens=max_tokens, model="gemini-1.5-pro"),
-    "Claude Sonnet 3.5": ChatAnthropic(
-        temperature=temperature,
-        max_tokens=max_tokens,
-        model_name="claude-3-5-sonnet-20240620",
-    ),
-}
+llm_model_translation = {
+    "LLaMA 3": "llama3-70b-8192",
+    "OpenAI GPT 4o Mini": "gpt-4o-mini",
+    "OpenAI GPT 4o": "gpt-4o",
+    "OpenAI GPT 4": "gpt-4-turbo",
+    "Gemini 1.5 Pro": "gemini-1.5-pro",
+    "Claude Sonnet 3.5": "claude-3-5-sonnet-20240620",
+}
+
+llm_classes = {
+    "llama3-70b-8192": ChatGroq,
+    "gpt-4o-mini": ChatOpenAI,
+    "gpt-4o": ChatOpenAI,
+    "gpt-4-turbo": ChatOpenAI,
+    "gemini-1.5-pro": ChatGoogleGenerativeAI,
+    "claude-3-5-sonnet-20240620": ChatAnthropic,
+}
+
+
+def load_llm(model: str, api_key: str, temperature: float = 1.0, max_length: int = 2048):
+    model_name = llm_model_translation.get(model)
+    llm_class = llm_classes.get(model_name)
+    if not llm_class:
+        raise ValueError(f"Model {model} not supported.")
+    try:
+        llm = llm_class(model_name=model_name, temperature=temperature, max_tokens=max_length)
+    except Exception as e:
+        print(f"An error occurred: {e}")
+        llm = None
+    return llm
 
 
-def create_db_with_langchain(path):
+def create_db_with_langchain(path: list[str], url_content: dict):
     all_docs = []
-    for file in path:
-        loader = PyMuPDFLoader(file)
-        data = loader.load()
-        # split it into chunks
-        text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
-        docs = text_splitter.split_documents(data)
-        all_docs.extend(docs)
-
-    # create the open-source embedding function
+    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
     embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
-
-    # load it into Chroma
+    if path:
+        for file in path:
+            loader = PyMuPDFLoader(file)
+            data = loader.load()
+            # split it into chunks
+            docs = text_splitter.split_documents(data)
+            all_docs.extend(docs)
+
+    if url_content:
+        for url, content in url_content.items():
+            doc = Document(page_content=content, metadata={"source": url})
+            # split it into chunks
+            docs = text_splitter.split_documents([doc])
+            all_docs.extend(docs)
+
+    assert len(all_docs) > 0, "No PDFs or scrapped data provided"
     db = Chroma.from_documents(all_docs, embedding_function)
     return db
 
 
-def generate_rag(text, model, path):
-    print(f"Generating text using RAG for {model}...")
-    llm = rag_llms[model]
-    db = create_db_with_langchain(path)
+def generate_rag(
+    prompt: str,
+    topic: str,
+    model: str,
+    url_content: dict,
+    path: list[str],
+    temperature: float = 1.0,
+    max_length: int = 2048,
+    api_key: str = "",
+    sys_message="",
+):
+    llm = load_llm(model, api_key, temperature, max_length)
+    if llm is None:
+        print("Failed to load LLM. Aborting operation.")
+        return None
+    db = create_db_with_langchain(path, url_content)
     retriever = db.as_retriever(search_type="mmr", search_kwargs={"k": 4, "fetch_k": 20})
-    prompt = hub.pull("rlm/rag-prompt")
+    rag_prompt = hub.pull("rlm/rag-prompt")
 
     def format_docs(docs):
         return "\n\n".join(doc.page_content for doc in docs)
 
-    rag_chain = {"context": retriever | format_docs, "question": RunnablePassthrough()} | prompt | llm
-    return rag_chain.invoke(text).content
-
-
-def generate_groq(text, model):
-    completion = groq_client.chat.completions.create(
-        model=model,
-        messages=[
-            {"role": "user", "content": text},
-            {
-                "role": "assistant",
-                "content": "Please follow the instruction and write about the given topic in approximately the given number of words",
-            },
-        ],
-        temperature=temperature,
-        max_tokens=max_tokens,
-        stream=True,
-        stop=None,
-    )
-    response = ""
-    for i, chunk in enumerate(completion):
-        if i != 0:
-            response += chunk.choices[0].delta.content or ""
-    return response
-
-
-def generate_openai(text, model, openai_client):
-    message = [{"role": "user", "content": text}]
-    response = openai_client.chat.completions.create(
-        model=model,
-        messages=message,
-        temperature=temperature,
-        max_tokens=max_tokens,
-    )
-    return response.choices[0].message.content
-
-
-def generate_gemini(text, model, gemini_client):
-    safety_settings = {
-        generative_models.HarmCategory.HARM_CATEGORY_HATE_SPEECH: generative_models.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
-        generative_models.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: generative_models.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
-        generative_models.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: generative_models.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
-        generative_models.HarmCategory.HARM_CATEGORY_HARASSMENT: generative_models.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
-    }
-    generation_config = {
-        "max_output_tokens": max_tokens,
-        "temperature": temperature,
-    }
-    response = gemini_client.generate_content(
-        [text],
-        generation_config=generation_config,
-        safety_settings=safety_settings,
-        stream=False,
-    )
-    return response.text
-
-
-def generate_claude(text, model, claude_client):
-    response = claude_client.messages.create(
-        model=model,
-        max_tokens=max_tokens,
-        temperature=temperature,
-        system="You are helpful assistant.",
-        messages=[{"role": "user", "content": [{"type": "text", "text": text}]}],
-    )
-    return response.content[0].text.strip()
-
-
-def generate(text, model, path, api=None):
-    if path:
-        result = generate_rag(text, model, path)
-        return result
+    docs = retriever.get_relevant_documents(topic)
+    formatted_docs = format_docs(docs)
+    rag_chain = (
+        {"context": lambda _: formatted_docs, "question": RunnablePassthrough()} | rag_prompt | llm | StrOutputParser()
+    )
+    return rag_chain.invoke(prompt)
+
+
+def generate_base(
+    prompt: str, topic: str, model: str, temperature: float, max_length: int, api_key: str, sys_message=""
+):
+    llm = load_llm(model, api_key, temperature, max_length)
+    if llm is None:
+        print("Failed to load LLM. Aborting operation.")
+        return None
+    try:
+        output = llm.invoke(prompt).content
+        return output
+    except Exception as e:
+        print(f"An error occurred while running the model: {e}")
+        return None
+
+
+def generate(
+    prompt: str,
+    topic: str,
+    model: str,
+    url_content: dict,
+    path: list[str],
+    temperature: float = 1.0,
+    max_length: int = 2048,
+    api_key: str = "",
+    sys_message="",
+):
+    if path or url_content:
+        return generate_rag(prompt, topic, model, url_content, path, temperature, max_length, api_key, sys_message)
     else:
-        print(f"Generating text for {model}...")
-        if model == "LLaMA 3":
-            return generate_groq(text, "llama3-70b-8192")
-        elif model == "OpenAI GPT 4o Mini":
-            return generate_openai(text, "gpt-4o-mini", openai_client)
-        elif model == "OpenAI GPT 4o":
-            return generate_openai(text, "gpt-4o", openai_client)
-        elif model == "OpenAI GPT 4":
-            return generate_openai(text, "gpt-4-turbo", openai_client)
-        elif model == "Gemini 1.5 Pro":
-            return generate_gemini(text, "", gemini_client)
-        elif model == "Claude Sonnet 3.5":
-            return generate_claude(text, "claude-3-5-sonnet-20240620", claude_client)
+        return generate_base(prompt, topic, model, temperature, max_length, api_key, sys_message)
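For orientation, a minimal usage sketch of the reworked entry point; this is not part of the commit, and the prompt, topic, PDF path, and scraped-URL dict below are hypothetical placeholders:

    # Hypothetical usage of the new generate() signature (sketch, not in the commit)
    from ai_generate import generate

    article = generate(
        prompt="Write a short article on retrieval-augmented generation.",  # hypothetical prompt
        topic="retrieval-augmented generation",  # used to query the MMR retriever
        model="Claude Sonnet 3.5",  # key into llm_model_translation
        url_content={"https://example.com/rag": "scraped page text ..."},  # hypothetical scraped results
        path=["example.pdf"],  # hypothetical uploaded PDF
    )
    # With path or url_content set this routes to generate_rag(); with neither, generate_base().
    print(article)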
app.py CHANGED
@@ -16,7 +16,7 @@ from transformers import GPT2LMHeadModel, GPT2TokenizerFast
 from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
 
 from utils import remove_special_characters
-from plagiarism import google_search, months, domain_list, build_date
+from google_search import google_search, months, domain_list, build_date
 from humanize import paraphrase_text, device
 from ai_generate import generate
 
@@ -261,6 +261,8 @@ def generate_prompt(settings: Dict[str, str]) -> str:
     prompt = f"""
 I am a {settings['role']}
 Write a {settings['article_length']} words (around) {settings['format']} on {settings['topic']}.
+Context:
+- {settings['context']}
 
 Style and Tone:
 - Writing style: {settings['writing_style']}
@@ -282,8 +284,6 @@ def generate_prompt(settings: Dict[str, str]) -> str:
 - Add a "References" section in the format "References:" on a new line at the end with at least 3 credible detailed sources, formatted as [1], [2], etc. with each source on their own line
 - Do not repeat sources
 - Do not make any headline, title bold.
-
-{settings['sources']}
 
 Ensure proper paragraph breaks for better readability.
 Avoid any references to artificial intelligence, language models, or the fact that this is generated by an AI, and do not mention something like here is the article etc.
@@ -296,15 +296,16 @@ def regenerate_prompt(settings: Dict[str, str]) -> str:
 I am a {settings['role']}
 "{settings['generated_article']}"
 Edit the given text based on user comments.
+User Comments:
+- {settings['user_comments']}
 
-Comments:
+Requirements:
 - Don't start with "Here is a...", start with the requested text directly
-- {settings['user_comments']}
 - The original content should not be changed. Make minor modifications based on user comments above.
 - Keep the references the same as the given text in the same format.
 - Do not make any headline, title bold.
-
-{settings['sources']}
+Context:
+- {settings['context']}
 
 Ensure proper paragraph breaks for better readability.
 Avoid any references to artificial intelligence, language models, or the fact that this is generated by an AI, and do not mention something like here is the article etc.
@@ -315,6 +316,7 @@ def regenerate_prompt(settings: Dict[str, str]) -> str:
 def generate_article(
     input_role: str,
     topic: str,
+    context: str,
     keywords: str,
     article_length: str,
     format: str,
@@ -328,14 +330,16 @@
     conclusion_type: str,
     ai_model: str,
     content_string: str,
+    url_content: str = None,
     # api_key: str = None,
-    pdf_file_input=None,
+    pdf_file_input: list[str] = None,
     generated_article: str = None,
     user_comments: str = None,
 ) -> str:
     settings = {
         "role": input_role,
         "topic": topic,
+        "context": context,
         "keywords": [k.strip() for k in keywords.split(",")],
         "article_length": article_length,
         "format": format,
@@ -360,7 +364,9 @@
     print("Generated Prompt...\n", prompt)
     article = generate(
         prompt,
+        topic,
         ai_model,
+        url_content,
         pdf_file_input,  # api_key
     )
 
@@ -399,6 +405,7 @@ def update_visibility_api(model: str):
 def generate_and_format(
     input_role,
     topic,
+    context,
     keywords,
     article_length,
     format,
@@ -410,7 +417,7 @@
     references,
     num_examples,
     conclusion_type,
-    ai_model,
+    # ai_model,
     # api_key,
     google_search_check,
     year_from,
@@ -428,6 +435,7 @@
 ):
     content_string = ""
     url_content = None
+    ai_model = "Claude Sonnet 3.5"
     if google_search_check:
         date_from = build_date(year_from, month_from, day_from)
         date_to = build_date(year_to, month_to, day_to)
@@ -450,6 +458,7 @@
     article = generate_article(
         input_role,
         topic,
+        context,
         keywords,
         article_length,
         format,
@@ -463,6 +472,7 @@
         conclusion_type,
         ai_model,
         content_string,
+        url_content,
         # api_key,
         pdf_file_input,
         generated_article,
@@ -500,6 +510,11 @@ def create_interface():
                 placeholder="Enter the main topic of your article",
                 elem_classes="input-highlight-pink",
             )
+            input_context = gr.Textbox(
+                label="Context",
+                placeholder="Provide some context for your topic",
+                elem_classes="input-highlight-pink",
+            )
             input_keywords = gr.Textbox(
                 label="Keywords",
                 placeholder="Enter comma-separated keywords",
@@ -667,23 +682,24 @@ def create_interface():
         gr.Markdown("# Add Optional PDF Files with Information", elem_classes="text-center text-3xl mb-6")
         pdf_file_input = gr.File(label="Upload PDF(s)", file_count="multiple", file_types=[".pdf"])
 
-        with gr.Group():
-            gr.Markdown("## AI Model Configuration", elem_classes="text-xl mb-4")
-            ai_generator = gr.Dropdown(
-                choices=[
-                    "OpenAI GPT 4",
-                    "OpenAI GPT 4o",
-                    "OpenAI GPT 4o Mini",
-                    "Claude Sonnet 3.5",
-                    "Gemini 1.5 Pro",
-                    "LLaMA 3",
-                ],
-                value="OpenAI GPT 4o Mini",
-                label="AI Model",
-                elem_classes="input-highlight-pink",
-            )
-            # input_api = gr.Textbox(label="API Key", visible=False)
-            # ai_generator.change(update_visibility_api, ai_generator, input_api)
+        # HIDE AI MODEL SELECTION
+        # with gr.Group():
+        #     gr.Markdown("## AI Model Configuration", elem_classes="text-xl mb-4")
+        #     ai_generator = gr.Dropdown(
+        #         choices=[
+        #             "OpenAI GPT 4",
+        #             "OpenAI GPT 4o",
+        #             "OpenAI GPT 4o Mini",
+        #             "Claude Sonnet 3.5",
+        #             "Gemini 1.5 Pro",
+        #             "LLaMA 3",
+        #         ],
+        #         value="OpenAI GPT 4o Mini",
+        #         label="AI Model",
+        #         elem_classes="input-highlight-pink",
+        #     )
+        # input_api = gr.Textbox(label="API Key", visible=False)
+        # ai_generator.change(update_visibility_api, ai_generator, input_api)
 
         generate_btn = gr.Button("Generate Article", variant="primary")
 
@@ -762,6 +778,7 @@
         inputs=[
             input_role,
             input_topic,
+            input_context,
             input_keywords,
             input_length,
             input_format,
@@ -773,7 +790,7 @@
             input_references,
             input_num_examples,
             input_conclusion,
-            ai_generator,
+            # ai_generator,
            # input_api,
             google_search_check,
             year_from,
@@ -795,6 +812,7 @@
         inputs=[
             input_role,
             input_topic,
+            input_context,
             input_keywords,
             input_length,
             input_format,
@@ -806,7 +824,7 @@
             input_references,
             input_num_examples,
             input_conclusion,
-            ai_generator,
+            # ai_generator,
             # input_api,
             google_search_check,
             year_from,
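Taken together with ai_generate.py, the app-side flow after this commit is roughly the sketch below. The google_search return shape is an assumption inferred from how create_db_with_langchain consumes url_content, and the elided call is left as "..."; the generate() call mirrors the positional arguments in generate_article above:

    # Sketch of the new flow in app.py (assumptions noted inline)
    ai_model = "Claude Sonnet 3.5"  # hardcoded now that the model dropdown is hidden
    url_content = None
    if google_search_check:
        # assumed shape: {url: scraped_text}, as consumed by create_db_with_langchain
        url_content = ...
    article = generate(
        prompt,          # built by generate_prompt(), now including the new Context field
        topic,
        ai_model,
        url_content,     # scraped pages are embedded into the Chroma store for RAG
        pdf_file_input,  # optional uploaded PDFs
    )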
plagiarism.py → google_search.py RENAMED
File without changes