vendetta25 committed on
Commit
faf81e1
·
verified ·
1 Parent(s): c6103d4

Rename pentesting_assistant.py to chat_ai.py

Browse files
Files changed (2) hide show
  1. chat_ai.py +40 -0
  2. pentesting_assistant.py +0 -18
chat_ai.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Fine-tune ALBERT (albert-base-v2) for sequence classification.

Loads a Hugging Face dataset, tokenizes its ``text`` column, trains the
model for a few epochs, and saves the fine-tuned model — together with
its tokenizer — to a local directory.
"""

from transformers import (
    AlbertForSequenceClassification,
    AlbertTokenizer,
    Trainer,
    TrainingArguments,
)
from datasets import load_dataset

# Load dataset (replace with your own dataset name and config/version).
dataset = load_dataset('your_username/your_dataset_name', 'your_dataset_version')

# Load tokenizer and model (pretrained ALBERT base, v2).
tokenizer = AlbertTokenizer.from_pretrained('google/albert-base-v2')
model = AlbertForSequenceClassification.from_pretrained('google/albert-base-v2')


def preprocess_function(examples):
    """Tokenize a batch of examples, padding/truncating to the model max length.

    Assumes each example has a ``text`` field — TODO confirm against the
    actual dataset schema.
    """
    return tokenizer(examples['text'], truncation=True, padding='max_length')


# Tokenize every split; batched=True processes many rows per call.
encoded_dataset = dataset.map(preprocess_function, batched=True)

# Training configuration: evaluate and checkpoint once per epoch.
training_args = TrainingArguments(
    output_dir='./results',
    per_device_train_batch_size=8,
    num_train_epochs=3,
    evaluation_strategy="epoch",
    save_strategy="epoch",
)

# FIX: pass the tokenizer to the Trainer so save_model() also writes the
# tokenizer files next to the model weights; without it the saved
# directory cannot be reloaded for inference with from_pretrained().
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=encoded_dataset['train'],
    eval_dataset=encoded_dataset['validation'],
    tokenizer=tokenizer,
)

# Train, then persist the fine-tuned model and tokenizer.
trainer.train()
trainer.save_model('./your_private_albert_model')
pentesting_assistant.py DELETED
@@ -1,18 +0,0 @@
1
"""Interactive console chat loop backed by a locally fine-tuned ALBERT model.

Reads user prompts from stdin, generates a reply with a transformers
pipeline, and prints it. Typing "keluar" (Indonesian for "exit") — or
sending EOF / Ctrl-C — ends the session.
"""

from transformers import pipeline, AlbertTokenizer, AlbertForSequenceClassification

# Load the fine-tuned model and its tokenizer from the local directory.
model = AlbertForSequenceClassification.from_pretrained('./pentesting_albert_model')
tokenizer = AlbertTokenizer.from_pretrained('./pentesting_albert_model')

# NOTE(review): AlbertForSequenceClassification is a classification head,
# not a language-modeling head, so the "text-generation" pipeline cannot
# actually generate text with it — this will fail at runtime. The model
# should be loaded with a causal-LM class; confirm the intended
# architecture before changing it.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

while True:
    # FIX: input() raises EOFError on Ctrl-D and KeyboardInterrupt on
    # Ctrl-C; exit the loop cleanly instead of crashing with a traceback.
    try:
        user_input = input("Kamu: ")
    except (EOFError, KeyboardInterrupt):
        break
    if user_input.lower() == "keluar":
        break

    # Generate a single sampled response (top-k / nucleus sampling).
    response = generator(
        user_input,
        max_length=50,
        num_return_sequences=1,
        do_sample=True,
        top_k=50,
        top_p=0.95,
    )

    print("Asisten: ", response[0]['generated_text'])