areeb-h committed on
Commit
f8098be
·
verified ·
1 Parent(s): 2cb6772

Update index.js

Browse files
Files changed (1) hide show
  1. index.js +27 -46
index.js CHANGED
@@ -1,64 +1,45 @@
1
  import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.10.1';
2
 
3
- // Disable local models
4
  env.allowLocalModels = false;
5
 
6
- // Reference the elements
7
  const status = document.getElementById('status');
8
  const userInput = document.getElementById('user-input');
9
  const outputContainer = document.getElementById('output');
10
  const submitButton = document.getElementById('submit-button');
11
 
12
- // Function to initialize and load the text-generation pipeline
13
- async function loadModel() {
14
- try {
15
- status.textContent = 'Loading model...';
16
- const generator = await pipeline('text-generation', 'EleutherAI/gpt-neo-125M');
17
- status.textContent = 'Model loaded. Ready to chat!';
18
- return generator;
19
- } catch (error) {
20
- console.error('Error loading model:', error);
21
- status.textContent = 'Failed to load model. Please try again later.';
22
- throw error; // Stop further execution if model loading fails
23
- }
24
- }
25
 
26
- // Initialize the model
27
  let generator;
28
- loadModel().then((loadedGenerator) => {
29
- generator = loadedGenerator;
 
 
 
 
 
30
 
31
- // Add event listener to the submit button after the model is ready
32
- submitButton.addEventListener('click', async () => {
33
- const inputText = userInput.value.trim();
34
 
35
- if (!inputText) {
36
- outputContainer.innerText = 'Please enter a prompt.';
37
- return;
38
- }
39
 
40
- // Update status to show we're processing
41
- status.textContent = 'Generating response...';
42
 
43
- try {
44
- const response = await generator(inputText, {
45
- max_new_tokens: 100,
46
- temperature: 0.7, // Controls randomness; lower = more deterministic
47
- top_p: 0.95, // Nucleus sampling
48
- });
49
 
50
- // Display the generated response
51
- outputContainer.innerText = response[0].generated_text;
52
- } catch (error) {
53
- console.error('Error generating response:', error);
54
- outputContainer.innerText = 'Error generating response. Please try again.';
55
- }
56
 
57
- // Reset the status
58
- status.textContent = 'Model loaded. Ready to chat!';
59
- });
60
- }).catch((error) => {
61
- console.error('Model initialization failed:', error);
62
- outputContainer.innerText = 'The application cannot proceed because the model failed to load.';
63
  });
64
-
 
1
  import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.10.1';
2
 
 
3
  env.allowLocalModels = false;
4
 
 
5
  const status = document.getElementById('status');
6
  const userInput = document.getElementById('user-input');
7
  const outputContainer = document.getElementById('output');
8
  const submitButton = document.getElementById('submit-button');
9
 
10
+ status.textContent = 'Loading model...';
 
 
 
 
 
 
 
 
 
 
 
 
11
 
 
12
  let generator;
13
+ try {
14
+ generator = await pipeline('text-generation', 'Xenova/gpt2');
15
+ status.textContent = 'Model loaded. Ready to chat!';
16
+ } catch (error) {
17
+ console.error('Error loading model:', error);
18
+ status.textContent = 'Failed to load model. Please try again later.';
19
+ }
20
 
21
+ submitButton.addEventListener('click', async () => {
22
+ const inputText = userInput.value.trim();
 
23
 
24
+ if (!inputText) {
25
+ outputContainer.innerText = 'Please enter a prompt.';
26
+ return;
27
+ }
28
 
29
+ status.textContent = 'Generating response...';
 
30
 
31
+ try {
32
+ const response = await generator(inputText, {
33
+ max_new_tokens: 50,
34
+ temperature: 0.7,
35
+ top_p: 0.95,
36
+ });
37
 
38
+ outputContainer.innerText = response[0].generated_text;
39
+ } catch (error) {
40
+ console.error('Error generating response:', error);
41
+ outputContainer.innerText = 'Error generating response. Please try again.';
42
+ }
 
43
 
44
+ status.textContent = 'Model loaded. Ready to chat!';
 
 
 
 
 
45
  });