import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';
// Since we will download the model from the Hugging Face Hub, we can skip the local model check
env.allowLocalModels = false;
// Reference the elements that we will need
const status = document.getElementById('status');
const userInput = document.getElementById('user-input');
const outputContainer = document.getElementById('output');
const submitButton = document.getElementById('submit-button');
// Load the text-generation pipeline.
// Note: transformers.js runs ONNX models in the browser, so the checkpoint must
// provide ONNX weights (e.g. the converted models under the Xenova namespace);
// a gated 7B checkpoint such as meta-llama/Llama-2-7b-hf cannot be loaded here.
status.textContent = 'Loading model...';
const generator = await pipeline('text-generation', 'Xenova/distilgpt2');
status.textContent = 'Model loaded. Ready to chat!';
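// Optional sketch: the pipeline factory also accepts a `progress_callback` option,
// which can be used to surface download progress while the model loads. The payload
// fields used below (status, progress) are an assumption based on common
// transformers.js usage and may need adjusting.
// const generator = await pipeline('text-generation', 'Xenova/distilgpt2', {
//     progress_callback: (data) => {
//         if (data.status === 'progress') {
//             status.textContent = `Loading model... ${Math.round(data.progress)}%`;
//         }
//     },
// });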
// Add event listener to the submit button
submitButton.addEventListener('click', async () => {
    const inputText = userInput.value.trim();
    if (!inputText) {
        outputContainer.innerText = 'Please enter a prompt.';
        return;
    }

    // Update the status so the user knows we are processing
    status.textContent = 'Generating response...';

    try {
        // Generate text from the user input. Sampling must be enabled
        // (do_sample) for temperature/top_p to have any effect.
        const response = await generator(inputText, {
            max_new_tokens: 100,
            do_sample: true,
            temperature: 0.7, // Controls randomness; lower = more deterministic
            top_p: 0.95,      // Nucleus sampling
        });

        // Display the generated response (generated_text includes the original prompt)
        outputContainer.innerText = response[0].generated_text;
    } catch (error) {
        console.error(error);
        outputContainer.innerText = 'Error generating response. Please try again.';
    } finally {
        // Reset the status
        status.textContent = 'Model loaded. Ready to chat!';
    }
});
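
// Optional sketch: let the Enter key trigger the same handler as the submit button.
// This is plain DOM wiring, independent of transformers.js.
userInput.addEventListener('keydown', (e) => {
    if (e.key === 'Enter' && !e.shiftKey) {
        e.preventDefault();
        submitButton.click();
    }
});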