import requests
import torch
import torch.nn.functional as F
from transformers import (
    AutoConfig,
    AutoImageProcessor,
    AutoModelForImageClassification,
    ImageClassificationPipeline,
)
from PIL import Image
from io import BytesIO

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model_name_or_path = "Akazi/resnet_c_s_redwood_finetuned"
config = AutoConfig.from_pretrained(model_name_or_path)
# This is an image model, so it ships an image processor rather than a tokenizer.
image_processor = AutoImageProcessor.from_pretrained(model_name_or_path)
model = AutoModelForImageClassification.from_pretrained(model_name_or_path)


class CustomImageClassificationPipeline(ImageClassificationPipeline):
    def _sanitize_parameters(self, **pipeline_parameters):
        # No extra parameters are forwarded to preprocess/_forward/postprocess.
        return {}, {}, {}

    def preprocess(self, inputs):
        # Accept a URL, a local file path, raw bytes, or an already-loaded PIL image.
        if isinstance(inputs, str):
            if inputs.startswith("http://") or inputs.startswith("https://"):
                response = requests.get(inputs)
                image = Image.open(BytesIO(response.content))
            else:
                image = Image.open(inputs)
        elif isinstance(inputs, bytes):
            image = Image.open(BytesIO(inputs))
        else:
            image = inputs
        model_inputs = image_processor(image, return_tensors="pt")
        return model_inputs.to(device)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits
        probabilities = F.softmax(logits, dim=1).detach().cpu().numpy()[0]
        labels = [config.id2label[i] for i in range(len(probabilities))]
        return [
            {"label": label, "score": round(float(score), 5)}
            for label, score in zip(labels, probabilities)
        ]


# Create the pipeline (the base class expects a loaded model, not a checkpoint name)
pipeline = CustomImageClassificationPipeline(
    model=model, image_processor=image_processor, device=device
)

# Example usage
image_path = "path/to/your/image.jpg"
results = pipeline(image_path)

# Print the results
for result in results:
    print(f"Label: {result['label']}, Score: {result['score']}")
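
# The custom preprocess step above also accepts http(s) URLs and raw bytes, so the
# same pipeline can classify a remote image directly. A minimal sketch, assuming the
# URL below is swapped for a real, reachable image (it is only a placeholder):
image_url = "https://example.com/sample_image.jpg"  # placeholder URL (assumption)
url_results = pipeline(image_url)
for result in url_results:
    print(f"Label: {result['label']}, Score: {result['score']}")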