i finetuned the vit model from huggingface and wanted to do some prediction
# Get test image from the web
test_image_url = 'something.jpg'
response = requests.get(test_image_url)
test_image = Image.open(BytesIO(response.content))
# Resize and display the image
aspect_ratio = test_image.size[0] / test_image.size[1]
max_height = 250
resized_width = int(max_height * aspect_ratio)
resized_img = test_image.resize((resized_width, max_height))
display(resized_img)
this is the code for the prediction
# Predict the top k classes for the test image
inputs = feature_extractor(images=test_image, return_tensors="pt").to("cuda")
outputs = model(**inputs)
logits = outputs.logits
top_classes = torch.topk(outputs.logits, k_for_top_acc).indices.flatten().tolist()
for i, class_idx in enumerate(top_classes):
print(str(i + 1), "- Predicted class:", model.config.id2label[class_idx])
yet i keep getting this error
ValueError: Input image size (256*256) doesn't match model (224*224).