[Sample image of a page of the PDF]
I need to extract the images/diagrams from a scanned PDF with Python, where there are no clear boundaries between the diagrams and the surrounding text. For the text I can use OCR, but for the diagrams libraries like PyMuPDF and pdfminer don't help, because the PDF is a scanned copy of a printed question paper, so every online tool just treats the whole page as one image. I have had partial success with pdf2image + OpenCV and with Google Cloud Vision, but the results are still not accurate: sometimes the same diagram is extracted multiple times as separate pieces, and sometimes a diagram is not extracted at all. I would appreciate any help.
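For what it's worth, a quick PyMuPDF check like the one below (the path is a placeholder) is the kind of thing that shows why these libraries can't help here: on a scanned paper each page should report only a single full-page image, so there are no separate embedded diagram objects to pull out.

import fitz  # PyMuPDF

pdf_path = "Sample pdf.pdf"  # placeholder path
doc = fitz.open(pdf_path)
for page_num, page in enumerate(doc):
    images = page.get_images(full=True)
    # On a scanned PDF this typically lists one image covering the whole page,
    # so extracting it just gives the page back, not the individual diagrams.
    print(f"Page {page_num + 1}: {len(images)} embedded image(s)")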
I have tried two methods; both have accuracy issues, such as missing some diagrams completely or extracting the same diagram more than once. Here is the code for the first method:
import cv2
import numpy as np
import os

def preprocess(img):
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_blur = cv2.GaussianBlur(img_gray, (5, 5), 1)
    img_canny = cv2.Canny(img_blur, 50, 50)
    return img_canny

def get_rois(img, pad=3):
    contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    rois = []
    for cnt in contours:
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)
        x, y, w, h = cv2.boundingRect(approx)
        if w * h > 10000:  # Filter out small contours based on area
            # Clamp the padded box so it never goes outside the image
            rois.append((max(x - pad, 0), max(y - pad, 0), w + pad * 2, h + pad * 2))
    return rois

# Load image
img_path = "C:/Users/Lenovo/OneDrive/Desktop/Images/page 4.png"
img = cv2.imread(img_path)

# Check if the image is loaded properly
if img is None:
    print(f"Error: Unable to load image from {img_path}")
    exit()

img_processed = preprocess(img)
rois = get_rois(img_processed)

output_folder = "extracted_diagrams"
os.makedirs(output_folder, exist_ok=True)

# Iterate through all detected regions
for idx, (x, y, w, h) in enumerate(rois):
    roi = img[y:y + h, x:x + w]
    cv2.imshow(f"Diagram {idx + 1}", roi)
    cv2.imwrite(os.path.join(output_folder, f"diagram_{idx + 1}.png"), roi)

cv2.waitKey(0)
cv2.destroyAllWindows()
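To deal with the duplicate extractions from this first method, I have been thinking about merging overlapping boxes before cropping, along the lines of the sketch below (merge_overlapping_boxes and the 0.2 IoU threshold are my own guesses, not something I have tuned):

def merge_overlapping_boxes(boxes, iou_thresh=0.2):
    """Greedily merge (x, y, w, h) boxes whose intersection-over-union
    exceeds the threshold. The threshold is an arbitrary starting value."""
    def iou(a, b):
        ax, ay, aw, ah = a
        bx, by, bw, bh = b
        ix1, iy1 = max(ax, bx), max(ay, by)
        ix2, iy2 = min(ax + aw, bx + bw), min(ay + ah, by + bh)
        inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)
        union = aw * ah + bw * bh - inter
        return inter / union if union else 0.0

    merged = []
    for box in boxes:
        for i, m in enumerate(merged):
            if iou(box, m) > iou_thresh:
                # Replace the stored box with the union of the two boxes
                x1, y1 = min(box[0], m[0]), min(box[1], m[1])
                x2 = max(box[0] + box[2], m[0] + m[2])
                y2 = max(box[1] + box[3], m[1] + m[3])
                merged[i] = (x1, y1, x2 - x1, y2 - y1)
                break
        else:
            merged.append(box)
    return merged

# e.g. rois = merge_overlapping_boxes(get_rois(img_processed))

The second method, using pdf2image plus contour merging and an OCR check to skip text regions, is here: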
import cv2
import numpy as np
from pdf2image import convert_from_path
import os
import pytesseract

# Function to save images
def save_image(image, path, page_num, img_num):
    filename = f"page_{page_num + 1}_image_{img_num + 1}.png"
    cv2.imwrite(os.path.join(path, filename), image)

# Function to check if a region contains text using OCR
def contains_text(image):
    text = pytesseract.image_to_string(image)
    return len(text.strip()) > 10  # Adjust threshold as needed

# Convert PDF pages to images
pdf_path = "D:/Files/Internship/Sample pdf.pdf"
output_folder = "extracted_images"
os.makedirs(output_folder, exist_ok=True)
pages = convert_from_path(pdf_path)

for page_num, page in enumerate(pages):
    print(f"Processing page {page_num + 1}/{len(pages)}")

    # Convert page to OpenCV image
    open_cv_image = np.array(page)
    open_cv_image = open_cv_image[:, :, ::-1].copy()  # Convert RGB to BGR

    # Convert to grayscale
    gray = cv2.cvtColor(open_cv_image, cv2.COLOR_BGR2GRAY)

    # Use GaussianBlur to remove noise and improve edge detection
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)

    # Edge detection using Canny
    edges = cv2.Canny(blurred, 50, 150)

    # Find contours
    contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    print(f"Found {len(contours)} contours")

    # Simplified merging of nearby contours
    merged_contours = []
    for i in range(len(contours)):
        if contours[i].size == 0 or cv2.contourArea(contours[i]) == 0:
            continue
        merged = False
        for j in range(len(merged_contours)):
            if merged_contours[j].size == 0:
                continue
            x1, y1, w1, h1 = cv2.boundingRect(merged_contours[j])
            x2, y2, w2, h2 = cv2.boundingRect(contours[i])
            # Merge when the two bounding boxes overlap
            if x1 < x2 + w2 and x1 + w1 > x2 and y1 < y2 + h2 and y1 + h1 > y2:
                merged_contours[j] = np.vstack((merged_contours[j], contours[i]))
                merged = True
                break
        if not merged:
            merged_contours.append(contours[i])
    print(f"Merged into {len(merged_contours)} contours")

    img_num = 0
    for cnt in merged_contours:
        if cnt.size == 0:
            continue
        x, y, w, h = cv2.boundingRect(cnt)
        # Filter out small and near-page-sized regions
        if w > 50 and h > 50 and w < 0.9 * open_cv_image.shape[1] and h < 0.9 * open_cv_image.shape[0]:
            img = open_cv_image[y:y + h, x:x + w]
            if not contains_text(img):
                save_image(img, output_folder, page_num, img_num)
                print(f"Saved image {img_num + 1} for page {page_num + 1}")
                img_num += 1

print("Images extracted and saved in:", output_folder)