I am trying to scrape all available data for each “disposal type” from the following website: website link (Disposed Dashboard -> Nature of Disposal). The table displays up to 1000 entries per page and has a “Next” button that loads the next set of 1000 entries.
Issue: My current Python code (attached below) fetches only the first 1000 entries and never retrieves the subsequent pages, even though the table has more. For example, the “Allowed” category has 2400 entries, but my code returns only the first 1000.
How can I retrieve all the available data by following the “Next” button?
Here’s a screenshot of the table for reference:
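Judging by the request payload in my code below, the endpoint looks like a legacy DataTables server-side source (sEcho / iDisplayStart / iDisplayLength), so I assume each click of “Next” just advances iDisplayStart by the page length. A minimal sketch of the offsets I would expect the pages to use (my reading of the parameter names, not confirmed against the site):

# Hypothetical paging arithmetic for a DataTables-style endpoint
page_length = 1000
total_records = 2400  # e.g. the "Allowed" category
for start in range(0, total_records, page_length):
    print(start)  # iDisplayStart values: 0, 1000, 2000

My current code: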
import requests
import json
# Define the base URL for the POST request
url = "https://njdg.ecourts.gov.in/scnjdg/?p=home/casewiseList"
# Define the headers (same for all requests)
headers = {
    'Accept': 'application/json, text/javascript, */*; q=0.01',
    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
    'X-Requested-With': 'XMLHttpRequest',
    'Cookie': 'YOUR_COOKIE_DATA_HERE',
}
# List with all disposal types and corresponding app tokens
disposed_types = [
    {'val': 'Allowed', 'app_token': '2b0e6e5055adb7c128398139170dca6e6b72e07f98c8d3f0f009d96831132a47'},
    {'val': 'Conditional Order', 'app_token': '82008d8eeeabbcf84afc073d52511177d2a7a5027b4e3d84dd5b635d4f0b3b0f'},
    {'val': 'Dismissed', 'app_token': '7822e5682b3fe4c4e9f41fbbb16c559f5f29b12e32712e67530dbc910d490a54'},
    {'val': 'Disposed of', 'app_token': '1e286253c751422f9ad0ae4af7913cffc8c8e00b35664a5fd32056eea9387ce4'},
    {'val': 'Granted', 'app_token': '57d35c7e14334c7fdff87bbc9cf46a385ea79e9e3a80b778cbc43614e466f22a'},
    {'val': 'Others', 'app_token': 'b30e998579ac60d3c517fb7291257138c0575a2660fbd4a5bd6fb6fe24d58385'}
]
# Define the common postData template
post_data_template = (
    'sEcho={sEcho}&iColumns=4&sColumns=,,,&iDisplayStart={iDisplayStart}&iDisplayLength={iDisplayLength}'
    '&mDataProp_0=0&sSearch_0=&bRegex_0=false&bSearchable_0=true&bSortable_0=true'
    '&mDataProp_1=1&sSearch_1=&bRegex_1=false&bSearchable_1=true&bSortable_1=true'
    '&mDataProp_2=2&sSearch_2=&bRegex_2=false&bSearchable_2=true&bSortable_2=true'
    '&mDataProp_3=3&sSearch_3=&bRegex_3=false&bSearchable_3=true&bSortable_3=true'
    '&sSearch=&bRegex=false&iSortCol_0=0&sSortDir_0=asc&iSortingCols=1'
    '&flag=natDisp&state_code=&dist_code=&ci_cri=2&val={val}&active_tab=disposed'
    '&casetypeval=&monthInsDisp=undefined&disp_year=2019&search_by=&dashFlag=reg&ajax_req=true'
    '&app_token={app_token}'
)
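# Side note: the hand-built query string above is easy to get wrong (every
# '&' and '=' is typed manually). As an alternative sketch, requests will
# form-encode a plain dict passed via data=; the keys below are copied from
# the template, only the helper name build_post_data is mine:
def build_post_data(sEcho, start, length, val, app_token):
    fields = {
        'sEcho': sEcho, 'iColumns': 4, 'sColumns': ',,,',
        'iDisplayStart': start, 'iDisplayLength': length,
        'sSearch': '', 'bRegex': 'false', 'iSortCol_0': 0,
        'sSortDir_0': 'asc', 'iSortingCols': 1, 'flag': 'natDisp',
        'state_code': '', 'dist_code': '', 'ci_cri': 2, 'val': val,
        'active_tab': 'disposed', 'casetypeval': '',
        'monthInsDisp': 'undefined', 'disp_year': 2019, 'search_by': '',
        'dashFlag': 'reg', 'ajax_req': 'true', 'app_token': app_token,
    }
    # Add the per-column mDataProp_N / sSearch_N / ... fields
    for i in range(4):
        fields.update({f'mDataProp_{i}': i, f'sSearch_{i}': '',
                       f'bRegex_{i}': 'false', f'bSearchable_{i}': 'true',
                       f'bSortable_{i}': 'true'})
    return fields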
# Function to fetch data for a specific disposal type
def fetch_data(start, length, sEcho, val, app_token):
    post_data = post_data_template.format(
        sEcho=sEcho,
        iDisplayStart=start,
        iDisplayLength=length,
        val=val,
        app_token=app_token
    )
    response = requests.post(url, headers=headers, data=post_data)
    return response.json()
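# The server may answer a stale session or bad app_token with an HTML error
# page instead of JSON, and response.json() then raises. A defensive variant
# (fetch_data_checked is my name; same request, just with error reporting):
def fetch_data_checked(start, length, sEcho, val, app_token):
    post_data = post_data_template.format(
        sEcho=sEcho, iDisplayStart=start, iDisplayLength=length,
        val=val, app_token=app_token
    )
    response = requests.post(url, headers=headers, data=post_data, timeout=60)
    response.raise_for_status()  # surface HTTP-level failures early
    try:
        return response.json()
    except ValueError:
        # Show the start of the body to reveal what the server actually sent
        raise RuntimeError(f"Non-JSON reply for {val!r} at offset {start}: "
                           f"{response.text[:200]!r}")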
# Function to iterate over all disposal types and fetch all pages
def fetch_all_disposed_data(disposed_types):
    all_data = {}
    for disposed_type in disposed_types:
        val = disposed_type['val']
        app_token = disposed_type['app_token']
        sEcho = 1      # DataTables draw counter; incremented on each request
        start = 0      # Starting offset (pagination)
        length = 1000  # Number of records per request
        # First request also reports the total number of records
        initial_data = fetch_data(start=start, length=length, sEcho=sEcho, val=val, app_token=app_token)
        total_records = initial_data.get("iTotalRecords", 0)
        print(f"Fetching {total_records} records for {val} disposal type...")
        disposed_data = [initial_data]
        # Loop through the remaining pages until all records are covered.
        # The last request may ask for more rows than remain; DataTables-style
        # backends normally just return the short final page, so no separate
        # remainder request is needed (a second fetch at the same offset would
        # only duplicate the last page).
        while start + length < total_records:
            start += length  # Move to the next page
            sEcho += 1       # Increment sEcho for the next request
            data = fetch_data(start=start, length=length, sEcho=sEcho, val=val, app_token=app_token)
            disposed_data.append(data)
        # Store data for each disposal type
        all_data[val] = disposed_data
    return all_data
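# Sanity check (assumption: a legacy DataTables response carries its rows in
# an "aaData" list alongside iTotalRecords). Counting rows per disposal type
# makes it obvious whether the later pages actually arrived:
def count_rows(all_data):
    for val, pages in all_data.items():
        fetched = sum(len(page.get("aaData", [])) for page in pages)
        expected = pages[0].get("iTotalRecords", 0) if pages else 0
        print(f"{val}: fetched {fetched} of {expected} rows")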
# Fetch and store data for all disposed types
disposed_data = fetch_all_disposed_data(disposed_types)
# Save the data to a JSON file for further processing
with open("disposed_data_2019.json", "w") as f:
    json.dump(disposed_data, f, indent=4)
print("Data fetching completed and saved to disposed_data_2019.json")