I’m working on a Python script using Selenium to scrape Instagram data. The script should terminate when a specific condition is met, but it continues scraping despite the termination condition being true. Here’s a simplified version of my code:
import time
import os
import threading
import tkinter as tk
import webbrowser
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from bs4 import BeautifulSoup
from selenium.webdriver.common.keys import Keys
from process_the_xlsx import proces_excel_files
from login_handling import login_to_insta, load_cookies
# Global flag polled by the worker loops and the monitor thread; flipped to
# True by check_termination_condition() when the stop condition is met.
# NOTE(review): a plain bool works here because CPython makes the write
# atomic, but a threading.Event would state the intent — confirm before use.
terminate_script: bool = False
def warn_message():
    """Show a blocking "Developed By" dialog with a clickable contact link.

    Builds a hidden Tk root plus a Toplevel message box and runs the Tk
    main loop until the user presses OK; the script resumes afterwards.
    """
    hidden_root = tk.Tk()
    hidden_root.withdraw()

    dialog = tk.Toplevel(hidden_root)
    dialog.title("Developed By")
    dialog.geometry("400x150")

    def _visit(url):
        # Open the contact link in the default browser.
        webbrowser.open_new(url)

    def _dismiss():
        # Destroy the dialog and stop the main loop so the script continues.
        dialog.destroy()
        hidden_root.quit()

    # BUGFIX: the "\n" escape was lost in the original source ("first... n
    # Any help"); restored so the message breaks onto two lines.
    msg = tk.Label(
        dialog,
        text="Plz make sure you read the file called 'read me' first...\n Any help contact us.",
        wraplength=350,
    )
    msg.pack(pady=10)

    contact = tk.Label(dialog, text="ddddd", fg="blue", cursor="hand2")
    contact.pack(pady=5)
    contact.bind("<Button-1>", lambda e: _visit("https://t.me/Bek_El_Ghandour"))

    tk.Button(dialog, text="OK", command=_dismiss, width=10).pack(pady=20)

    hidden_root.mainloop()
# --- One-time filesystem setup -------------------------------------------
# Output directory and the two CSV files the scraper appends to.
directory = "data"
file_path = os.path.join(directory, "data.csv")
file_path2 = os.path.join(directory, "full_data.csv")

# CSV header shared by both output files.  BUGFIX: the trailing "\n" escape
# was lost in the original ("External urln"), which glued the first data row
# onto the header line.
_CSV_HEADER = (
    "Instagram ID,Username,Full name,Profile link,Avatar pic,"
    "Followed by viewer,Is verified,Followers count,Following count,"
    "Biography,Public email,Posts count,Phone country code,Phone number,"
    "City,Address,Is private,Is business,External url\n"
)

if not os.path.exists(directory):
    os.makedirs(directory)

# NOTE(review): shown on every run here — confirm whether the original
# intended this to be first-run only (indentation was lost in transit).
warn_message()

# Create each output file with its header on first run only.
for _csv_path in (file_path, file_path2):
    if not os.path.exists(_csv_path):
        with open(_csv_path, 'w', encoding='utf-8') as file:
            file.write(_CSV_HEADER)

if not os.path.exists('cookies'):
    os.makedirs('cookies')

# First line of needs/hashtag.txt: "<agent type>,<target>" where agent type
# is "followers", "following" or "hashtag" (see open_extention).
with open("needs/hashtag.txt", "r") as file:
    type_of_agent, My_hashtag = file.readline().strip().split(",")

if not os.path.exists('Downloads'):
    os.makedirs('Downloads')

# Normalize to forward slashes for Chrome arguments.  BUGFIX: the original
# had a broken backslash escape, replace('\', '/') — must be '\\'.
current_directory = os.getcwd().replace('\\', '/') + '/'
# --- Chrome setup ---------------------------------------------------------
# Locate the unpacked extension: the versioned sub-directory name changes
# between releases, so take whatever single entry sits inside the folder.
extension_path3 = None  # avoids a NameError below if the lookup fails
try:
    path = f'{current_directory}extentions/hndnabgpcmhdmaejoapophbidipmgnpb'
    directory_contents = os.listdir(path)
    item = directory_contents[0]
    extension_path3 = f"{current_directory}extentions/hndnabgpcmhdmaejoapophbidipmgnpb/{item}"
except (OSError, IndexError) as e:
    # Folder missing or empty — Chrome will be started without the extension.
    print(e)

chrome_options1 = Options()
chrome_options1.add_argument("--disable-notifications")
chrome_options1.add_argument(f'--load-extension={extension_path3}')
chrome_options1.add_argument('--ignore-certificate-errors')
chrome_options1.add_argument('--lang=en')

# Chrome's download preference wants a Windows-style path.  BUGFIX: the
# original had a broken backslash escape, replace('/', '\') — must be '\\'.
download_directory = f"{current_directory}Downloads".replace('/', '\\')
prefs = {"download.default_directory": download_directory}
chrome_options1.add_experimental_option("prefs", prefs)

# Single shared browser instance used by every thread in this script.
driver2 = webdriver.Chrome(options=chrome_options1)
def check_termination_condition():
    """Check whether the scraper should stop, and set the global flag.

    Compares the first line of ``needs/global.txt`` with the first
    comma-separated field of the first line of ``filtered_data.csv``;
    when they match, sets the module-wide ``terminate_script`` flag.

    Returns:
        bool: True when the termination condition is met, else False.
            (Return value added for testability; the flag side effect is
            unchanged and existing callers may keep ignoring the result.)
    """
    global terminate_script
    try:
        with open("needs/global.txt", "r") as global_file:
            global_value = global_file.readline().strip()
        with open("filtered_data.csv", "r") as filtered_file:
            # First column of the first row of filtered_data.csv.
            filtered_value = filtered_file.readline().split(",")[0].strip()
    except OSError as e:
        # Either file may not exist yet while the scraper warms up;
        # narrowed from the original blanket Exception handler.
        print(f"Error reading files: {e}")
        return False
    if global_value == filtered_value:
        terminate_script = True
        return True
    return False
def loop_for_the_error_of_login():
    """Periodically export the xlsx and recover from Instagram 401 errors.

    Roughly every two minutes: clicks the extension's export button,
    processes the downloaded xlsx, and scans the page for the extension's
    "Request failed with status code 401" banner.  When the banner is
    present, logs in again, restores cookies and refreshes the tabs.

    Runs until the module-wide ``terminate_script`` flag is set.
    """
    global driver2, email, password, file_name
    while not terminate_script:
        try:
            # BUGFIX: sleep in 1-second slices instead of one 120 s block so
            # this loop notices terminate_script promptly — the original
            # slept two full minutes before re-checking the flag, which is
            # why the script appeared to keep scraping after the condition
            # was met.
            for _ in range(120):
                if terminate_script:
                    break
                time.sleep(1)
            if terminate_script:
                break
            print("downloading the xlsx file")
            expert_buttons = WebDriverWait(driver2, 10).until(
                EC.presence_of_all_elements_located(
                    (By.CLASS_NAME, 'mu-ripple-wrapper')))
            # NOTE(review): assumes the third ripple button is the export
            # control — confirm against the extension UI.
            expert_buttons[2].click()
            time.sleep(4)
            proces_excel_files()
            print("Checking for the target element...")
            page_html = driver2.page_source
            soup = BeautifulSoup(page_html, 'html.parser')
            error_div = soup.find('div', {'class': 'mu-alert mu-error-color mu-inverse',
                                          'data-v-116b9c74': True, 'style': 'margin-top: 20px; margin-bottom: 5px; width: 60%;'})
            if error_div and "Request failed with status code 401" in error_div.text:
                # Session expired: log in again, restore cookies in the
                # Instagram tab, then return to the extension tab.
                login_to_insta(email, password)
                driver2.switch_to.window(driver2.window_handles[3])
                time.sleep(3)
                load_cookies(driver2, file_name)
                driver2.refresh()
                time.sleep(3)
                driver2.switch_to.window(driver2.window_handles[2])
                print("haha")
        except TimeoutException:
            print("Timeout: Target element not found.")
        if terminate_script:
            break
def open_extention():
    """Drive the scraping extension's popup UI.

    Opens the extension popup, minimizes the wait-interval slider, selects
    the agent mode ("followers", "following" or "hashtag" — read earlier
    from needs/hashtag.txt), types the target and starts the scrape, then
    loops exporting results via loop_for_the_error_of_login() until the
    module-wide ``terminate_script`` flag is set.
    """
    global driver2
    url = "https://www.instagram.com/accounts/login/"
    time.sleep(2)
    driver2.switch_to.window(driver2.window_handles[0])
    print('switching')
    driver2.get(url)
    driver2.switch_to.window(driver2.window_handles[0])
    extension_popup_url = "chrome-extension://hndnabgpcmhdmaejoapophbidipmgnpb/popup/popup.html"
    driver2.get(extension_popup_url)
    print('working 1')
    # Slider labelled "Wait interval(seconds)": nudge it toward the minimum
    # by sending ARROW_LEFT repeatedly.
    track = WebDriverWait(driver2, 20).until(EC.presence_of_element_located((By.XPATH, "//div[@label='Wait interval(seconds)']")))
    track.click()
    try:
        track.send_keys(Keys.ARROW_LEFT * 15)
    except Exception:
        # Best effort — some slider widgets reject keyboard input.
        # (Narrowed from the original bare "except:", which would also
        # swallow KeyboardInterrupt/SystemExit.)
        pass
    time.sleep(10)
    hashtags = WebDriverWait(driver2, 20).until(
        EC.presence_of_all_elements_located(
            (By.CSS_SELECTOR, "button.mu-button.mu-icon-button"))
    )
    if type_of_agent == "followers":
        radio_button = WebDriverWait(driver2, 20).until(EC.presence_of_all_elements_located((By.CLASS_NAME, "mu-radio-wrapper")))
        radio_button[1].click()
        hashtags[0].click()
        print("getting the followers of this account")
    elif type_of_agent == "following":
        radio_button = WebDriverWait(driver2, 20).until(EC.presence_of_all_elements_located((By.CLASS_NAME, "mu-radio-wrapper")))
        radio_button[1].click()
        hashtags[1].click()
        print("getting the following accounts")
    elif type_of_agent == "hashtag":
        if len(hashtags) >= 3:
            print('working 3')
            hashtags[2].click()
        else:
            print("There are not enough buttons on the page.")
    print('working 4')
    # Type the target (hashtag or account name) and start the scrape.
    hashtag_input = WebDriverWait(driver2, 20).until(EC.presence_of_element_located(
        (By.CSS_SELECTOR, "div.mu-input input.mu-text-field-input")
    ))
    hashtag_input.send_keys(My_hashtag)
    time.sleep(2)
    go_button = WebDriverWait(driver2, 20).until(EC.element_to_be_clickable(
        (By.CSS_SELECTOR, "button.mu-button.mu-raised-button.mu-button-full-width.mu-success-color.mu-inverse")
    ))
    go_button.click()
    time.sleep(4)
    print('working 5')
    # NOTE(review): assumes the results live in the third tab (index 2) —
    # confirm against the extension's tab layout.
    driver2.switch_to.window(driver2.window_handles[2])
    print('working 6')
    while not terminate_script:
        driver2.refresh()
        time.sleep(5)
        checkbox_locator = (By.CLASS_NAME, 'mu-checkbox')
        checkboxes = WebDriverWait(driver2, 10).until(
            EC.presence_of_all_elements_located(checkbox_locator)
        )
        # NOTE(review): assumes the second checkbox selects all rows —
        # confirm against the extension UI.
        checkboxes[1].click()
        loop_for_the_error_of_login()
        if terminate_script:
            break
def scraper(file_name_):
    """Log one account into Instagram and run its scraping session.

    Args:
        file_name_: name of a credentials file in needs/ whose first line
            is "email,password".

    Reads the credentials, restores any saved cookie jar for the account,
    verifies the login (logging in fresh if needed), then hands control to
    open_extention() and quits the driver when it returns.
    """
    # NOTE(review): these globals (email/password/file_name) are shared by
    # every scraper thread — running more than one thread races on them.
    global driver2, email, password, file_name
    with open(f"needs/{file_name_}", "r") as file:
        email, password = file.readline().strip().split(",")
    driver2.get('https://www.instagram.com/')
    # Per-account cookie jar, e.g. "user_gmail.com.txt".
    file_name = f"{email.replace('@', '_')}.txt"
    # NOTE(review): this local shadows the module-level file_path
    # (data/data.csv) — intentional here, but confusing; consider renaming.
    file_path = os.path.join("cookies", file_name)
    if not os.path.exists(file_path):
        # Seed an empty cookie list so load_cookies has something to read.
        with open(file_path, 'w', encoding='utf-8') as f:
            f.write("[]")
    time.sleep(3)
    load_cookies(driver2, file_name)
    time.sleep(1)
    driver2.get('https://www.instagram.com/')
    try:
        dismmis_message = WebDriverWait(driver2, 4).until(
            EC.presence_of_element_located((By.XPATH, "//div[@aria-label='Dismiss']")))
        if dismmis_message:
            time.sleep(2)
            dismmis_message.click()
            print('clicked dismiss')
    except Exception:
        # No "Dismiss" dialog appeared within 4 s.  (Narrowed from the
        # original bare "except:".)
        print('no ig dismiss message')
    make_sure_we_loged_in = ''
    try:
        # The "Home" icon only exists when a session is active.
        make_sure_we_loged_in = WebDriverWait(driver2, 10).until(
            EC.presence_of_element_located((By.XPATH, "(//*[name()='svg'][@aria-label='Home'])[1]")))
        if make_sure_we_loged_in:
            print('we already loged in before')
    except Exception as e:
        print("we need to login")
        login_to_insta(email, password)
    if not make_sure_we_loged_in:
        # Fresh login: restore cookies and reload before scraping.
        time.sleep(3)
        load_cookies(driver2, file_name)
        time.sleep(1)
        driver2.refresh()
        time.sleep(5)
    open_extention()
    driver2.quit()
def monitor_and_terminate():
    """Poll the termination condition every 10 s; hard-exit once it is met."""
    global terminate_script
    # Spin until check_termination_condition() flips the shared flag.
    while True:
        if terminate_script:
            break
        check_termination_condition()
        time.sleep(10)
    print("Termination condition met. Terminating all processes.")
    # os._exit skips cleanup handlers and kills every thread immediately.
    os._exit(0)
# --- Thread startup -------------------------------------------------------
directory_of_needs = os.path.abspath(os.path.join(os.getcwd(), "needs"))
threads = []

# Background watcher: flips terminate_script and hard-exits the process.
monitor_thread = threading.Thread(target=monitor_and_terminate)
monitor_thread.start()

# One scraper thread per "scraping_emails*" credentials file.
# BUGFIX: the loop variable must NOT be named "file_name" — scraper()
# assigns the module-level global "file_name", and reusing that name as the
# loop variable here raced with the worker threads in the original code.
for needs_file in os.listdir(directory_of_needs):
    if needs_file.startswith('scraping_emails'):
        print(needs_file)
        thread = threading.Thread(target=scraper, args=(needs_file,))
        threads.append(thread)
        thread.start()

# Wait for all scraper threads to complete.  The monitor thread is not
# joined: it ends the whole process via os._exit(0) when the condition hits.
for thread in threads:
    thread.join()
The script should stop when the termination condition is met. Specifically, it should break out of the loops and stop running. However, even when the terminate_script variable is set to True, the script continues to scrape.
Why does the script continue to run even after the termination condition is met, and how can I fix this so that it properly terminates when terminate_script is set to True?
What I’ve Tried
Adding frequent checks for terminate_script in all loops.
Using os._exit(0) to forcefully terminate all processes.
Debugging with print statements to ensure the condition is met and terminate_script is set to True.
Despite these attempts, the script does not stop scraping. Any help would be greatly appreciated!
You will also need the Growman extension installed in Chrome (it has a green leaf icon).