I can’t capture the error message text itself, because the cmd window closes instantly.
I’ve seen two other solutions to this problem where I should use either:
pyinstaller -F --collect-all pyfiglet python.py
or
pyinstaller --add-data "venv\Lib\site-packages\pyfiglet;./pyfiglet" python.py
In the first one everything compiles fine but when I open the .exe it gives me the same error and the second one doesn’t even compile giving me this error:
743 INFO: PyInstaller: 6.6.0, contrib hooks: 2024.5
743 INFO: Python: 3.10.11
772 INFO: Platform: Windows-10-10.0.22631-SP0
774 INFO: wrote C:\Users\maaik\OneDrive\Рабочий стол\upwork projects\salary scraper\salary_scraper.spec
794 INFO: Extending PYTHONPATH with paths
['C:\Users\maaik\OneDrive\Рабочий стол\upwork projects\salary scraper']
1411 INFO: Appending 'datas' from .spec
Unable to find 'C:\Users\maaik\OneDrive\Рабочий стол\upwork projects\salary scraper\venv\Lib\site-packages\pyfiglet' when adding binary and data files.
I suppose it has something to do with my venv, maybe I installed something wrong or something, but in any case here’s my venv screenshot as well:
venv
import re
import csv
import json
import requests
from time import sleep
from bs4 import BeautifulSoup
from pyfiglet import Figlet, figlet_format
import os
def extract_salary_info(job_title, job_city):
    """Scrape salary percentile data for a job title in a city from salary.com.

    Parameters
    ----------
    job_title : str
        Job name, e.g. "software engineer" (spaces are converted to hyphens).
    job_city : str
        City name, e.g. "new york".

    Returns
    -------
    tuple | None
        (title, location, description, p10, p25, median, p75, p90),
        or None when the request fails or the page carries no salary data.
    """
    template = 'https://www.salary.com/research/salary/alternate/{}-salary/{}'
    # build the url based on search criteria
    url = template.format(job_title.lower().replace(' ', '-'),
                          job_city.lower().replace(' ', '-'))
    # request the raw html .. check for valid request
    try:
        # timeout so one dead host cannot hang the whole scraping run
        response = requests.get(url, timeout=10)
        if response.status_code != 200:
            return None
    except requests.exceptions.RequestException:
        # RequestException is the base class: also covers timeouts and
        # DNS failures, not just ConnectionError
        return None
    # parse the html and extract the structured-data (ld+json) blob
    soup = BeautifulSoup(response.text, 'html.parser')
    pattern = re.compile(r'Occupation')
    script = soup.find('script', {'type': 'application/ld+json'}, string=pattern)
    if script is None:
        # page rendered but has no occupation data (unknown job/city combo);
        # without this guard, script.contents raises AttributeError
        return None
    try:
        json_data = json.loads(script.contents[0])
    except (json.JSONDecodeError, IndexError):
        return None
    # extract salary data; treat any missing field as "no data" rather
    # than crashing, consistent with the other failure paths above
    try:
        title = json_data['name']
        location = json_data['occupationLocation'][0]['name']
        description = json_data['description']
        estimate = json_data['estimatedSalary'][0]
        ntile_10 = estimate['percentile10']
        ntile_25 = estimate['percentile25']
        ntile_50 = estimate['median']
        ntile_75 = estimate['percentile75']
        ntile_90 = estimate['percentile90']
    except (KeyError, IndexError, TypeError):
        return None
    return (title, location, description,
            ntile_10, ntile_25, ntile_50, ntile_75, ntile_90)
def _save_results(rows):
    """Write scraped salary rows to salary-results.csv with a header line."""
    with open('salary-results.csv', 'w', newline='', encoding='utf-8') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(['Title', 'Location', 'Description',
                         'nTile10', 'nTile25', 'nTile50', 'nTile75', 'nTile90'])
        writer.writerows(rows)


def main():
    """Extract salary data from top US cities and save it to a CSV file."""
    salary_data = []
    # widen the console window so the 150-column figlet banner fits
    os.system("mode con: cols=150 lines=100")
    banner = figlet_format("Salary Scraper", font="slant", justify='center', width=150)
    print(banner)
    # guard against non-numeric input instead of crashing with ValueError
    try:
        specific_or_all = int(input('\n\n\n\n\n\n\nWould you like to know a salary for a specific city (1) or all largest cities in the US (2): '))
    except ValueError:
        print('Please enter 1 or 2.')
        return
    if specific_or_all == 1:
        city = input('City name: ')
        job_title = input('Job name: ')
        print('Scraping...')
        result = extract_salary_info(job_title, city)
        # the scraper returns None on failure; writing None to csv would crash
        if result is None:
            print(f'No salary data found for {job_title} in {city}.')
            return
        print('saving to csv...')
        _save_results([result])
    elif specific_or_all == 2:
        job_title = input('Job name: ')
        print('Scraping...')
        # get the list of largest us cities
        with open('largest_cities.csv', newline='') as f:
            reader = csv.reader(f)
            # the reader yields each row as a list; flatten into one list of names
            cities = [city for row in reader for city in row]
        # extract salary data for each city
        for city in cities:
            result = extract_salary_info(job_title, city)
            if result:
                salary_data.append(result)
                print(f'scraped for {city}')
            # small delay between requests to be polite to the server
            sleep(0.5)
        print('saving data to csv')
        _save_results(salary_data)
    else:
        print('Please enter 1 or 2.')
# Entry-point guard: run the scraper only when executed directly, not on import.
if __name__ == '__main__':
    main()
Here’s the code I’m trying to run. Thanks in advance :)