I have the following code that runs a spider programmatically:
import asyncio

from scrapy.crawler import CrawlerProcess

from scrapy_webcrawler.spiders.spider import WebCrawlerSpider


class WebCrawlerConnector:
    def start(self) -> int:
        process = CrawlerProcess({
            'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'
        })
        process.crawl(
            WebCrawlerSpider,
            urls=["https://quotes.toscrape.com/"],
            crawl_depth=0,
            max_links_per_page=2,
        )
        process.start()


async def main() -> None:
    """Start the connector."""
    connector = WebCrawlerConnector()
    await connector.start()


if __name__ == "__main__":
    asyncio.run(main())
And I have this spider, which uses a crawling tool named crawl4ai. crawl4ai is very good at extracting content, but it is a single-page crawler, which is why I am using Scrapy to follow links:
from typing import Optional

from crawl4ai import AsyncWebCrawler
from scrapy.exceptions import CloseSpider
from scrapy.http import Request
from scrapy.linkextractors import LinkExtractor
from scrapy.settings import BaseSettings
from scrapy.spiders import CrawlSpider, Rule
from scrapy.utils.project import get_project_settings


class WebCrawlerSpider(CrawlSpider):
    name = "webcrawler"
    allowed_domains = []
    rules = (
        Rule(LinkExtractor(), callback="parse_item", follow=True),
    )

    def __init__(self, urls=["https://quotes.toscrape.com/"], *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.start_urls = urls

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        spider = super().from_crawler(crawler, *args, **kwargs)
        spider.settings.set("DEPTH_LIMIT", 0, priority="spider")
        return spider

    def start_requests(self):
        for url in self.start_urls:
            yield Request(url)

    async def parse_start_url(self, response):
        await self.process_url(response.url)
        if self.should_stop_crawling():
            self.logger.info("DEPTH_LIMIT is 0. Stopping crawl.")
            raise CloseSpider(reason="DEPTH_LIMIT reached 0, stopping spider.")

    def parse_item(self, response):
        self.process_url(response.url)

    async def process_url(self, url):
        async with AsyncWebCrawler(verbose=False) as crawler:
            result = await crawler.arun(
                url=url,
                exclude_external_links=True,
                exclude_social_media_links=True,
            )
            print(result.fit_markdown)

    def should_stop_crawling(self):
        depth_limit = self.settings.getint("DEPTH_LIMIT", default=-1)
        return depth_limit == 0
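For context, this is roughly how crawl4ai runs on its own, outside Scrapy, from a plain asyncio event loop (a minimal sketch using the same calls as process_url above; fetch_one is just an illustrative name):

import asyncio

from crawl4ai import AsyncWebCrawler


async def fetch_one(url: str) -> None:
    # One arun() call handles exactly one page; crawl4ai does not follow links itself.
    async with AsyncWebCrawler(verbose=False) as crawler:
        result = await crawler.arun(
            url=url,
            exclude_external_links=True,
            exclude_social_media_links=True,
        )
        print(result.fit_markdown)


if __name__ == "__main__":
    asyncio.run(fetch_one("https://quotes.toscrape.com/"))

Run like that, the crawl works, so the problem only appears when I drive it from inside the Scrapy spider.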
However, when I run the first file, the outcome is always this stack trace:
ERROR:scrapy.core.scraper:Spider error processing <GET https://quotes.toscrape.com/> (referer: None)
Traceback (most recent call last):
File "/Users/luis.ferreira/.pyenv/versions/3.11.2/lib/python3.11/site-packages/scrapy/utils/defer.py", line 346, in aiter_errback
yield await it.__anext__()
^^^^^^^^^^^^^^^^^^^^
File "/Users/luis.ferreira/.pyenv/versions/3.11.2/lib/python3.11/site-packages/scrapy/utils/python.py", line 394, in __anext__
return await self.data.__anext__()
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/luis.ferreira/.pyenv/versions/3.11.2/lib/python3.11/site-packages/scrapy/utils/python.py", line 375, in _async_chain
async for o in as_async_generator(it):
File "/Users/luis.ferreira/.pyenv/versions/3.11.2/lib/python3.11/site-packages/scrapy/utils/asyncgen.py", line 21, in as_async_generator
async for r in it:
File "/Users/luis.ferreira/.pyenv/versions/3.11.2/lib/python3.11/site-packages/scrapy/utils/python.py", line 394, in __anext__
return await self.data.__anext__()
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/luis.ferreira/.pyenv/versions/3.11.2/lib/python3.11/site-packages/scrapy/utils/python.py", line 375, in _async_chain
async for o in as_async_generator(it):
File "/Users/luis.ferreira/.pyenv/versions/3.11.2/lib/python3.11/site-packages/scrapy/utils/asyncgen.py", line 21, in as_async_generator
async for r in it:
File "/Users/luis.ferreira/.pyenv/versions/3.11.2/lib/python3.11/site-packages/scrapy/core/spidermw.py", line 121, in process_async
async for r in iterable:
File "/Users/luis.ferreira/.pyenv/versions/3.11.2/lib/python3.11/site-packages/scrapy/spidermiddlewares/referer.py", line 384, in process_spider_output_async
async for r in result:
File "/Users/luis.ferreira/.pyenv/versions/3.11.2/lib/python3.11/site-packages/scrapy/core/spidermw.py", line 121, in process_async
async for r in iterable:
File "/Users/luis.ferreira/.pyenv/versions/3.11.2/lib/python3.11/site-packages/scrapy/spidermiddlewares/urllength.py", line 62, in process_spider_output_async
async for r in result:
File "/Users/luis.ferreira/.pyenv/versions/3.11.2/lib/python3.11/site-packages/scrapy/core/spidermw.py", line 121, in process_async
async for r in iterable:
File "/Users/luis.ferreira/.pyenv/versions/3.11.2/lib/python3.11/site-packages/scrapy/spidermiddlewares/depth.py", line 60, in process_spider_output_async
async for r in result:
File "/Users/luis.ferreira/.pyenv/versions/3.11.2/lib/python3.11/site-packages/scrapy/core/spidermw.py", line 121, in process_async
async for r in iterable:
File "/Users/luis.ferreira/.pyenv/versions/3.11.2/lib/python3.11/site-packages/scrapy/spiders/crawl.py", line 165, in _parse_response
cb_res = await cb_res
^^^^^^^^^^^^
File "/Users/luis.ferreira/Documents/test/scrapy_webcrawler/scrapy_webcrawler/spiders/spider.py", line 37, in parse_start_url
await self.process_url(response.url)
File "/Users/luis.ferreira/Documents/test/scrapy_webcrawler/scrapy_webcrawler/spiders/spider.py", line 46, in process_url
async with AsyncWebCrawler(verbose=False) as crawler:
File "/Users/luis.ferreira/.pyenv/versions/3.11.2/lib/python3.11/site-packages/crawl4ai/async_webcrawler.py", line 118, in __aenter__
await self.crawler_strategy.__aenter__()
File "/Users/luis.ferreira/.pyenv/versions/3.11.2/lib/python3.11/site-packages/crawl4ai/async_crawler_strategy.py", line 290, in __aenter__
await self.start()
File "/Users/luis.ferreira/.pyenv/versions/3.11.2/lib/python3.11/site-packages/crawl4ai/async_crawler_strategy.py", line 298, in start
self.playwright = await async_playwright().start()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/luis.ferreira/.pyenv/versions/3.11.2/lib/python3.11/site-packages/playwright/async_api/_context_manager.py", line 51, in start
return await self.__aenter__()
^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/luis.ferreira/.pyenv/versions/3.11.2/lib/python3.11/site-packages/playwright/async_api/_context_manager.py", line 40, in __aenter__
done, _ = await asyncio.wait(
^^^^^^^^^^^^^^^^^^^
File "/Users/luis.ferreira/.pyenv/versions/3.11.2/lib/python3.11/asyncio/tasks.py", line 418, in wait
return await _wait(fs, timeout, return_when, loop)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/luis.ferreira/.pyenv/versions/3.11.2/lib/python3.11/asyncio/tasks.py", line 525, in _wait
await waiter
RuntimeError: await wasn't used with future
INFO:scrapy.core.engine:Closing spider (finished)
I know this is something related to mixing sync and async code, but I am not that familiar with Python. Can someone help?
I already tried removing the async from the spider, but that doesn't work either.
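Roughly, by "removing the async" I mean turning the callback into a plain method and running the coroutine myself, along these lines (a sketch of the idea, not my exact code):

    def parse_start_url(self, response):
        # Sync variant: run the crawl4ai coroutine to completion on its own loop.
        # This attempt ends in an error as well.
        asyncio.run(self.process_url(response.url))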