Whenever I use the parse_quotes function, nothing happens and I just get a blank CSV file, but if I put all my quote items directly in the parse function it is able to scrape the quotes. What am I doing wrong?
<code>def parse(self, response):
    # scraping only the current page
    quotes = response.xpath("//div[@class='quote']")
    for quote in quotes:
        self.parse_quotes(quote)
    # moving to the next page
    next_page = response.xpath("//li[@class='next']/a/@href").get()
    if next_page is not None:
        next_page_url = "https://quotes.toscrape.com/" + next_page
        # going to the next page and then scraping its info
        yield response.follow(next_page_url, callback=self.parse)

# a function to yield all the fields
def parse_quotes(self, response):
    quote_item = QuoteItems()
    quote_item['Quote'] = response.xpath(".//span[@itemprop='text']/text()").get(),
    quote_item['Author'] = response.xpath(".//span//small[@class='author']/text()").get(),
    quote_item['Tags'] = response.xpath(".//div[@class='tags']//a[@class='tag']/text()").getall(),
    yield quote_item
</code>
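For reference, this is a minimal sketch of the all-in-parse variant I described above, which does produce output. The spider class name, start_urls, and the QuoteItems import path are just placeholders filled in for completeness, and I dropped the trailing commas here:

<code>import scrapy
from ..items import QuoteItems  # placeholder import path for my item class


class QuotesSpider(scrapy.Spider):
    name = "quotes"  # placeholder spider name
    start_urls = ["https://quotes.toscrape.com/"]

    def parse(self, response):
        # build and yield an item for every quote on the current page
        for quote in response.xpath("//div[@class='quote']"):
            quote_item = QuoteItems()
            quote_item['Quote'] = quote.xpath(".//span[@itemprop='text']/text()").get()
            quote_item['Author'] = quote.xpath(".//span//small[@class='author']/text()").get()
            quote_item['Tags'] = quote.xpath(".//div[@class='tags']//a[@class='tag']/text()").getall()
            yield quote_item

        # follow the pagination link; response.follow resolves relative URLs
        next_page = response.xpath("//li[@class='next']/a/@href").get()
        if next_page is not None:
            yield response.follow(next_page, callback=self.parse)
</code>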
I've tried everything and am just tired of this problem.