I am trying to make this code output to a CSV file when calling the spider with -o output.csv:

# -*- coding: utf-8 -*-
import scrapy
from scrapy.spiders import SitemapSpider
from scrapy.spiders import Spider
from scrapy.http import Request, XmlResponse
from scrapy.utils.sitemap import Sitemap, sitemap_urls_from_robots
from scrapy.utils.gz import gunzip, is_gzipped
import re
import requests

class GetpagesfromsitemapSpider(SitemapSpider):
    name = "test"
    handle_httpstatus_list = [404]

    def parse(self, response):
        print response.url

    def _parse_sitemap(self, response):
        if response.url.endswith('/robots.txt'):
            for url in sitemap_urls_from_robots(response.body):
                yield Request(url, callback=self._parse_sitemap)
        else:
            body = self._get_sitemap_body(response)
            if body is None:
                self.logger.info('Ignoring invalid sitemap: %s', response.url)
                return

            s = Sitemap(body)
            sites = []
            if s.type == 'sitemapindex':
                for loc in iterloc(s, self.sitemap_alternate_links):
                    if any(x.search(loc) for x in self._follow):
                        yield Request(loc, callback=self._parse_sitemap)
            elif s.type == 'urlset':
                for loc in iterloc(s):
                    for r, c in self._cbs:
                        if r.search(loc):
                            sites.append(loc)
                            break
            print sites

    def __init__(self, spider=None, *a, **kw):
        super(GetpagesfromsitemapSpider, self).__init__(*a, **kw)
        self.spider = spider
        # Probe the site for a sitemap, falling back to robots.txt
        l = []
        url = "https://channelstore.roku.com"
        resp = requests.head(url + "/sitemap.xml")
        if resp.status_code != 404:
            l.append(resp.url)
        else:
            resp = requests.head(url + "/robots.txt")
            if resp.status_code == 200:
                l.append(resp.url)
        self.sitemap_urls = l
        print self.sitemap_urls

def iterloc(it, alt=False):
    for d in it:
        yield d['loc']

        # Also consider alternate URLs (xhtml:link rel="alternate")
        if alt and 'alternate' in d:
            for l in d['alternate']:
                yield l

I have tried changing the print response.url on line 18 to a few things, but I can't seem to make this script output to a CSV; all I can manage is seeing the exact information I want, but only on the terminal screen.

This code is from here, but I am struggling with what should be the easy part of completing it.

Any help is greatly appreciated!

Tomzski

1 Answer


It's not clear from your example, but it looks like you are not passing the command-line argument (-o) to your SitemapSpider.
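If the spider lives in a Scrapy project, that flag goes on the scrapy command line, e.g. scrapy crawl test -o output.csv (using the name attribute from your snippet). If you are launching it from a plain Python script instead, a minimal sketch of the equivalent, assuming your spider class is importable, is to hand the feed settings (FEED_FORMAT / FEED_URI) to CrawlerProcess:

from scrapy.crawler import CrawlerProcess

# Stand-alone equivalent of "scrapy crawl test -o output.csv"
process = CrawlerProcess(settings={
    'FEED_FORMAT': 'csv',
    'FEED_URI': 'output.csv',
})
process.crawl(GetpagesfromsitemapSpider)
process.start()  # blocks until the crawl finishes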

A simpler solution, instead of passing the -o argument, is to just redirect your output to a file:

my_script.py > output.csv

OR

my_script.py | tee output.csv <-- this will write to the file and also print to your terminal

EDIT: Not the most efficient way, but without seeing the full script:

def parse(self, response):
    # Open in append mode so each crawled URL is added as a new row
    with open('output.csv', 'a') as fh:
        fh.write('{}\n'.format(response.url))

This will append each response.url as a new line in the output.csv file.
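That said, if you want to keep everything within Scrapy and make the -o flag itself work, note that the feed exporter only serializes items that the spider yields; it never sees anything you print or write by hand. A minimal sketch of the same callback yielding a dict instead (the key becomes the CSV column header):

def parse(self, response):
    # Yield an item instead of printing; "scrapy crawl test -o output.csv"
    # will then serialize each one as a row in the CSV
    yield {'url': response.url}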

Ravi Patel
  • Is it not something I can fix by editing print response.url? I'm looking to keep everything within Scrapy if at all possible, and I assume this is possible; I just can't find the details to accomplish what I need, and I can't apply the Scrapy docs to my situation. – Tomzski Oct 05 '18 at 13:58