From ee736e28ed4c5cdae395f5851eb2bad96d8078ef Mon Sep 17 00:00:00 2001
From: Lars-Dominik Braun
Date: Sat, 25 Nov 2017 14:35:05 +0100
Subject: Ignore duplicate URLs when saving DOM snapshot

---
 crocoite/cli.py | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/crocoite/cli.py b/crocoite/cli.py
index 7a29cc7..a2ac958 100644
--- a/crocoite/cli.py
+++ b/crocoite/cli.py
@@ -352,9 +352,18 @@ def main ():
     """
     viewport = getFormattedViewportMetrics (tab)
     dom = tab.DOM.getDocument (depth=-1, pierce=True)
+    haveUrls = set ()
     for doc in ChromeTreeWalker (dom['root']).split ():
-        url = urlsplit (doc['documentURL'])
+        rawUrl = doc['documentURL']
+        if rawUrl in haveUrls:
+            # ignore duplicate URLs. they are usually caused by
+            # javascript-injected iframes (advertising) with no(?) src
+            logger.warning ('have DOM snapshot for URL {}, ignoring'.format (rawUrl))
+            continue
+        url = urlsplit (rawUrl)
         if url.scheme in ('http', 'https'):
+            logger.debug ('saving DOM snapshot for url {}, base {}'.format (doc['documentURL'], doc['baseURL']))
+            haveUrls.add (rawUrl)
             walker = ChromeTreeWalker (doc)
             # remove script, to make the page static and noscript, because at the
             # time we took the snapshot scripts were enabled
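
For context outside the patch, a minimal, self-contained sketch of the same deduplication idea follows. It assumes a plain iterable of document URLs instead of crocoite's ChromeTreeWalker; the function name filter_snapshot_urls and the example URLs are hypothetical and not part of crocoite.

    import logging
    from urllib.parse import urlsplit

    logger = logging.getLogger(__name__)

    def filter_snapshot_urls(document_urls):
        """Yield each http(s) document URL once, skipping duplicates."""
        have_urls = set()
        for raw_url in document_urls:
            if raw_url in have_urls:
                # Duplicates usually come from JavaScript-injected iframes
                # (advertising) that share the same or an empty src.
                logger.warning('have DOM snapshot for URL %s, ignoring', raw_url)
                continue
            url = urlsplit(raw_url)
            if url.scheme in ('http', 'https'):
                have_urls.add(raw_url)
                yield raw_url

    if __name__ == '__main__':
        urls = ['https://example.com/', 'about:blank', 'https://example.com/']
        print(list(filter_snapshot_urls(urls)))  # ['https://example.com/']

As in the patch, only http(s) documents are recorded, so non-web frames such as about:blank never enter the seen set.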