"Fossies" - the Fresh Open Source Software Archive  

Source code changes of the file "googler" between
googler-4.2.tar.gz and googler-4.3.1.tar.gz

About: googler is a command line tool to search Google (Web & News) from the terminal (requires Python).

googler (googler-4.2) vs. googler (googler-4.3.1)

Removed lines (present only in 4.2) are prefixed with '-', added lines (present only in 4.3.1) with '+'; unprefixed lines are unchanged context.

skipping to change at line 90
     sys.exit(1)
 try:
     signal.signal(signal.SIGINT, sigint_handler)
 except ValueError:
     # signal only works in main thread
     pass
 # Constants
-_VERSION_ = '4.2'
+_VERSION_ = '4.3.1'
+_EPOCH_ = '20201001'
 COLORMAP = {k: '\x1b[%sm' % v for k, v in {
     'a': '30', 'b': '31', 'c': '32', 'd': '33',
     'e': '34', 'f': '35', 'g': '36', 'h': '37',
     'i': '90', 'j': '91', 'k': '92', 'l': '93',
     'm': '94', 'n': '95', 'o': '96', 'p': '97',
     'A': '30;1', 'B': '31;1', 'C': '32;1', 'D': '33;1',
     'E': '34;1', 'F': '35;1', 'G': '36;1', 'H': '37;1',
     'I': '90;1', 'J': '91;1', 'K': '92;1', 'L': '93;1',
     'M': '94;1', 'N': '95;1', 'O': '96;1', 'P': '97;1',
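
The COLORMAP comprehension above (unchanged in this hunk, shown as context) maps single-letter color keys to ANSI SGR escape sequences; uppercase keys add the bold attribute (';1'). A minimal illustration, not part of the diff, of how an entry expands and renders:

    import sys

    # 'c' -> '\x1b[32m' (green); 'C' -> '\x1b[32;1m' (bold green)
    COLORMAP = {k: '\x1b[%sm' % v for k, v in {'c': '32', 'C': '32;1'}.items()}
    RESET = '\x1b[0m'
    if sys.stdout.isatty():
        print(COLORMAP['c'] + 'green' + RESET)
        print(COLORMAP['C'] + 'bold green' + RESET)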
skipping to change at line 260 (4.2) / line 261 (4.3.1)
     # May raise IndexError if offset is out of bounds.
     def get_coordinate(self, offset: int) -> CoordinateType:
         return self._coords[offset]
 ### begin dim (DOM implementation with CSS support) ###
 ### https://github.com/zmwangx/dim/blob/master/dim.py ###
 import html
 import re
+import textwrap
 from collections import OrderedDict
 from enum import Enum
 from html.parser import HTMLParser
 SelectorGroupLike = Union[str, "SelectorGroup", "Selector"]
 class Node(object):
     """
     Represents a DOM node.
skipping to change at line 296
     # Meant to be reimplemented by subclasses.
     def __init__(self) -> None:
         self.tag = None  # type: Optional[str]
         self.attrs = {}  # type: Dict[str, str]
         self.parent = None  # type: Optional[Node]
         self.children = []  # type: List[Node]
         # Used in DOMBuilder.
         self._partial = False
+        self._namespace = None  # type: Optional[str]
     # HTML representation of the node. Meant to be implemented by
     # subclasses.
     def __str__(self) -> str:  # pragma: no cover
         raise NotImplementedError
     def select(self, selector: SelectorGroupLike) -> Optional["Node"]:
         """DOM ``querySelector`` clone. Returns one match (if any)."""
         selector = self._normalize_selector(selector)
         for node in self._select_all(selector):
skipping to change at line 529 skipping to change at line 530
def __repr__(self) -> str: def __repr__(self) -> str:
s = "<" + self.tag s = "<" + self.tag
if self.attrs: if self.attrs:
s += " attrs=%s" % repr(list(self.attrs.items())) s += " attrs=%s" % repr(list(self.attrs.items()))
if self.children: if self.children:
s += " children=%s" % repr(self.children) s += " children=%s" % repr(self.children)
s += ">" s += ">"
return s return s
# https://ipython.org/ipython-doc/3/api/generated/IPython.lib.pretty.html # https://ipython.readthedocs.io/en/stable/api/generated/IPython.lib.pretty. html
def _repr_pretty_(self, p: Any, cycle: bool) -> None: # pragma: no cover def _repr_pretty_(self, p: Any, cycle: bool) -> None: # pragma: no cover
if cycle: if cycle:
raise RuntimeError("cycle detected in DOM tree") raise RuntimeError("cycle detected in DOM tree")
p.text("<\x1b[1m%s\x1b[0m" % self.tag) p.text("<\x1b[1m%s\x1b[0m" % self.tag)
if self.attrs: if self.attrs:
p.text(" attrs=%s" % repr(list(self.attrs.items()))) p.text(" attrs=%s" % repr(list(self.attrs.items())))
if self.children: if self.children:
p.text(" children=[") p.text(" children=[")
if len(self.children) == 1 and isinstance(self.first_child(), TextNo de): if len(self.children) == 1 and isinstance(self.first_child(), TextNo de):
p.text("\x1b[4m%s\x1b[0m" % repr(self.first_child())) p.text("\x1b[4m%s\x1b[0m" % repr(self.first_child()))
skipping to change at line 649 (4.2) / line 650 (4.3.1)
     Subclasses :class:`html.parser.HTMLParser`.
     Consume HTML and builds a :class:`Node` tree. Once finished, use
     :attr:`root` to access the root of the tree.
     This parser cannot parse malformed HTML with tag mismatch.
     """
     def __init__(self) -> None:
         super().__init__(convert_charrefs=True)
+        # _stack is the stack for nodes. Each node is pushed to the
+        # stack when its start tag is processed, and remains on the
+        # stack until its parent node is completed (end tag processed),
+        # at which point the node is attached to the parent node as a
+        # child and popped from the stack.
         self._stack = []  # type: List[Node]
+        # _namespace_stack is another stack tracking the parsing
+        # context, which is generally the default namespace (None) but
+        # changes when parsing foreign objects (e.g. 'svg' when parsing
+        # an <svg>). The top element is always the current parsing
+        # context, so popping works differently from _stack: an element
+        # is popped as soon as the corresponding end tag is processed.
+        self._namespace_stack = [None]  # type: List[Optional[str]]
     def handle_starttag(
         self, tag: str, attrs: Sequence[Tuple[str, Optional[str]]]
     ) -> None:
         node = ElementNode(tag, attrs)
         node._partial = True
         self._stack.append(node)
-        # For void elements, immediately invoke the end tag handler (see
-        # handle_startendtag()).
-        if _tag_is_void(tag):
+        namespace = (
+            tag.lower()
+            if _tag_encloses_foreign_namespace(tag)
+            else self._namespace_stack[-1]  # Inherit parent namespace
+        )
+        node._namespace = namespace
+        self._namespace_stack.append(namespace)
+        # For void elements (not in a foreign context), immediately
+        # invoke the end tag handler (see handle_startendtag()).
+        if not namespace and _tag_is_void(tag):
             self.handle_endtag(tag)
     def handle_endtag(self, tag: str) -> None:
         tag = tag.lower()
         children = []
         while self._stack and not self._stack[-1]._partial:
             children.append(self._stack.pop())
         if not self._stack:
             raise DOMBuilderException(self.getpos(), "extra end tag: %s" % repr(tag))
         parent = self._stack[-1]
         if parent.tag != tag:
             raise DOMBuilderException(
                 self.getpos(),
                 "expecting end tag %s, got %s" % (repr(parent.tag), repr(tag)),
             )
         parent.children = list(reversed(children))
         parent._partial = False
         for child in children:
             child.parent = parent
+        self._namespace_stack.pop()
     # Make parser behavior for explicitly and implicitly void elements
     # (e.g., <hr> vs <hr/>) consistent. The former triggers
     # handle_starttag only, whereas the latter triggers
     # handle_startendtag (which by default triggers both handle_starttag
-    # and handle_endtag). See https://www.bugs.python.org/issue25258.
+    # and handle_endtag). See https://bugs.python.org/issue25258.
+    #
+    # An exception is foreign elements, which aren't considered void
+    # elements but can be explicitly marked as self-closing according to
+    # the HTML spec (e.g. <path/> is valid but <path> is not).
+    # Therefore, both handle_starttag and handle_endtag must be called,
+    # and handle_endtag should not be triggered from within
+    # handle_starttag in that case.
+    #
+    # Note that for simplicity we do not check whether the foreign
+    # element in question is allowed to be self-closing by spec. (The
+    # SVG spec unfortunately doesn't provide a readily available list of
+    # such elements.)
+    #
+    # https://html.spec.whatwg.org/multipage/syntax.html#foreign-elements
     def handle_startendtag(
         self, tag: str, attrs: Sequence[Tuple[str, Optional[str]]]
     ) -> None:
-        self.handle_starttag(tag, attrs)
+        if self._namespace_stack[-1] or _tag_encloses_foreign_namespace(tag):
+            self.handle_starttag(tag, attrs)
+            self.handle_endtag(tag)
+        else:
+            self.handle_starttag(tag, attrs)
     def handle_data(self, text: str) -> None:
         if not self._stack:
             # Ignore text nodes before the first tag.
             return
         self._stack.append(TextNode(text))
     @property
     def root(self) -> "Node":
         """
skipping to change at line 1360 (4.2) / line 1399 (4.3.1)
     "img",
     "input",
     "link",
     "meta",
     "param",
     "source",
     "track",
     "wbr",
 )
+def _tag_encloses_foreign_namespace(tag: str) -> bool:
+    """
+    Checks whether the tag encloses a foreign namespace (MathML or SVG).
+    https://html.spec.whatwg.org/multipage/syntax.html#foreign-elements
+    """
+    return tag.lower() in ("math", "svg")
 ### end dim ###
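
The self-closing handling in the DOMBuilder hunks above exists because Python's html.parser reports <hr> and <path/> through different callbacks. A standalone sketch (independent of googler/dim) of the events HTMLParser emits, which is what the new namespace logic keys off:

    from html.parser import HTMLParser

    class EventLogger(HTMLParser):
        # Log the three tag callbacks that DOMBuilder overrides.
        def handle_starttag(self, tag, attrs):
            print('starttag   ', tag)
        def handle_endtag(self, tag):
            print('endtag     ', tag)
        def handle_startendtag(self, tag, attrs):
            print('startendtag', tag)

    EventLogger(convert_charrefs=True).feed('<hr><svg><path/></svg>')
    # starttag    hr     <- void element: no end tag will ever arrive
    # starttag    svg
    # startendtag path   <- self-closing foreign element: needs an explicit
    #                       handle_endtag call, hence the new branch above
    # endtag      svg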
 # Global helper functions
 def open_url(url):
     """Open an URL in the user's default web browser.
     The string attribute ``open_url.url_handler`` can be used to open URLs
     in a custom CLI script or utility. A subprocess is spawned with url as
     the parameter in this case instead of the usual webbrowser.open() call.
skipping to change at line 1632 (4.2) / line 1679 (4.3.1)
         self.path = '/search'
         self.params = ''
         # self.query is a calculated property
         self.fragment = ''
         self._tld = None
         self._num = 10
         self._start = 0
         self._keywords = []
         self._sites = None
+        self._exclude = None
         self._query_dict = {
             'ie': 'UTF-8',
             'oe': 'UTF-8',
             #'gbv': '1',  # control the presence of javascript on the page, 1=no js, 2=js
-            'sei': base64.encodebytes(uuid.uuid1().bytes).decode("ascii").rstrip('=\n').replace('/', '_'),
+            'sei': base64.encodebytes(uuid.uuid4().bytes).decode("ascii").rstrip('=\n').replace('/', '_'),
         }
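
The only change to the 'sei' line is uuid.uuid1() becoming uuid.uuid4(): version-1 UUIDs embed the host's MAC address and a timestamp, whereas version-4 UUIDs are purely random. A quick sketch of what the expression yields (output value is illustrative):

    import base64
    import uuid

    # 16 random bytes -> 24 base64 chars -> strip the '==' padding and newline
    sei = base64.encodebytes(uuid.uuid4().bytes).decode('ascii').rstrip('=\n').replace('/', '_')
    print(len(sei), sei)  # e.g.: 22 rikzFU3sQvyqX7_ecGOG7A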
         # In preloaded HTML parsing mode, set keywords to something so
         # that we are not tripped up by require_keywords.
         if opts.html_file and not opts.keywords:
             opts.keywords = ['<debug>']
         self.update(opts, **kwargs)
     def __str__(self):
skipping to change at line 1770 skipping to change at line 1818
else: else:
qd.pop('nfpr', None) qd.pop('nfpr', None)
if opts.get('from') or opts.get('to'): if opts.get('from') or opts.get('to'):
cd_min = opts.get('from') or '' cd_min = opts.get('from') or ''
cd_max = opts.get('to') or '' cd_max = opts.get('to') or ''
qd['tbs'] = 'cdr:1,cd_min:%s,cd_max:%s' % (cd_min, cd_max) qd['tbs'] = 'cdr:1,cd_min:%s,cd_max:%s' % (cd_min, cd_max)
if 'keywords' in opts: if 'keywords' in opts:
self._keywords = opts['keywords'] self._keywords = opts['keywords']
if 'lang' in opts and opts['lang']: if 'lang' in opts and opts['lang']:
qd['hl'] = opts['lang'] qd['hl'] = opts['lang']
if 'geoloc' in opts and opts['geoloc']:
qd['gl'] = opts['geoloc']
if 'news' in opts and opts['news']: if 'news' in opts and opts['news']:
qd['tbm'] = 'nws' qd['tbm'] = 'nws'
elif 'videos' in opts and opts['videos']: elif 'videos' in opts and opts['videos']:
qd['tbm'] = 'vid' qd['tbm'] = 'vid'
else: else:
qd.pop('tbm', None) qd.pop('tbm', None)
if 'num' in opts: if 'num' in opts:
self._num = opts['num'] self._num = opts['num']
if 'sites' in opts: if 'sites' in opts:
self._sites = opts['sites'] self._sites = opts['sites']
if 'exclude' in opts:
self._exclude = opts['exclude']
if 'start' in opts: if 'start' in opts:
self._start = opts['start'] self._start = opts['start']
if 'tld' in opts: if 'tld' in opts:
self._tld = opts['tld'] self._tld = opts['tld']
if 'unfilter' in opts and opts['unfilter']: if 'unfilter' in opts and opts['unfilter']:
qd['filter'] = 0 qd['filter'] = 0
def set_queries(self, **kwargs): def set_queries(self, **kwargs):
"""Forcefully set queries outside the normal `update` mechanism. """Forcefully set queries outside the normal `update` mechanism.
skipping to change at line 1945 (4.2) / line 1997 (4.3.1)
         qd.update(self._query_dict)
         if self._num != 10:  # Skip sending the default
             qd['num'] = self._num
         if self._start:  # Skip sending the default
             qd['start'] = self._start
         # Construct the q query
         q = ''
         keywords = self._keywords
         sites = self._sites
+        exclude = self._exclude
         if keywords:
             if isinstance(keywords, list):
                 q += '+'.join(urllib.parse.quote_plus(kw) for kw in keywords)
             else:
                 q += urllib.parse.quote_plus(keywords)
         if sites:
             q += '+OR'.join('+site:' + urllib.parse.quote_plus(site) for site in sites)
+        if exclude:
+            q += ''.join('+-site:' + urllib.parse.quote_plus(e) for e in exclude)
         qd['q'] = q
         return '&'.join('%s=%s' % (k, qd[k]) for k in sorted(qd.keys()))
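
A worked example (hypothetical inputs) of the q construction above, showing how the new exclude list appends '-site:' terms after any 'site:' terms:

    import urllib.parse

    keywords = ['mechanical', 'keyboards']
    sites = ['reddit.com']
    exclude = ['pinterest.com']
    q = '+'.join(urllib.parse.quote_plus(kw) for kw in keywords)
    q += '+OR'.join('+site:' + urllib.parse.quote_plus(site) for site in sites)
    q += ''.join('+-site:' + urllib.parse.quote_plus(e) for e in exclude)
    print(q)  # mechanical+keyboards+site:reddit.com+-site:pinterest.com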
 class GoogleConnectionError(Exception):
     pass
 class GoogleConnection(object):
     """
     This class facilitates connecting to and fetching from Google.
     Parameters
skipping to change at line 2242 (4.2) / line 2296 (4.3.1)
         # cw is short for collapse_whitespace.
         cw = lambda s: re.sub(r'[ \t\n\r]+', ' ', s) if s is not None else s
         index = 0
         for div_g in tree.select_all('div.g'):
             if div_g.select('.hp-xpdbox'):
                 # Skip smart cards.
                 continue
             try:
-                h3 = div_g.select('div.r h3')
-                if h3:
-                    title = h3.text
-                    url = self.unwrap_link(h3.parent.attr('href'))
-                else:
-                    h3 = div_g.select('h3.r')
-                    a = h3.select('a')
-                    title = a.text
-                    mime = div_g.select('.mime')
-                    if mime:
-                        title = mime.text + ' ' + title
-                    url = self.unwrap_link(a.attr('href'))
+                if div_g.select('.st'):
+                    # Old class structure, stopped working some time in
+                    # September 2020, but kept just in case.
+                    h3 = div_g.select('div.r h3')
+                    if h3:
+                        title = h3.text
+                        a = h3.parent
+                    else:
+                        h3 = div_g.select('h3.r')
+                        a = h3.select('a')
+                        title = a.text
+                    mime = div_g.select('.mime')
+                    if mime:
+                        title = mime.text + ' ' + title
+                    abstract_node = div_g.select('.st')
+                    metadata_node = div_g.select('.f')
+                else:
+                    # Current structure as of October 2020.
+                    # Note that a filetype tag (e.g. PDF) is now pretty
+                    # damn hard to parse with confidence (that it'll
+                    # survive the slighest further change), so we don't.
+                    title_node, details_node, *_ = div_g.select_all('div.rc > div')
+                    if 'yuRUbf' not in title_node.classes:
+                        logger.debug('unexpected title node class(es): expected %r, got %r',
+                                     'yuRUbf', ' '.join(title_node.classes))
+                    if 'IsZvec' not in details_node.classes:
+                        logger.debug('unexpected details node class(es): expected %r, got %r',
+                                     'IsZvec', ' '.join(details_node.classes))
+                    a = title_node.select('a')
+                    h3 = a.select('h3')
+                    title = h3.text
+                    abstract_node = details_node.select('span')
+                    metadata_node = details_node.select('.f, span ~ div')
+                url = self.unwrap_link(a.attr('href'))
                 matched_keywords = []
                 abstract = ''
-                for childnode in div_g.select('.st').children:
-                    if 'f' in childnode.classes:
+                # BFS descendant nodes. Necessary to locate matches (b,
+                # em) while skipping metadata (.f).
+                abstract_nodes = collections.deque([abstract_node])
+                while abstract_nodes:
+                    node = abstract_nodes.popleft()
+                    if 'f' in node.classes:
                         # .f is handled as metadata instead.
                         continue
-                    childnode_text = cw(childnode.text)
-                    if childnode.tag in ['b', 'em'] and childnode_text != '...':
-                        matched_keywords.append({'phrase': childnode_text, 'offset': len(abstract)})
-                    abstract = abstract + childnode_text
+                    if node.tag in ['b', 'em']:
+                        matched_keywords.append({'phrase': node.text, 'offset': len(abstract)})
+                        abstract += node.text
+                        continue
+                    if not node.children:
+                        abstract += node.text
+                        continue
+                    for child in node.children:
+                        abstract_nodes.append(child)
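
A self-contained toy walk-through of the BFS added above, using a hypothetical stand-in for dim's Node (the real loop additionally skips nodes carrying the '.f' metadata class): highlighted <b>/<em> phrases are recorded with their character offsets into the accumulated abstract.

    import collections

    class Toy:
        # Minimal stand-in: element nodes have a tag and children;
        # text nodes have neither, only text.
        def __init__(self, tag=None, text='', children=None):
            self.tag = tag
            self.text = text
            self.children = children or []

    # Roughly <span>We all <em>scream</em> for ice cream</span>
    abstract_node = Toy(tag='span', children=[
        Toy(text='We all '),
        Toy(tag='em', text='scream'),
        Toy(text=' for ice cream'),
    ])
    abstract, matched_keywords = '', []
    queue = collections.deque([abstract_node])
    while queue:
        node = queue.popleft()
        if node.tag in ('b', 'em'):
            matched_keywords.append({'phrase': node.text, 'offset': len(abstract)})
            abstract += node.text
            continue
        if not node.children:
            abstract += node.text
            continue
        queue.extend(node.children)
    print(abstract)          # We all scream for ice cream
    print(matched_keywords)  # [{'phrase': 'scream', 'offset': 7}]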
+                metadata = None
                 try:
-                    metadata = div_g.select('.f').text
-                    metadata = metadata.replace('\u200e', '').replace(' - ', ', ').strip().rstrip(',')
+                    # Sometimes there are multiple metadata fields
+                    # associated with a single entry, e.g. "Released",
+                    # "Producer(s)", "Genre", etc. for a song (sample
+                    # query: "never gonna give you up"). These need to
+                    # be delimited when displayed.
+                    metadata_fields = metadata_node.select_all('div > div.wFMWsc')
+                    if metadata_fields:
+                        metadata = ' | '.join(field.text for field in metadata_fields)
+                    elif not metadata_node.select('a') and not metadata_node.select('g-expandable-container'):
+                        metadata = metadata_node.text
+                    if metadata:
+                        metadata = (
+                            metadata
+                            .replace('\u200e', '')
+                            .replace(' - ', ', ')
+                            .replace(' \u2014 ', ', ')
+                            .strip().rstrip(',')
+                        )
                 except AttributeError:
-                    metadata = None
+                    pass
             except (AttributeError, ValueError):
                 continue
             sitelinks = []
             for td in div_g.select_all('td'):
                 try:
                     a = td.select('a')
                     sl_title = a.text
                     sl_url = self.unwrap_link(a.attr('href'))
                     sl_abstract = td.select('div.s.st, div.s .st').text
                     sitelink = Sitelink(cw(sl_title), sl_url, cw(sl_abstract))
skipping to change at line 2444 skipping to change at line 2546
return ( return (
self.title == other.title and self.title == other.title and
self.url == other.url and self.url == other.url and
self.abstract == other.abstract and self.abstract == other.abstract and
self.metadata == other.metadata and self.metadata == other.metadata and
self.sitelinks == other.sitelinks and self.sitelinks == other.sitelinks and
self.matches == other.matches self.matches == other.matches
) )
def __hash__(self): def __hash__(self):
sitelinks_hashable = tuple(sitelinks) if sitelinks is not None else None sitelinks_hashable = tuple(self.sitelinks) if self.sitelinks is not None
matches_hashable = tuple(matches) if matches is not None else None else None
return hash(self.title, self.url, self.abstract, self.metadata, self.sit matches_hashable = tuple(self.matches) if self.matches is not None else
elinks, self.matches) None
return hash(self.title, self.url, self.abstract, self.metadata, sitelink
s_hashable, matches_hashable)
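
The fix above makes __hash__ reference self.* and actually use the *_hashable values it computes, but note that built-in hash() accepts exactly one argument, so the method as shipped would still raise TypeError if it were ever invoked. A corrected sketch (not what 4.3.1 ships) would hash a single tuple, provided the elements are themselves hashable:

    def __hash__(self):
        # Lists are unhashable, so convert them to tuples first.
        sitelinks_hashable = tuple(self.sitelinks) if self.sitelinks is not None else None
        matches_hashable = tuple(self.matches) if self.matches is not None else None
        return hash((self.title, self.url, self.abstract, self.metadata,
                     sitelinks_hashable, matches_hashable))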
     def _print_title_and_url(self, index, title, url, indent=0):
         colors = self.colors
         if not self.urlexpand:
             url = '[' + urllib.parse.urlparse(url).netloc + ']'
         if colors:
             # Adjust index to print result index clearly
             print(" %s%s%-3s%s" % (' ' * indent, colors.index, index + '.', colors.reset), end='')
skipping to change at line 2691 (4.2) / line 2793 (4.3.1)
     def warn_no_results(self):
         printerr('No results.')
-        if not self.no_results_instructions_shown:
-            printerr('If you believe this is a bug, please review '
-                     'https://git.io/googler-no-results before submitting a bug report.')
-            self.no_results_instructions_shown = True
+        if self.no_results_instructions_shown:
+            return
+        try:
+            import json
+            import urllib.error
+            import urllib.request
+            info_json_url = '%s/master/info.json' % RAW_DOWNLOAD_REPO_BASE
+            logger.debug('Fetching %s for project status...', info_json_url)
+            try:
+                with urllib.request.urlopen(info_json_url, timeout=5) as response:
+                    try:
+                        info = json.load(response)
+                    except Exception:
+                        logger.error('Failed to decode project status from %s', info_json_url)
+                        raise RuntimeError
+            except urllib.error.HTTPError as e:
+                logger.error('Failed to fetch project status from %s: HTTP %d', info_json_url, e.code)
+                raise RuntimeError
+            epoch = info.get('epoch')
+            if epoch > _EPOCH_:
+                printerr('Your version of googler is broken due to Google-side changes.')
+                tracking_issue = info.get('tracking_issue')
+                fixed_on_master = info.get('fixed_on_master')
+                fixed_in_release = info.get('fixed_in_release')
+                if fixed_in_release:
+                    printerr('A new version, %s, has been released to address the changes.' % fixed_in_release)
+                    printerr('Please upgrade to the latest version.')
+                elif fixed_on_master:
+                    printerr('The fix has been pushed to master, pending a release.')
+                    printerr('Please download the master version https://git.io/googler or wait for a release.')
+                else:
+                    printerr('The issue is tracked at https://github.com/jarun/googler/issues/%s.' % tracking_issue)
+                return
+        except RuntimeError:
+            pass
+        printerr('If you believe this is a bug, please review '
+                 'https://git.io/googler-no-results before submitting a bug report.')
+        self.no_results_instructions_shown = True
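
The new breakage check above compares an epoch fetched from the repository against the baked-in _EPOCH_ ('20201001'). A hypothetical sketch of the info.json shape implied by the info.get(...) calls; field values are illustrative only:

    # Assumed structure, inferred from the keys the code reads. Epochs
    # appear to be zero-padded YYYYMMDD strings, so plain string
    # comparison against _EPOCH_ works.
    info = {
        'epoch': '20201012',        # > '20201001' => this version is broken
        'tracking_issue': '500',    # hypothetical GitHub issue number
        'fixed_on_master': True,
        'fixed_in_release': '4.3.1',
    }
    assert info['epoch'] > '20201001'  # triggers the upgrade notice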
     @require_keywords
     def display_results(self, prelude='\n', json_output=False):
         """Display results stored in ``self.results``.
         Parameters
         ----------
         See `fetch_and_display`.
         """
skipping to change at line 3431 skipping to change at line 3569
addarg('-n', '--count', dest='num', type=argparser.positive_int, addarg('-n', '--count', dest='num', type=argparser.positive_int,
default=10, metavar='N', help='show N results (default 10)') default=10, metavar='N', help='show N results (default 10)')
addarg('-N', '--news', action='store_true', addarg('-N', '--news', action='store_true',
help='show results from news section') help='show results from news section')
addarg('-V', '--videos', action='store_true', addarg('-V', '--videos', action='store_true',
help='show results from videos section') help='show results from videos section')
addarg('-c', '--tld', metavar='TLD', addarg('-c', '--tld', metavar='TLD',
help="""country-specific search with top-level domain .TLD, e.g., 'in ' help="""country-specific search with top-level domain .TLD, e.g., 'in '
for India""") for India""")
addarg('-l', '--lang', metavar='LANG', help='display in language LANG') addarg('-l', '--lang', metavar='LANG', help='display in language LANG')
addarg('-g', '--geoloc', metavar='CC',
help="""country-specific geolocation search with country code CC, e.g
.
'in' for India. Country codes are the same as top-level domains""")
addarg('-x', '--exact', action='store_true', addarg('-x', '--exact', action='store_true',
help='disable automatic spelling correction') help='disable automatic spelling correction')
addarg('--colorize', nargs='?', choices=['auto', 'always', 'never'], addarg('--colorize', nargs='?', choices=['auto', 'always', 'never'],
const='always', default='auto', const='always', default='auto',
help="""whether to colorize output; defaults to 'auto', which enables help="""whether to colorize output; defaults to 'auto', which enables
color when stdout is a tty device; using --colorize without an argume nt color when stdout is a tty device; using --colorize without an argume nt
is equivalent to --colorize=always""") is equivalent to --colorize=always""")
addarg('-C', '--nocolor', action='store_true', addarg('-C', '--nocolor', action='store_true',
help='equivalent to --colorize=never') help='equivalent to --colorize=never')
addarg('--colors', dest='colorstr', type=argparser.is_colorstr, addarg('--colors', dest='colorstr', type=argparser.is_colorstr,
skipping to change at line 3456 skipping to change at line 3597
metavar='dN', help='time limit search ' metavar='dN', help='time limit search '
'[h5 (5 hrs), d5 (5 days), w5 (5 weeks), m5 (5 months), y5 (5 years)] ') '[h5 (5 hrs), d5 (5 days), w5 (5 weeks), m5 (5 months), y5 (5 years)] ')
addarg('--from', type=argparser.is_date, addarg('--from', type=argparser.is_date,
help="""starting date/month/year of date range; must use American dat e help="""starting date/month/year of date range; must use American dat e
format with slashes, e.g., 2/24/2020, 2/2020, 2020; can be used in format with slashes, e.g., 2/24/2020, 2/2020, 2020; can be used in
conjunction with --to, and overrides -t, --time""") conjunction with --to, and overrides -t, --time""")
addarg('--to', type=argparser.is_date, addarg('--to', type=argparser.is_date,
help='ending date/month/year of date range; see --from') help='ending date/month/year of date range; see --from')
addarg('-w', '--site', dest='sites', action='append', metavar='SITE', addarg('-w', '--site', dest='sites', action='append', metavar='SITE',
help='search a site using Google') help='search a site using Google')
addarg('-e', '--exclude', dest='exclude', action='append', metavar='SITE',
help='exclude site from results')
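
Taken together with the -g/--geoloc option added above, the new flags can be exercised like this (hypothetical queries; both options are repeatable where action='append'):

    googler -g in dosa recipe                          # geolocate results to India
    googler -w reddit.com -e pinterest.com keyboards   # search reddit.com, exclude pinterest.com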
     addarg('--unfilter', action='store_true', help='do not omit similar results')
     addarg('-p', '--proxy', default=https_proxy_from_environment(),
            help="""tunnel traffic through an HTTP proxy;
            PROXY is of the form [http://][user:password@]proxyhost[:port]""")
     addarg('--noua', action='store_true', help=argparse.SUPPRESS)
     addarg('--notweak', action='store_true',
            help='disable TCP optimizations and forced TLS 1.2')
     addarg('--json', action='store_true',
            help='output in JSON format; implies --noprompt')
     addarg('--url-handler', metavar='UTIL',
End of changes. 28 change blocks; 41 lines changed or deleted, 201 lines changed or added.
