"Fossies" - the Fresh Open Source Software Archive  

Source code changes of the file "googler" between
googler-3.9.tar.gz and googler-4.0.tar.gz

About: googler is a command line tool to search Google (Web & News) from the terminal (requires Python).

googler  (googler-3.9):googler  (googler-4.0)
skipping to change at line 58 skipping to change at line 58
import readline import readline
except ImportError: except ImportError:
pass pass
try: try:
import setproctitle import setproctitle
setproctitle.setproctitle('googler') setproctitle.setproctitle('googler')
except (ImportError, Exception): except (ImportError, Exception):
pass pass
from typing import (
Any,
Dict,
Generator,
Iterable,
Iterator,
List,
Match,
Optional,
Sequence,
Tuple,
Union,
cast,
)
# Basic setup # Basic setup
logging.basicConfig(format='[%(levelname)s] %(message)s') logging.basicConfig(format='[%(levelname)s] %(message)s')
logger = logging.getLogger() logger = logging.getLogger()
def sigint_handler(signum, frame): def sigint_handler(signum, frame):
print('\nInterrupted.', file=sys.stderr) print('\nInterrupted.', file=sys.stderr)
sys.exit(1) sys.exit(1)
try: try:
signal.signal(signal.SIGINT, sigint_handler) signal.signal(signal.SIGINT, sigint_handler)
except ValueError: except ValueError:
# signal only works in main thread # signal only works in main thread
pass pass
# Constants # Constants
_VERSION_ = '3.9' _VERSION_ = '4.0'
COLORMAP = {k: '\x1b[%sm' % v for k, v in { COLORMAP = {k: '\x1b[%sm' % v for k, v in {
'a': '30', 'b': '31', 'c': '32', 'd': '33', 'a': '30', 'b': '31', 'c': '32', 'd': '33',
'e': '34', 'f': '35', 'g': '36', 'h': '37', 'e': '34', 'f': '35', 'g': '36', 'h': '37',
'i': '90', 'j': '91', 'k': '92', 'l': '93', 'i': '90', 'j': '91', 'k': '92', 'l': '93',
'm': '94', 'n': '95', 'o': '96', 'p': '97', 'm': '94', 'n': '95', 'o': '96', 'p': '97',
'A': '30;1', 'B': '31;1', 'C': '32;1', 'D': '33;1', 'A': '30;1', 'B': '31;1', 'C': '32;1', 'D': '33;1',
'E': '34;1', 'F': '35;1', 'G': '36;1', 'H': '37;1', 'E': '34;1', 'F': '35;1', 'G': '36;1', 'H': '37;1',
'I': '90;1', 'J': '91;1', 'K': '92;1', 'L': '93;1', 'I': '90;1', 'J': '91;1', 'K': '92;1', 'L': '93;1',
'M': '94;1', 'N': '95;1', 'O': '96;1', 'P': '97;1', 'M': '94;1', 'N': '95;1', 'O': '96;1', 'P': '97;1',
'x': '0', 'X': '1', 'y': '7', 'Y': '7;1', 'x': '0', 'X': '1', 'y': '7', 'Y': '7;1',
}.items()} }.items()}
USER_AGENT = 'googler/%s (like MSIE)' % _VERSION_ USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
text_browsers = ['elinks', 'links', 'lynx', 'w3m', 'www-browser'] text_browsers = ['elinks', 'links', 'lynx', 'w3m', 'www-browser']
# Self-upgrade parameters # Self-upgrade parameters
# #
# Downstream packagers are recommended to turn off the entire self-upgrade # Downstream packagers are recommended to turn off the entire self-upgrade
# mechanism through # mechanism through
# #
# make disable-self-upgrade # make disable-self-upgrade
# #
skipping to change at line 150 skipping to change at line 165
def textwrap_fill(text, width=70, **kwargs): def textwrap_fill(text, width=70, **kwargs):
return '\n'.join(textwrap_wrap(text, width=width, **kwargs)) return '\n'.join(textwrap_wrap(text, width=width, **kwargs))
textwrap.wrap = textwrap_wrap textwrap.wrap = textwrap_wrap
textwrap.fill = textwrap_fill textwrap.fill = textwrap_fill
textwrap.wrap.patched = True textwrap.wrap.patched = True
textwrap.fill.patched = True textwrap.fill.patched = True
monkeypatch_textwrap_for_cjk() monkeypatch_textwrap_for_cjk()
CoordinateType = Tuple[int, int]
class TrackedTextwrap:
"""
Implements a text wrapper that tracks the position of each source
character, and can correctly insert zero-width sequences at given
offsets of the source text.
Wrapping result should be the same as that from PSL textwrap.wrap
with default settings except expand_tabs=False.
"""
def __init__(self, text: str, width: int):
self._original = text
# Do the job of replace_whitespace first so that we can easily
# match text to wrapped lines later. Note that this operation
# does not change text length or offsets.
whitespace = "\t\n\v\f\r "
whitespace_trans = str.maketrans(whitespace, " " * len(whitespace))
text = text.translate(whitespace_trans)
self._lines = textwrap.wrap(
text, width, expand_tabs=False, replace_whitespace=False
)
# self._coords track the (row, column) coordinate of each source
# character in the result text. It is indexed by offset in
# source text.
self._coords = [] # type: List[CoordinateType]
offset = 0
try:
if not self._lines:
# Source text only has whitespaces. We add an empty line
# in order to produce meaningful coordinates.
self._lines = [""]
for row, line in enumerate(self._lines):
assert text[offset : offset + len(line)] == line
col = 0
for _ in line:
self._coords.append((row, col))
offset += 1
col += 1
# All subsequent dropped whitespaces map to the last, imaginary column
# (the EOL character if you wish) of the current line.
while offset < len(text) and text[offset] == " ":
self._coords.append((row, col))
offset += 1
# One past the final character (think of it as EOF) should
# be treated as a valid offset.
self._coords.append((row, col))
except AssertionError:
raise RuntimeError(
"TrackedTextwrap: the impossible happened at offset {} of text {
!r}".format(
offset, self._original
)
)
# seq should be a zero-width sequence, e.g., an ANSI escape sequence.
# May raise IndexError if offset is out of bounds.
def insert_zero_width_sequence(self, seq: str, offset: int) -> None:
row, col = self._coords[offset]
line = self._lines[row]
self._lines[row] = line[:col] + seq + line[col:]
# Shift coordinates of all characters after the given character
# on the same line.
shift = len(seq)
offset += 1
while offset < len(self._coords) and self._coords[offset][0] == row:
_, col = self._coords[offset]
self._coords[offset] = (row, col + shift)
offset += 1
@property
def original(self) -> str:
return self._original
@property
def lines(self) -> List[str]:
return self._lines
@property
def wrapped(self) -> str:
return "\n".join(self._lines)
# May raise IndexError if offset is out of bounds.
def get_coordinate(self, offset: int) -> CoordinateType:
return self._coords[offset]
### begin dim (DOM implementation with CSS support) ### ### begin dim (DOM implementation with CSS support) ###
### https://github.com/zmwangx/dim/blob/master/dim.py ### ### https://github.com/zmwangx/dim/blob/master/dim.py ###
import html import html
import re import re
import textwrap import textwrap
from collections import OrderedDict from collections import OrderedDict
from enum import Enum from enum import Enum
from html.parser import HTMLParser from html.parser import HTMLParser
try:
from typing import (
Any,
Dict,
Generator,
Iterable,
Iterator,
List,
Match,
Optional,
Tuple,
Union,
cast,
)
except ImportError: # pragma: no cover
# Python 3.4 without external typing module
class _TypeStub:
def __getitem__(self, _): # type: ignore
return None
Any = None
Dict = Generator = Iterable = Iterator = List = Match = _TypeStub() # type: ignore
Optional = Tuple = Union = _TypeStub() # type: ignore
def cast(typ, val): # type: ignore
return val
SelectorGroupLike = Union[str, "SelectorGroup", "Selector"] SelectorGroupLike = Union[str, "SelectorGroup", "Selector"]
class Node(object): class Node(object):
""" """
Represents a DOM node. Represents a DOM node.
Parts of JavaScript's DOM ``Node`` API and ``Element`` API are Parts of JavaScript's DOM ``Node`` API and ``Element`` API are
mirrored here, with extensions. In particular, ``querySelector`` and mirrored here, with extensions. In particular, ``querySelector`` and
``querySelectorAll`` are mirrored. ``querySelectorAll`` are mirrored.
skipping to change at line 432 skipping to change at line 509
""" """
Represents an element node. Represents an element node.
Note that tag and attribute names are case-insensitive; attribute Note that tag and attribute names are case-insensitive; attribute
values are case-sensitive. values are case-sensitive.
""" """
def __init__( def __init__(
self, self,
tag: str, tag: str,
attrs: Iterable[Tuple[str, str]], attrs: Iterable[Tuple[str, Optional[str]]],
*, *,
parent: Optional["Node"] = None, parent: Optional["Node"] = None,
children: Optional[List["Node"]] = None children: Optional[Sequence["Node"]] = None
) -> None: ) -> None:
Node.__init__(self) Node.__init__(self)
self.tag = tag.lower() # type: str self.tag = tag.lower() # type: str
self.attrs = OrderedDict((attr.lower(), val) for attr, val in attrs) self.attrs = OrderedDict((attr.lower(), val or "") for attr, val in attrs)
self.parent = parent self.parent = parent
self.children = children or [] self.children = list(children or [])
def __repr__(self) -> str: def __repr__(self) -> str:
s = "<" + self.tag s = "<" + self.tag
if self.attrs: if self.attrs:
s += " attrs=%s" % repr(list(self.attrs.items())) s += " attrs=%s" % repr(list(self.attrs.items()))
if self.children: if self.children:
s += " children=%s" % repr(self.children) s += " children=%s" % repr(self.children)
s += ">" s += ">"
return s return s
skipping to change at line 574 skipping to change at line 651
Consume HTML and builds a :class:`Node` tree. Once finished, use Consume HTML and builds a :class:`Node` tree. Once finished, use
:attr:`root` to access the root of the tree. :attr:`root` to access the root of the tree.
This parser cannot parse malformed HTML with tag mismatch. This parser cannot parse malformed HTML with tag mismatch.
""" """
def __init__(self) -> None: def __init__(self) -> None:
super().__init__(convert_charrefs=True) super().__init__(convert_charrefs=True)
self._stack = [] # type: List[Node] self._stack = [] # type: List[Node]
def handle_starttag(self, tag: str, attrs: List[Tuple[str, str]]) -> None: def handle_starttag(
self, tag: str, attrs: Sequence[Tuple[str, Optional[str]]]
) -> None:
node = ElementNode(tag, attrs) node = ElementNode(tag, attrs)
node._partial = True node._partial = True
self._stack.append(node) self._stack.append(node)
# For void elements, immediately invoke the end tag handler (see # For void elements, immediately invoke the end tag handler (see
# handle_startendtag()). # handle_startendtag()).
if _tag_is_void(tag): if _tag_is_void(tag):
self.handle_endtag(tag) self.handle_endtag(tag)
def handle_endtag(self, tag: str) -> None: def handle_endtag(self, tag: str) -> None:
tag = tag.lower() tag = tag.lower()
skipping to change at line 606 skipping to change at line 685
parent.children = list(reversed(children)) parent.children = list(reversed(children))
parent._partial = False parent._partial = False
for child in children: for child in children:
child.parent = parent child.parent = parent
# Make parser behavior for explicitly and implicitly void elements # Make parser behavior for explicitly and implicitly void elements
# (e.g., <hr> vs <hr/>) consistent. The former triggers # (e.g., <hr> vs <hr/>) consistent. The former triggers
# handle_starttag only, whereas the latter triggers # handle_starttag only, whereas the latter triggers
# handle_startendtag (which by default triggers both handle_starttag # handle_startendtag (which by default triggers both handle_starttag
# and handle_endtag). See https://www.bugs.python.org/issue25258. # and handle_endtag). See https://www.bugs.python.org/issue25258.
def handle_startendtag(self, tag: str, attrs: List[Tuple[str, str]]) -> None def handle_startendtag(
: self, tag: str, attrs: Sequence[Tuple[str, Optional[str]]]
) -> None:
self.handle_starttag(tag, attrs) self.handle_starttag(tag, attrs)
def handle_data(self, text: str) -> None: def handle_data(self, text: str) -> None:
if not self._stack: if not self._stack:
# Ignore text nodes before the first tag. # Ignore text nodes before the first tag.
return return
self._stack.append(TextNode(text)) self._stack.append(TextNode(text))
@property @property
def root(self) -> "Node": def root(self) -> "Node":
skipping to change at line 836 skipping to change at line 917
previous (:class:`Optional`\\[:class:`Selector`]): previous (:class:`Optional`\\[:class:`Selector`]):
Reference to the previous sequence of simple selectors in Reference to the previous sequence of simple selectors in
chain. chain.
""" """
def __init__( def __init__(
self, self,
*, *,
tag: Optional[str] = None, tag: Optional[str] = None,
classes: Optional[List[str]] = None, classes: Optional[Sequence[str]] = None,
id: Optional[str] = None, id: Optional[str] = None,
attrs: Optional[List["AttributeSelector"]] = None, attrs: Optional[Sequence["AttributeSelector"]] = None,
combinator: Optional["Combinator"] = None, combinator: Optional["Combinator"] = None,
previous: Optional["Selector"] = None previous: Optional["Selector"] = None
) -> None: ) -> None:
self.tag = tag.lower() if tag else None self.tag = tag.lower() if tag else None
self.classes = classes or [] self.classes = list(classes or [])
self.id = id self.id = id
self.attrs = attrs or [] self.attrs = list(attrs or [])
self.combinator = combinator self.combinator = combinator
self.previous = previous self.previous = previous
def __repr__(self) -> str: def __repr__(self) -> str:
return "<Selector %s>" % repr(str(self)) return "<Selector %s>" % repr(str(self))
def __str__(self) -> str: def __str__(self) -> str:
sequences = [] sequences = []
delimiters = [] delimiters = []
seq = self seq = self
skipping to change at line 2089 skipping to change at line 2170
except ImportError: except ImportError:
import pdb import pdb
pdb.set_trace() pdb.set_trace()
index = 0 index = 0
for div_g in tree.select_all('div.g'): for div_g in tree.select_all('div.g'):
if div_g.select('.hp-xpdbox'): if div_g.select('.hp-xpdbox'):
# Skip smart cards. # Skip smart cards.
continue continue
try: try:
h3 = div_g.select('h3.r') h3 = div_g.select('div.r h3')
a = h3.select('a') if h3:
title = a.text title = h3.text
mime = div_g.select('.mime') url = self.unwrap_link(h3.parent.attr('href'))
if mime: else:
title = mime.text + ' ' + title h3 = div_g.select('h3.r')
url = self.unwrap_link(a.attr('href')) a = h3.select('a')
title = a.text
mime = div_g.select('.mime')
if mime:
title = mime.text + ' ' + title
url = self.unwrap_link(a.attr('href'))
matched_keywords = [] matched_keywords = []
abstract = '' abstract = ''
for childnode in div_g.select('.st').children: for childnode in div_g.select('.st').children:
if 'f' in childnode.classes:
# .f is handled as metadata instead.
continue
if childnode.tag == 'b' and childnode.text != '...': if childnode.tag == 'b' and childnode.text != '...':
matched_keywords.append({'phrase': childnode.text, 'offset': len(abstract)}) matched_keywords.append({'phrase': childnode.text, 'offset': len(abstract)})
abstract = abstract + childnode.text.replace('\n', '') abstract = abstract + childnode.text.replace('\n', '')
try: try:
metadata = div_g.select('.slp').text metadata = div_g.select('.f').text
metadata = metadata.replace('\u200e', '').replace(' - ', ', ').strip() metadata = metadata.replace('\u200e', '').replace(' - ', ', ').strip()
except AttributeError: except AttributeError:
metadata = None metadata = None
except (AttributeError, ValueError): except (AttributeError, ValueError):
continue continue
sitelinks = [] sitelinks = []
for td in div_g.select_all('td'): for td in div_g.select_all('td'):
try: try:
a = td.select('a') a = td.select('a')
sl_title = a.text sl_title = a.text
skipping to change at line 2127 skipping to change at line 2216
except (AttributeError, ValueError): except (AttributeError, ValueError):
continue continue
index += 1 index += 1
self.results.append(Result(index, title, url, abstract, self.results.append(Result(index, title, url, abstract,
metadata=metadata, sitelinks=sitelinks, matches=matched_keywords)) metadata=metadata, sitelinks=sitelinks, matches=matched_keywords))
# Showing results for ... # Showing results for ...
# Search instead for ... # Search instead for ...
spell_orig = tree.select("span.spell_orig") spell_orig = tree.select("span.spell_orig")
if spell_orig: if spell_orig:
self.autocorrected = True showing_results_for_link = next(
self.showing_results_for = next(
filter(lambda el: el.tag == "a", spell_orig.previous_siblings()) , None filter(lambda el: el.tag == "a", spell_orig.previous_siblings()) , None
).text )
if showing_results_for_link:
self.autocorrected = True
self.showing_results_for = showing_results_for_link.text
# No results found for ... # No results found for ...
# Results for ...: # Results for ...:
alt_query_infobox = tree.select('#topstuff') alt_query_infobox = tree.select('#topstuff')
if alt_query_infobox: if alt_query_infobox:
bolds = alt_query_infobox.select_all('div b') bolds = alt_query_infobox.select_all('div b')
if len(bolds) == 2: if len(bolds) == 2:
self.showing_results_for = bolds[1].text self.showing_results_for = bolds[1].text
# In order to show you the most relevant results, we have # In order to show you the most relevant results, we have
# omitted some entries very similar to the N already displayed. # omitted some entries very similar to the N already displayed.
# ... # ...
self.filtered = tree.select('p#ofr') is not None self.filtered = tree.select('p#ofr') is not None
# Unwraps /url?q=http://...&sa=... # Unwraps /url?q=http://...&sa=...
# May raise ValueError. # TODO: don't unwrap if URL isn't in this form.
@staticmethod @staticmethod
def unwrap_link(link): def unwrap_link(link):
qs = urllib.parse.urlparse(link).query qs = urllib.parse.urlparse(link).query
try: try:
url = urllib.parse.parse_qs(qs)['q'][0] url = urllib.parse.parse_qs(qs)['q'][0]
except KeyError: except KeyError:
raise ValueError(link) return link
else: else:
if "://" in url: if "://" in url:
return url return url
else: else:
# Google's internal services link, e.g., # Google's internal services link, e.g.,
# /search?q=google&..., which cannot be unwrapped into # /search?q=google&..., which cannot be unwrapped into
# an actual URL. # an actual URL.
raise ValueError(link) raise ValueError(link)
class Sitelink(object): class Sitelink(object):
skipping to change at line 2234 skipping to change at line 2325
self.matches = [] if matches is None else matches self.matches = [] if matches is None else matches
self._urltable = {index: url} self._urltable = {index: url}
subindex = 'a' subindex = 'a'
for sitelink in self.sitelinks: for sitelink in self.sitelinks:
fullindex = index + subindex fullindex = index + subindex
sitelink.index = fullindex sitelink.index = fullindex
self._urltable[fullindex] = sitelink.url self._urltable[fullindex] = sitelink.url
subindex = chr(ord(subindex) + 1) subindex = chr(ord(subindex) + 1)
def _print_title_and_url(self, index, title, url, indent=3, pre=0): def _print_title_and_url(self, index, title, url, indent=0):
colors = self.colors colors = self.colors
if not self.urlexpand: if not self.urlexpand:
url = '[' + urllib.parse.urlparse(url).netloc + ']' url = '[' + urllib.parse.urlparse(url).netloc + ']'
if colors: if colors:
# Adjust index to print result index clearly # Adjust index to print result index clearly
print(" %s%s%-*s%s" % (' ' * pre, colors.index, indent, index + '.', colors.reset), end='') print(" %s%s%-3s%s" % (' ' * indent, colors.index, index + '.', colo rs.reset), end='')
if not self.urlexpand: if not self.urlexpand:
print(' ' + colors.title + title + colors.reset + ' ' + colors.url + url + colors.reset) print(' ' + colors.title + title + colors.reset + ' ' + colors.url + url + colors.reset)
else: else:
print(' ' + colors.title + title + colors.reset) print(' ' + colors.title + title + colors.reset)
print(' ' * (indent + 2 + pre) + colors.url + url + colors.reset ) print(' ' * (indent + 5) + colors.url + url + colors.reset)
else: else:
if self.urlexpand: if self.urlexpand:
print(' %s%-*s %s' % (' ' * pre, indent, index + '.', title)) print(' %s%-3s %s' % (' ' * indent, index + '.', title))
print(' %s%s' % (' ' * (indent + 1 + pre), url)) print(' %s%s' % (' ' * (indent + 4), url))
else: else:
print(' %s%-*s %s %s' % (' ' * pre, indent, index + '.', title, url)) print(' %s%-3s %s %s' % (' ' * indent, index + '.', title, url))
def _print_metadata_and_abstract(self, abstract, metadata=None, matches=None , indent=5, pre=0): def _print_metadata_and_abstract(self, abstract, metadata=None, matches=None , indent=0):
colors = self.colors colors = self.colors
try: try:
columns, _ = os.get_terminal_size() columns, _ = os.get_terminal_size()
except OSError: except OSError:
columns = 0 columns = 0
if metadata: if metadata:
if colors: if colors:
print(' ' * (indent + pre) + colors.metadata + metadata + colors.reset) print(' ' * (indent + 5) + colors.metadata + metadata + colors.reset)
else: else:
print(' ' * (indent + pre) + metadata) print(' ' * (indent + 5) + metadata)
fillwidth = (columns - (indent + 6)) if columns > indent + 6 else len(abstract)
wrapped_abstract = TrackedTextwrap(abstract, fillwidth)
if colors:
# Highlight matches.
for match in matches or []:
offset = match['offset']
span = len(match['phrase'])
wrapped_abstract.insert_zero_width_sequence('\x1b[1m', offset)
wrapped_abstract.insert_zero_width_sequence('\x1b[0m', offset +
span)
if colors: if colors:
# Start from the last match, as inserting the bold characters changes the offsets.
for match in reversed(matches or []):
abstract = (
abstract[: match['offset']]
+ '\033[1m'
+ match['phrase']
+ '\033[0m'
+ abstract[match['offset'] + len(match['phrase']) :]
)
print(colors.abstract, end='') print(colors.abstract, end='')
if columns > indent + 1 + pre: for line in wrapped_abstract.lines:
# Try to fill to columns print('%s%s' % (' ' * (indent + 5), line))
fillwidth = columns - indent - 1
for line in textwrap.wrap(abstract.replace('\n', ''), width=fillwidth):
print('%s%s' % (' ' * (indent + pre), line))
print('')
else:
print('%s%s\n' % (' ' * pre, abstract.replace('\n', ' ')))
if colors: if colors:
print(colors.reset, end='') print(colors.reset, end='')
print('')
def print(self): def print(self):
"""Print the result entry.""" """Print the result entry."""
self._print_title_and_url(self.index, self.title, self.url) self._print_title_and_url(self.index, self.title, self.url)
self._print_metadata_and_abstract(self.abstract, metadata=self.metadata, matches=self.matches) self._print_metadata_and_abstract(self.abstract, metadata=self.metadata, matches=self.matches)
for sitelink in self.sitelinks: for sitelink in self.sitelinks:
self._print_title_and_url(sitelink.index, sitelink.title, sitelink.u self._print_title_and_url(sitelink.index, sitelink.title, sitelink.u
rl, pre=4) rl, indent=4)
self._print_metadata_and_abstract(sitelink.abstract, pre=4) self._print_metadata_and_abstract(sitelink.abstract, indent=4)
def jsonizable_object(self): def jsonizable_object(self):
"""Return a JSON-serializable dict representing the result entry.""" """Return a JSON-serializable dict representing the result entry."""
obj = { obj = {
'title': self.title, 'title': self.title,
'url': self.url, 'url': self.url,
'abstract': self.abstract 'abstract': self.abstract
} }
if self.metadata: if self.metadata:
obj['metadata'] = self.metadata obj['metadata'] = self.metadata
skipping to change at line 2412 skipping to change at line 2499
atexit.register(self._conn.close) atexit.register(self._conn.close)
self.results = [] self.results = []
self._autocorrected = None self._autocorrected = None
self._showing_results_for = None self._showing_results_for = None
self._results_filtered = False self._results_filtered = False
self._urltable = {} self._urltable = {}
self.promptcolor = True if os.getenv('DISABLE_PROMPT_COLOR') is None else False self.promptcolor = True if os.getenv('DISABLE_PROMPT_COLOR') is None else False
self.no_results_instructions_shown = False
@property @property
def options(self): def options(self):
"""Current options.""" """Current options."""
return self._opts return self._opts
@property @property
def keywords(self): def keywords(self):
"""Current keywords.""" """Current keywords."""
return self._google_url.keywords return self._google_url.keywords
skipping to change at line 2459 skipping to change at line 2548
parser = GoogleParser(page, news=self._google_url.news) parser = GoogleParser(page, news=self._google_url.news)
self.results = parser.results self.results = parser.results
self._autocorrected = parser.autocorrected self._autocorrected = parser.autocorrected
self._showing_results_for = parser.showing_results_for self._showing_results_for = parser.showing_results_for
self._results_filtered = parser.filtered self._results_filtered = parser.filtered
self._urltable = {} self._urltable = {}
for r in self.results: for r in self.results:
self._urltable.update(r.urltable()) self._urltable.update(r.urltable())
def warn_no_results(self):
printerr('No results.')
if not self.no_results_instructions_shown:
printerr('If you believe this is a bug, please review '
'https://git.io/googler-no-results before submitting a bug report.')
self.no_results_instructions_shown = True
@require_keywords @require_keywords
def display_results(self, prelude='\n', json_output=False): def display_results(self, prelude='\n', json_output=False):
"""Display results stored in ``self.results``. """Display results stored in ``self.results``.
Parameters Parameters
---------- ----------
See `fetch_and_display`. See `fetch_and_display`.
""" """
if json_output: if json_output:
# JSON output # JSON output
import json import json
results_object = [r.jsonizable_object() for r in self.results] results_object = [r.jsonizable_object() for r in self.results]
print(json.dumps(results_object, indent=2, sort_keys=True, ensure_ascii=False)) print(json.dumps(results_object, indent=2, sort_keys=True, ensure_ascii=False))
else: else:
# Regular output # Regular output
if not self.results: if not self.results:
print('No results.', file=sys.stderr) self.warn_no_results()
else: else:
sys.stderr.write(prelude) sys.stderr.write(prelude)
for r in self.results: for r in self.results:
r.print() r.print()
@require_keywords @require_keywords
def showing_results_for_alert(self, interactive=True): def showing_results_for_alert(self, interactive=True):
colors = self.colors colors = self.colors
if self._showing_results_for: if self._showing_results_for:
if colors: if colors:
 End of changes. 38 change blocks. 
88 lines changed or deleted 185 lines changed or added

Home  |  About  |  Features  |  All  |  Newest  |  Dox  |  Diffs  |  RSS Feeds  |  Screenshots  |  Comments  |  Imprint  |  Privacy  |  HTTP(S)