", methods=["GET", "POST"])
def answer(topic=None):
"""
@@ -242,16 +266,19 @@ def answer(topic=None):
request.query_string
"""
- user_agent = request.headers.get('User-Agent', '').lower()
+ user_agent = request.headers.get("User-Agent", "").lower()
html_needed = _is_html_needed(user_agent)
options = parse_args(request.args)
- if topic in ['apple-touch-icon-precomposed.png', 'apple-touch-icon.png', 'apple-touch-icon-120x120-precomposed.png'] \
- or (topic is not None and any(topic.endswith('/'+x) for x in ['favicon.ico'])):
- return ''
+ if topic in [
+ "apple-touch-icon-precomposed.png",
+ "apple-touch-icon.png",
+ "apple-touch-icon-120x120-precomposed.png",
+ ] or (topic is not None and any(topic.endswith("/" + x) for x in ["favicon.ico"])):
+ return ""
- request_id = request.cookies.get('id')
- if topic is not None and topic.lstrip('/') == ':last':
+ request_id = request.cookies.get("id")
+ if topic is not None and topic.lstrip("/") == ":last":
if request_id:
topic = last_query(request_id)
else:
@@ -260,43 +287,47 @@ def answer(topic=None):
if request_id:
save_query(request_id, topic)
- if request.method == 'POST':
+ if request.method == "POST":
process_post_request(request, html_needed)
if html_needed:
return redirect("/")
return "OK\n"
- if 'topic' in request.args:
- return redirect("/%s" % request.args.get('topic'))
+ if "topic" in request.args:
+ return redirect("/%s" % request.args.get("topic"))
if topic is None:
topic = ":firstpage"
- if topic.startswith(':shell-x/'):
+ if topic.startswith(":shell-x/"):
return _proxy()
- #return requests.get('http://127.0.0.1:3000'+topic[8:]).text
+ # return requests.get('http://127.0.0.1:3000'+topic[8:]).text
lang = get_answer_language(request)
if lang:
- options['lang'] = lang
+ options["lang"] = lang
ip_address = get_request_ip(request)
- if '+' in topic:
+ if "+" in topic:
not_allowed = LIMITS.check_ip(ip_address)
if not_allowed:
return "429 %s\n" % not_allowed, 429
html_is_needed = _is_html_needed(user_agent) and not is_result_a_script(topic)
if html_is_needed:
- output_format='html'
+ output_format = "html"
else:
- output_format='ansi'
- result, found = cheat_wrapper(topic, request_options=options, output_format=output_format)
- if 'Please come back in several hours' in result and html_is_needed:
- malformed_response = open(os.path.join(CONFIG["path.internal.malformed"])).read()
+ output_format = "ansi"
+ result, found = cheat_wrapper(
+ topic, request_options=options, output_format=output_format
+ )
+ if "Please come back in several hours" in result and html_is_needed:
+ malformed_response = open(
+ os.path.join(CONFIG["path.internal.malformed"])
+ ).read()
return malformed_response
log_query(ip_address, found, topic, user_agent)
if html_is_needed:
return result
- return Response(result, mimetype='text/plain')
+ return Response(result, mimetype="text/plain")
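
As a reader's aid, a minimal sketch of the User-Agent negotiation that answer() performs above. The _CLI_AGENTS tuple and the _is_html_needed body below are illustrative assumptions, not the project's actual helper:

    # Requires Flask; toy version of the browser-vs-CLI content negotiation.
    from flask import Flask, Response, request

    app = Flask(__name__)

    _CLI_AGENTS = ("curl", "wget", "httpie", "python-requests")  # assumed list

    def _is_html_needed(user_agent):
        # Command-line clients get plain text, everything else gets HTML.
        return not any(agent in user_agent for agent in _CLI_AGENTS)

    @app.route("/<topic>")
    def answer(topic):
        user_agent = request.headers.get("User-Agent", "").lower()
        if _is_html_needed(user_agent):
            return "<html><body>%s</body></html>" % topic
        return Response(topic + "\n", mimetype="text/plain")
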
diff --git a/bin/clean_cache.py b/bin/clean_cache.py
index bc19aa84..53e3f9c0 100644
--- a/bin/clean_cache.py
+++ b/bin/clean_cache.py
@@ -1,7 +1,7 @@
import sys
import redis
-REDIS = redis.Redis(host='localhost', port=6379, db=0)
+
+REDIS = redis.Redis(host="localhost", port=6379, db=0)
for key in sys.argv[1:]:
REDIS.delete(key)
-
diff --git a/bin/srv.py b/bin/srv.py
index 847375a7..1d4aab3b 100644
--- a/bin/srv.py
+++ b/bin/srv.py
@@ -5,6 +5,7 @@
from gevent.monkey import patch_all
from gevent.pywsgi import WSGIServer
+
patch_all()
import os
@@ -13,16 +14,16 @@
from app import app, CONFIG
-if '--debug' in sys.argv:
+if "--debug" in sys.argv:
# Not all debug mode features are available under `gevent`
# https://github.com/pallets/flask/issues/3825
app.debug = True
-if 'CHEATSH_PORT' in os.environ:
- port = int(os.environ.get('CHEATSH_PORT'))
+if "CHEATSH_PORT" in os.environ:
+ port = int(os.environ.get("CHEATSH_PORT"))
else:
- port = CONFIG['server.port']
+ port = CONFIG["server.port"]
-srv = WSGIServer((CONFIG['server.bind'], port), app)
+srv = WSGIServer((CONFIG["server.bind"], port), app)
print("Starting gevent server on {}:{}".format(srv.address[0], srv.address[1]))
srv.serve_forever()
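
A tiny sketch of the port-resolution order the hunk above preserves: CHEATSH_PORT, when set, overrides server.port from the config (the config dict below is a stand-in):

    import os

    def resolve_port(config):
        # Environment variable wins over the configured port.
        if "CHEATSH_PORT" in os.environ:
            return int(os.environ["CHEATSH_PORT"])
        return config["server.port"]

    print(resolve_port({"server.port": 8002}))
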
diff --git a/doc/README-ja.md b/doc/README-ja.md
index e1a0cd72..cb331c88 100644
--- a/doc/README-ja.md
+++ b/doc/README-ja.md
@@ -360,7 +360,7 @@ let g:syntastic_shell_checkers = ['shellcheck']
scrooloose / syntastic – 構文チェックプラグイン
cheat.sh-vim – Vimのサポート
-Syntasticは警告とエラー(code analysysツールで見つかった: jshint 、 jshint 、 pylint 、 shellcheckt etc.), and cheat.sh-vim`を表示すると、エディタに書き込まれたプログラミング言語のクエリに関するエラーと警告と回答の説明が表示されます。
+Syntasticは警告とエラー(code analysisツールで見つかった: jshint 、 jshint 、 pylint 、 shellcheckt etc.), and cheat.sh-vim`を表示すると、エディタに書き込まれたプログラミング言語のクエリに関するエラーと警告と回答の説明が表示されます。
cheat.sh Vimプラグインの最も重要な機能が表示されているデモをご覧ください(5分):
diff --git a/lib/adapter/__init__.py b/lib/adapter/__init__.py
index 50d30cd5..f1a54663 100644
--- a/lib/adapter/__init__.py
+++ b/lib/adapter/__init__.py
@@ -12,7 +12,8 @@
__all__ = [
basename(f)[:-3]
for f in glob.glob(join(dirname(__file__), "*.py"))
- if isfile(f) and not f.endswith('__init__.py')]
+ if isfile(f) and not f.endswith("__init__.py")
+]
from .adapter import all_adapters
from . import *
diff --git a/lib/adapter/adapter.py b/lib/adapter/adapter.py
index ffa55ec4..ee1ae069 100644
--- a/lib/adapter/adapter.py
+++ b/lib/adapter/adapter.py
@@ -11,16 +11,19 @@
from six import with_metaclass
from config import CONFIG
+
class AdapterMC(type):
"""
Adapter Metaclass.
Defines string representation of adapters
"""
+
def __repr__(cls):
- if hasattr(cls, '_class_repr'):
- return getattr(cls, '_class_repr')()
+ if hasattr(cls, "_class_repr"):
+ return getattr(cls, "_class_repr")()
return super(AdapterMC, cls).__repr__()
+
class Adapter(with_metaclass(AdapterMC, object)):
"""
An abstract class, defines methods:
@@ -39,7 +42,7 @@ class Adapter(with_metaclass(AdapterMC, object)):
"""
_adapter_name = None
- _output_format = 'code'
+ _output_format = "code"
_cache_needed = False
_repository_url = None
_local_repository_location = None
@@ -49,7 +52,7 @@ class Adapter(with_metaclass(AdapterMC, object)):
@classmethod
def _class_repr(cls):
- return '[Adapter: %s (%s)]' % (cls._adapter_name, cls.__name__)
+ return "[Adapter: %s (%s)]" % (cls._adapter_name, cls.__name__)
def __init__(self):
self._list = {None: self._get_list()}
@@ -108,13 +111,13 @@ def _get_page(self, topic, request_options=None):
pass
def _get_output_format(self, topic):
- if '/' in topic:
- subquery = topic.split('/')[-1]
+ if "/" in topic:
+ subquery = topic.split("/")[-1]
else:
subquery = topic
- if subquery in [':list']:
- return 'text'
+ if subquery in [":list"]:
+ return "text"
return self._output_format
# pylint: disable=unused-argument
@@ -142,11 +145,11 @@ def get_page_dict(self, topic, request_options=None):
answer = {"answer": answer}
answer_dict = {
- 'topic': topic,
- 'topic_type': self._adapter_name,
- 'format': self._get_output_format(topic),
- 'cache': self._cache_needed,
- }
+ "topic": topic,
+ "topic_type": self._adapter_name,
+ "format": self._get_output_format(topic),
+ "cache": self._cache_needed,
+ }
answer_dict.update(answer)
# pylint: disable=assignment-from-none
@@ -176,9 +179,9 @@ def local_repository_location(cls, cheat_sheets_location=False):
if not dirname and cls._repository_url:
dirname = cls._repository_url
- if dirname.startswith('https://'):
+ if dirname.startswith("https://"):
dirname = dirname[8:]
- elif dirname.startswith('http://'):
+ elif dirname.startswith("http://"):
dirname = dirname[7:]
# if we did not manage to find out dirname up to this point,
@@ -187,7 +190,7 @@ def local_repository_location(cls, cheat_sheets_location=False):
if not dirname:
return None
- if dirname.startswith('/'):
+ if dirname.startswith("/"):
return dirname
# it is possible that several repositories will
@@ -195,10 +198,10 @@ def local_repository_location(cls, cheat_sheets_location=False):
# (because only the last part of the path is used)
# in this case provide the name in _local_repository_location
# (detected by fetch.py)
- if '/' in dirname:
- dirname = dirname.split('/')[-1]
+ if "/" in dirname:
+ dirname = dirname.split("/")[-1]
- path = os.path.join(CONFIG['path.repositories'], dirname)
+ path = os.path.join(CONFIG["path.repositories"], dirname)
if cheat_sheets_location:
path = os.path.join(path, cls._cheatsheet_files_prefix)
@@ -225,7 +228,8 @@ def fetch_command(cls):
# in this case `fetch` has to be implemented
# in the distinct adapter subclass
raise RuntimeError(
- "Do not known how to handle this repository: %s" % cls._repository_url)
+                "Do not know how to handle this repository: %s" % cls._repository_url
+ )
@classmethod
def update_command(cls):
@@ -245,7 +249,8 @@ def update_command(cls):
# in this case `update` has to be implemented
# in the distinct adapter subclass
raise RuntimeError(
- "Do not known how to handle this repository: %s" % cls._repository_url)
+                "Do not know how to handle this repository: %s" % cls._repository_url
+ )
@classmethod
def current_state_command(cls):
@@ -264,7 +269,8 @@ def current_state_command(cls):
# in this case `update` has to be implemented
# in the distinct adapter subclass
raise RuntimeError(
- "Do not known how to handle this repository: %s" % cls._repository_url)
+                "Do not know how to handle this repository: %s" % cls._repository_url
+ )
@classmethod
def save_state(cls, state):
@@ -273,8 +279,8 @@ def save_state(cls, state):
Must be called after the cache clean up.
"""
local_repository_dir = cls.local_repository_location()
- state_filename = os.path.join(local_repository_dir, '.cached_revision')
- open(state_filename, 'w').write(state)
+ state_filename = os.path.join(local_repository_dir, ".cached_revision")
+ open(state_filename, "w").write(state)
@classmethod
def get_state(cls):
@@ -284,10 +290,10 @@ def get_state(cls):
"""
local_repository_dir = cls.local_repository_location()
- state_filename = os.path.join(local_repository_dir, '.cached_revision')
+ state_filename = os.path.join(local_repository_dir, ".cached_revision")
state = None
if os.path.exists(state_filename):
- state = open(state_filename, 'r').read()
+ state = open(state_filename, "r").read()
return state
@classmethod
@@ -317,20 +323,23 @@ def get_updates_list(cls, updated_files_list):
answer.append(entry)
return answer
+
def all_adapters(as_dict=False):
"""
Return list of all known adapters
If `as_dict` is True, return dict {'name': adapter} instead of a list.
"""
+
def _all_subclasses(cls):
- return set(cls.__subclasses__()).union(set(
- [s for c in cls.__subclasses__() for s in _all_subclasses(c)]
- ))
+ return set(cls.__subclasses__()).union(
+ set([s for c in cls.__subclasses__() for s in _all_subclasses(c)])
+ )
if as_dict:
- return {x.name():x for x in _all_subclasses(Adapter)}
+ return {x.name(): x for x in _all_subclasses(Adapter)}
return list(_all_subclasses(Adapter))
+
def adapter_by_name(name):
"""
Return adapter having this name,
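
The all_adapters() helper reformatted above discovers adapters by walking Adapter.__subclasses__() recursively; a self-contained sketch of that pattern, with toy classes standing in for the real adapters:

    class Adapter(object):
        @classmethod
        def name(cls):
            return cls.__name__.lower()

    class GitAdapter(Adapter):
        pass

    class CheatSheets(GitAdapter):
        pass

    def _all_subclasses(cls):
        # Direct subclasses plus, recursively, their subclasses.
        return set(cls.__subclasses__()).union(
            s for c in cls.__subclasses__() for s in _all_subclasses(c)
        )

    def all_adapters(as_dict=False):
        if as_dict:
            return {x.name(): x for x in _all_subclasses(Adapter)}
        return list(_all_subclasses(Adapter))

    print(sorted(all_adapters(as_dict=True)))  # ['cheatsheets', 'gitadapter']
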
diff --git a/lib/adapter/cheat_cheat.py b/lib/adapter/cheat_cheat.py
index 9ba99550..ca80f08e 100644
--- a/lib/adapter/cheat_cheat.py
+++ b/lib/adapter/cheat_cheat.py
@@ -9,6 +9,7 @@
from .git_adapter import GitRepositoryAdapter
+
class Cheat(GitRepositoryAdapter):
"""
cheat/cheat adapter
diff --git a/lib/adapter/cheat_sheets.py b/lib/adapter/cheat_sheets.py
index 01666a2d..3e27ec23 100644
--- a/lib/adapter/cheat_sheets.py
+++ b/lib/adapter/cheat_sheets.py
@@ -11,24 +11,26 @@
from .git_adapter import GitRepositoryAdapter
+
def _remove_initial_underscore(filename):
- if filename.startswith('_'):
+ if filename.startswith("_"):
filename = filename[1:]
return filename
+
def _sanitize_dirnames(filename, restore=False):
"""
Remove (or add) leading _ in the directories names in `filename`
The `restore` param means that the path name should be restored from the queryname,
i.e. conversion should be done in the opposite direction
"""
- parts = filename.split('/')
+ parts = filename.split("/")
newparts = []
for part in parts[:-1]:
if restore:
- newparts.append('_'+part)
+ newparts.append("_" + part)
continue
- if part.startswith('_'):
+ if part.startswith("_"):
newparts.append(part[1:])
else:
newparts.append(part)
@@ -36,8 +38,8 @@ def _sanitize_dirnames(filename, restore=False):
return "/".join(newparts)
-class CheatSheets(GitRepositoryAdapter):
+class CheatSheets(GitRepositoryAdapter):
"""
Adapter for the cheat.sheets cheat sheets.
"""
@@ -56,18 +58,17 @@ def _get_list(self, prefix=None):
hidden_files = ["_info.yaml"]
answer = []
prefix = os.path.join(
- self.local_repository_location(),
- self._cheatsheet_files_prefix)
- for mask in ['*', '*/*']:
- template = os.path.join(
- prefix,
- mask)
+ self.local_repository_location(), self._cheatsheet_files_prefix
+ )
+ for mask in ["*", "*/*"]:
+ template = os.path.join(prefix, mask)
answer += [
- _sanitize_dirnames(f_name[len(prefix):])
+ _sanitize_dirnames(f_name[len(prefix) :])
for f_name in glob.glob(template)
if not os.path.isdir(f_name)
- and os.path.basename(f_name) not in hidden_files]
+ and os.path.basename(f_name) not in hidden_files
+ ]
return sorted(answer)
@@ -76,18 +77,19 @@ def _get_page(self, topic, request_options=None):
filename = os.path.join(
self.local_repository_location(),
self._cheatsheet_files_prefix,
- _sanitize_dirnames(topic, restore=True))
+ _sanitize_dirnames(topic, restore=True),
+ )
if os.path.exists(filename):
- answer = self._format_page(open(filename, 'r').read())
+ answer = self._format_page(open(filename, "r").read())
else:
# though it should not happen
answer = "%s:%s not found" % (str(self.__class__), topic)
return answer
-class CheatSheetsDir(CheatSheets):
+class CheatSheetsDir(CheatSheets):
"""
Adapter for the cheat sheets directories.
Provides pages named according to subdirectories:
@@ -103,14 +105,16 @@ class CheatSheetsDir(CheatSheets):
def _get_list(self, prefix=None):
template = os.path.join(
- self.local_repository_location(),
- self._cheatsheet_files_prefix,
- '*')
+ self.local_repository_location(), self._cheatsheet_files_prefix, "*"
+ )
- answer = sorted([
- _remove_initial_underscore(os.path.basename(f_name)) + "/"
- for f_name in glob.glob(template)
- if os.path.isdir(f_name)])
+ answer = sorted(
+ [
+ _remove_initial_underscore(os.path.basename(f_name)) + "/"
+ for f_name in glob.glob(template)
+ if os.path.isdir(f_name)
+ ]
+ )
return answer
@@ -122,12 +126,12 @@ def _get_page(self, topic, request_options=None):
template = os.path.join(
self.local_repository_location(),
self._cheatsheet_files_prefix,
- topic.rstrip('/'),
- '*')
+ topic.rstrip("/"),
+ "*",
+ )
- answer = sorted([
- os.path.basename(f_name) for f_name in glob.glob(template)])
+ answer = sorted([os.path.basename(f_name) for f_name in glob.glob(template)])
return "\n".join(answer) + "\n"
def is_found(self, topic):
- return CheatSheets.is_found(self, topic.rstrip('/'))
+ return CheatSheets.is_found(self, topic.rstrip("/"))
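
_sanitize_dirnames above maps between on-disk directory names (leading underscore) and query names (no underscore) in both directions; a short sketch of that convention with example output:

    def _sanitize_dirnames(filename, restore=False):
        # Drop the leading "_" from directory components for query names,
        # or put it back (restore=True) to get the on-disk path again.
        parts = filename.split("/")
        newparts = []
        for part in parts[:-1]:
            if restore:
                newparts.append("_" + part)
            elif part.startswith("_"):
                newparts.append(part[1:])
            else:
                newparts.append(part)
        newparts.append(parts[-1])
        return "/".join(newparts)

    print(_sanitize_dirnames("_perl/functions"))               # perl/functions
    print(_sanitize_dirnames("perl/functions", restore=True))  # _perl/functions
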
diff --git a/lib/adapter/cmd.py b/lib/adapter/cmd.py
index edcf923a..ee10329e 100644
--- a/lib/adapter/cmd.py
+++ b/lib/adapter/cmd.py
@@ -1,5 +1,4 @@
-"""
-"""
+""" """
# pylint: disable=unused-argument,abstract-method
@@ -19,13 +18,12 @@ def _get_abspath(path):
return path
import __main__
- return os.path.join(
- os.path.dirname(os.path.dirname(__main__.__file__)),
- path)
+
+ return os.path.join(os.path.dirname(os.path.dirname(__main__.__file__)), path)
+
class CommandAdapter(Adapter):
- """
- """
+ """ """
_command = []
@@ -37,14 +35,17 @@ def _get_page(self, topic, request_options=None):
if cmd:
try:
proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
- answer = proc.communicate()[0].decode('utf-8', 'ignore')
+ answer = proc.communicate()[0].decode("utf-8", "ignore")
except OSError:
- return "ERROR of the \"%s\" adapter: please create an issue" % self._adapter_name
+ return (
+ 'ERROR of the "%s" adapter: please create an issue'
+ % self._adapter_name
+ )
return answer
return ""
-class Fosdem(CommandAdapter):
+class Fosdem(CommandAdapter):
"""
Show the output of the `current-fosdem-slide` command,
which shows the current slide open in some terminal.
@@ -66,22 +67,26 @@ class Fosdem(CommandAdapter):
_pages_list = [":fosdem"]
_command = ["sudo", "/usr/local/bin/current-fosdem-slide"]
+
class Translation(CommandAdapter):
- """
- """
+ """ """
_adapter_name = "translation"
_output_format = "text"
_cache_needed = True
def _get_page(self, topic, request_options=None):
- from_, topic = topic.split('/', 1)
- to_ = request_options.get('lang', 'en')
- if '-' in from_:
- from_, to_ = from_.split('-', 1)
+ from_, topic = topic.split("/", 1)
+ to_ = request_options.get("lang", "en")
+ if "-" in from_:
+ from_, to_ = from_.split("-", 1)
- return ["/home/igor/cheat.sh/bin/get_translation",
- from_, to_, topic.replace('+', ' ')]
+ return [
+ "/home/igor/cheat.sh/bin/get_translation",
+ from_,
+ to_,
+ topic.replace("+", " "),
+ ]
class AdapterRfc(CommandAdapter):
@@ -112,6 +117,7 @@ def _get_list(self, prefix=None):
def is_found(self, topic):
return True
+
class AdapterOeis(CommandAdapter):
"""
Show OEIS by its number.
@@ -145,13 +151,14 @@ def _get_command(self, topic, request_options=None):
suffix = " :list"
topic = topic[:-6]
- topic = re.sub('[^a-zA-Z0-9-:]+', ' ', topic) + suffix
+ topic = re.sub("[^a-zA-Z0-9-:]+", " ", topic) + suffix
return cmd + [topic]
def is_found(self, topic):
return True
+
class AdapterChmod(CommandAdapter):
"""
Show chmod numeric values and strings
@@ -170,8 +177,7 @@ def _get_command(self, topic, request_options=None):
# remove all non (alphanumeric, '-') chars
if topic.startswith("chmod/"):
topic = topic[6:]
- topic = re.sub('[^a-zA-Z0-9-]', '', topic)
-
+ topic = re.sub("[^a-zA-Z0-9-]", "", topic)
return cmd + [topic]
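
CommandAdapter subclasses above only assemble an argv list in _get_command(); _get_page() runs it with Popen and decodes stdout. A minimal runnable sketch of that pattern, using echo as a stand-in for the real helper binaries:

    from subprocess import PIPE, Popen

    class CommandAdapter(object):
        _command = []

        def _get_command(self, topic, request_options=None):
            return self._command + [topic]

        def _get_page(self, topic, request_options=None):
            cmd = self._get_command(topic, request_options=request_options)
            try:
                proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
                return proc.communicate()[0].decode("utf-8", "ignore")
            except OSError:
                return "ERROR: please create an issue"

    class Echo(CommandAdapter):
        # Toy adapter; the real ones wrap rfc/oeis/chmod helper scripts.
        _command = ["echo"]

    print(Echo()._get_page("chmod 755"))
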
diff --git a/lib/adapter/common.py b/lib/adapter/common.py
index 9efce004..d2904147 100644
--- a/lib/adapter/common.py
+++ b/lib/adapter/common.py
@@ -1,6 +1,6 @@
class Adapter(object):
pass
+
class cheatAdapter(Adapter):
pass
-
diff --git a/lib/adapter/git_adapter.py b/lib/adapter/git_adapter.py
index 8aa1864b..7766fbee 100644
--- a/lib/adapter/git_adapter.py
+++ b/lib/adapter/git_adapter.py
@@ -5,11 +5,13 @@
import glob
import os
-from .adapter import Adapter # pylint: disable=relative-import
+from .adapter import Adapter # pylint: disable=relative-import
+
def _get_filenames(path):
return [os.path.split(topic)[1] for topic in glob.glob(path)]
+
class RepositoryAdapter(Adapter):
"""
Implements methods needed to handle standard
@@ -26,25 +28,26 @@ def _get_list(self, prefix=None):
os.path.join(
self.local_repository_location(),
self._cheatsheet_files_prefix,
- '*'+self._cheatsheet_files_extension))
+ "*" + self._cheatsheet_files_extension,
+ )
+ )
ext = self._cheatsheet_files_extension
if ext:
- answer = [filename[:-len(ext)]
- for filename in answer
- if filename.endswith(ext)]
+ answer = [
+ filename[: -len(ext)] for filename in answer if filename.endswith(ext)
+ ]
return answer
def _get_page(self, topic, request_options=None):
filename = os.path.join(
- self.local_repository_location(),
- self._cheatsheet_files_prefix,
- topic)
+ self.local_repository_location(), self._cheatsheet_files_prefix, topic
+ )
if os.path.exists(filename) and not os.path.isdir(filename):
- answer = self._format_page(open(filename, 'r').read())
+ answer = self._format_page(open(filename, "r").read())
else:
# though it should not happen
answer = "%s:%s not found" % (str(self.__class__), topic)
@@ -52,7 +55,7 @@ def _get_page(self, topic, request_options=None):
return answer
-class GitRepositoryAdapter(RepositoryAdapter): #pylint: disable=abstract-method
+class GitRepositoryAdapter(RepositoryAdapter): # pylint: disable=abstract-method
"""
Implements all methods needed to handle cache handling
for git-repository-based adapters
@@ -69,17 +72,18 @@ def fetch_command(cls):
if not cls._repository_url:
return None
- if not cls._repository_url.startswith('https://github.com/'):
+ if not cls._repository_url.startswith("https://github.com/"):
# in this case `fetch` has to be implemented
# in the distinct adapter subclass
raise RuntimeError(
- "Do not known how to handle this repository: %s" % cls._repository_url)
+                "Do not know how to handle this repository: %s" % cls._repository_url
+ )
local_repository_dir = cls.local_repository_location()
if not local_repository_dir:
return None
- return ['git', 'clone', '--depth=1', cls._repository_url, local_repository_dir]
+ return ["git", "clone", "--depth=1", cls._repository_url, local_repository_dir]
@classmethod
def update_command(cls):
@@ -96,13 +100,14 @@ def update_command(cls):
if not local_repository_dir:
return None
- if not cls._repository_url.startswith('https://github.com/'):
+ if not cls._repository_url.startswith("https://github.com/"):
# in this case `update` has to be implemented
# in the distinct adapter subclass
raise RuntimeError(
- "Do not known how to handle this repository: %s" % cls._repository_url)
+                "Do not know how to handle this repository: %s" % cls._repository_url
+ )
- return ['git', 'pull']
+ return ["git", "pull"]
@classmethod
def current_state_command(cls):
@@ -118,13 +123,14 @@ def current_state_command(cls):
if not local_repository_dir:
return None
- if not cls._repository_url.startswith('https://github.com/'):
+ if not cls._repository_url.startswith("https://github.com/"):
# in this case `update` has to be implemented
# in the distinct adapter subclass
raise RuntimeError(
- "Do not known how to handle this repository: %s" % cls._repository_url)
+                "Do not know how to handle this repository: %s" % cls._repository_url
+ )
- return ['git', 'rev-parse', '--short', 'HEAD', "--"]
+ return ["git", "rev-parse", "--short", "HEAD", "--"]
@classmethod
def save_state(cls, state):
@@ -133,8 +139,8 @@ def save_state(cls, state):
Must be called after the cache clean up.
"""
local_repository_dir = cls.local_repository_location()
- state_filename = os.path.join(local_repository_dir, '.cached_revision')
- open(state_filename, 'wb').write(state)
+ state_filename = os.path.join(local_repository_dir, ".cached_revision")
+ open(state_filename, "wb").write(state)
@classmethod
def get_state(cls):
@@ -144,10 +150,10 @@ def get_state(cls):
"""
local_repository_dir = cls.local_repository_location()
- state_filename = os.path.join(local_repository_dir, '.cached_revision')
+ state_filename = os.path.join(local_repository_dir, ".cached_revision")
state = None
if os.path.exists(state_filename):
- state = open(state_filename, 'r').read()
+ state = open(state_filename, "r").read()
return state
@classmethod
@@ -158,5 +164,5 @@ def get_updates_list_command(cls):
"""
current_state = cls.get_state()
if not current_state:
- return ['git', 'ls-tree', '--full-tree', '-r', '--name-only', 'HEAD', "--"]
- return ['git', 'diff', '--name-only', current_state, 'HEAD', "--"]
+ return ["git", "ls-tree", "--full-tree", "-r", "--name-only", "HEAD", "--"]
+ return ["git", "diff", "--name-only", current_state, "HEAD", "--"]
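
The GitRepositoryAdapter classmethods above never run git themselves; they only return argv lists (clone on first fetch, pull to update, rev-parse to record the cached revision) for the fetcher to execute. A condensed sketch of that contract, with an assumed repository path:

    class GitRepositoryAdapter(object):
        _repository_url = "https://github.com/chubin/cheat.sheets"

        @classmethod
        def local_repository_location(cls):
            # Assumption: the real path is derived from CONFIG["path.repositories"].
            return "/tmp/repositories/cheat.sheets"

        @classmethod
        def fetch_command(cls):
            return ["git", "clone", "--depth=1",
                    cls._repository_url, cls.local_repository_location()]

        @classmethod
        def update_command(cls):
            return ["git", "pull"]

        @classmethod
        def current_state_command(cls):
            return ["git", "rev-parse", "--short", "HEAD", "--"]

    print(GitRepositoryAdapter.fetch_command())
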
diff --git a/lib/adapter/internal.py b/lib/adapter/internal.py
index da1e4165..500f3fcb 100644
--- a/lib/adapter/internal.py
+++ b/lib/adapter/internal.py
@@ -11,10 +11,12 @@
try:
from rapidfuzz import process, fuzz
- _USING_FUZZYWUZZY=False
+
+ _USING_FUZZYWUZZY = False
except ImportError:
from fuzzywuzzy import process, fuzz
- _USING_FUZZYWUZZY=True
+
+ _USING_FUZZYWUZZY = True
from config import CONFIG
from .adapter import Adapter
@@ -37,16 +39,17 @@
":styles-demo",
":vim",
":zsh",
- ]
+]
_COLORIZED_INTERNAL_TOPICS = [
- ':intro',
+ ":intro",
]
+
class InternalPages(Adapter):
- _adapter_name = 'internal'
- _output_format = 'ansi'
+ _adapter_name = "internal"
+ _output_format = "ansi"
def __init__(self, get_topic_type=None, get_topics_list=None):
Adapter.__init__(self)
@@ -54,10 +57,9 @@ def __init__(self, get_topic_type=None, get_topics_list=None):
self.get_topics_list = get_topics_list
def _get_stat(self):
- stat = collections.Counter([
- self.get_topic_type(topic)
- for topic in self.get_topics_list()
- ])
+ stat = collections.Counter(
+ [self.get_topic_type(topic) for topic in self.get_topics_list()]
+ )
answer = ""
for key, val in stat.items():
@@ -69,13 +71,15 @@ def get_list(prefix=None):
return _INTERNAL_TOPICS
def _get_list_answer(self, topic, request_options=None):
- if '/' in topic:
- topic_type, topic_name = topic.split('/', 1)
+ if "/" in topic:
+ topic_type, topic_name = topic.split("/", 1)
if topic_name == ":list":
- topic_list = [x[len(topic_type)+1:]
- for x in self.get_topics_list()
- if x.startswith(topic_type + "/")]
- return "\n".join(topic_list)+"\n"
+ topic_list = [
+ x[len(topic_type) + 1 :]
+ for x in self.get_topics_list()
+ if x.startswith(topic_type + "/")
+ ]
+ return "\n".join(topic_list) + "\n"
answer = ""
if topic == ":list":
@@ -84,31 +88,31 @@ def _get_list_answer(self, topic, request_options=None):
return answer
def _get_page(self, topic, request_options=None):
- if topic.endswith('/:list') or topic.lstrip('/') == ':list':
+ if topic.endswith("/:list") or topic.lstrip("/") == ":list":
return self._get_list_answer(topic)
answer = ""
- if topic == ':styles':
+ if topic == ":styles":
answer = "\n".join(CONFIG["frontend.styles"]) + "\n"
elif topic == ":stat":
- answer = self._get_stat()+"\n"
+ answer = self._get_stat() + "\n"
elif topic in _INTERNAL_TOPICS:
- answer = open(os.path.join(CONFIG["path.internal.pages"], topic[1:]+".txt"), "r").read()
+ answer = open(
+ os.path.join(CONFIG["path.internal.pages"], topic[1:] + ".txt"), "r"
+ ).read()
if topic in _COLORIZED_INTERNAL_TOPICS:
answer = colorize_internal(answer)
return answer
def is_found(self, topic):
- return (
- topic in self.get_list()
- or topic.endswith('/:list')
- )
+ return topic in self.get_list() or topic.endswith("/:list")
+
class UnknownPages(InternalPages):
- _adapter_name = 'unknown'
- _output_format = 'text'
+ _adapter_name = "unknown"
+ _output_format = "text"
@staticmethod
def get_list(prefix=None):
@@ -120,27 +124,35 @@ def is_found(topic):
def _get_page(self, topic, request_options=None):
topics_list = self.get_topics_list()
- if topic.startswith(':'):
- topics_list = [x for x in topics_list if x.startswith(':')]
+ if topic.startswith(":"):
+ topics_list = [x for x in topics_list if x.startswith(":")]
else:
- topics_list = [x for x in topics_list if not x.startswith(':')]
+ topics_list = [x for x in topics_list if not x.startswith(":")]
if _USING_FUZZYWUZZY:
possible_topics = process.extract(topic, topics_list, scorer=fuzz.ratio)[:3]
else:
- possible_topics = process.extract(topic, topics_list, limit=3, scorer=fuzz.ratio)
- possible_topics_text = "\n".join([(" * %s %s" % (x[0], int(x[1]))) for x in possible_topics])
- return """
+ possible_topics = process.extract(
+ topic, topics_list, limit=3, scorer=fuzz.ratio
+ )
+ possible_topics_text = "\n".join(
+ [(" * %s %s" % (x[0], int(x[1]))) for x in possible_topics]
+ )
+ return (
+ """
Unknown topic.
Do you mean one of these topics maybe?
%s
- """ % possible_topics_text
+ """
+ % possible_topics_text
+ )
+
class Search(Adapter):
- _adapter_name = 'search'
- _output_format = 'text'
+ _adapter_name = "search"
+ _output_format = "text"
_cache_needed = False
@staticmethod
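
The hunk above keeps both fuzzy-matching backends working: with fuzzywuzzy the result list is sliced to three entries afterwards, while rapidfuzz is asked for limit=3 directly. The same import-fallback pattern in isolation (one of the two libraries must be installed):

    try:
        from rapidfuzz import process, fuzz

        _USING_FUZZYWUZZY = False
    except ImportError:
        from fuzzywuzzy import process, fuzz

        _USING_FUZZYWUZZY = True

    topics = ["python", "python3", "perl", "php", "ruby"]

    if _USING_FUZZYWUZZY:
        # fuzzywuzzy: take the first three of the returned matches.
        matches = process.extract("pyton", topics, scorer=fuzz.ratio)[:3]
    else:
        # rapidfuzz: request three matches directly.
        matches = process.extract("pyton", topics, limit=3, scorer=fuzz.ratio)

    print(matches)
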
diff --git a/lib/adapter/latenz.py b/lib/adapter/latenz.py
index 4b73ee3e..a4a467f6 100644
--- a/lib/adapter/latenz.py
+++ b/lib/adapter/latenz.py
@@ -12,8 +12,8 @@
import os
from .git_adapter import GitRepositoryAdapter
-class Latenz(GitRepositoryAdapter):
+class Latenz(GitRepositoryAdapter):
"""
chubin/late.nz Adapter
"""
@@ -23,12 +23,13 @@ class Latenz(GitRepositoryAdapter):
_repository_url = "https://github.com/chubin/late.nz"
def _get_page(self, topic, request_options=None):
- sys.path.append(os.path.join(self.local_repository_location(), 'bin'))
+ sys.path.append(os.path.join(self.local_repository_location(), "bin"))
import latencies
+
return latencies.render()
def _get_list(self, prefix=None):
- return ['latencies']
+ return ["latencies"]
def is_found(self, topic):
- return topic.lower() in ['latencies', 'late.nz', 'latency']
+ return topic.lower() in ["latencies", "late.nz", "latency"]
diff --git a/lib/adapter/learnxiny.py b/lib/adapter/learnxiny.py
index 3b16b727..37213de9 100644
--- a/lib/adapter/learnxiny.py
+++ b/lib/adapter/learnxiny.py
@@ -14,14 +14,14 @@
from config import CONFIG
from .git_adapter import GitRepositoryAdapter
-class LearnXinY(GitRepositoryAdapter):
+class LearnXinY(GitRepositoryAdapter):
"""
Adapter for the LearnXinY project
"""
- _adapter_name = 'learnxiny'
- _output_format = 'code'
+ _adapter_name = "learnxiny"
+ _output_format = "code"
_cache_needed = True
_repository_url = "https://github.com/adambard/learnxinyminutes-docs"
@@ -34,9 +34,9 @@ def _get_page(self, topic, request_options=None):
Return cheat sheet for `topic`
or empty string if nothing found
"""
- lang, topic = topic.split('/', 1)
+ lang, topic = topic.split("/", 1)
if lang not in self.adapters:
- return ''
+ return ""
return self.adapters[lang].get_page(topic)
def _get_list(self, prefix=None):
@@ -53,25 +53,25 @@ def is_found(self, topic):
Return whether `topic` is a valid learnxiny topic
"""
- if '/' not in topic:
+ if "/" not in topic:
return False
- lang, topic = topic.split('/', 1)
+ lang, topic = topic.split("/", 1)
if lang not in self.adapters:
return False
return self.adapters[lang].is_valid(topic)
-class LearnXYAdapter(object):
+class LearnXYAdapter(object):
"""
Parent class of all languages adapters
"""
_learn_xy_path = LearnXinY.local_repository_location()
_replace_with = {}
- _filename = ''
- prefix = ''
+ _filename = ""
+ prefix = ""
_replace_with = {}
_splitted = True
_block_cut_start = 2
@@ -83,19 +83,23 @@ def __init__(self):
self._topics_list = [x for x, _ in self._blocks]
if "Comments" in self._topics_list:
- self._topics_list = [x for x in self._topics_list if x != "Comments"] + ["Comments"]
+ self._topics_list = [x for x in self._topics_list if x != "Comments"] + [
+ "Comments"
+ ]
self._topics_list += [":learn", ":list"]
if self._whole_cheatsheet and CONFIG.get("log.level") >= 5:
print(self.prefix, self._topics_list)
def _is_block_separator(self, before, now, after):
- if (re.match(r'////////*', before)
- and re.match(r'// ', now)
- and re.match(r'////////*', after)):
- block_name = re.sub(r'//\s*', '', now).replace('(', '').replace(')', '')
- block_name = '_'.join(block_name.strip(", ").split())
- for character in '/,':
- block_name = block_name.replace(character, '')
+ if (
+ re.match(r"////////*", before)
+ and re.match(r"// ", now)
+ and re.match(r"////////*", after)
+ ):
+ block_name = re.sub(r"//\s*", "", now).replace("(", "").replace(")", "")
+ block_name = "_".join(block_name.strip(", ").split())
+ for character in "/,":
+ block_name = block_name.replace(character, "")
for k in self._replace_with:
if k in block_name:
block_name = self._replace_with[k]
@@ -104,12 +108,12 @@ def _is_block_separator(self, before, now, after):
def _cut_block(self, block, start_block=False):
if not start_block:
- answer = block[self._block_cut_start:-self._block_cut_end]
+ answer = block[self._block_cut_start : -self._block_cut_end]
if answer == []:
return answer
- if answer[0].strip() == '':
+ if answer[0].strip() == "":
answer = answer[1:]
- if answer[-1].strip() == '':
+ if answer[-1].strip() == "":
answer = answer[:1]
return answer
@@ -125,14 +129,14 @@ def _read_cheatsheet(self):
code_mode = False
answer = []
for line in f_cheat_sheet.readlines():
- if line.startswith('```'):
+ if line.startswith("```"):
if not code_mode:
code_mode = True
continue
else:
code_mode = False
if code_mode:
- answer.append(line.rstrip('\n'))
+ answer.append(line.rstrip("\n"))
return answer
def _extract_blocks(self):
@@ -148,7 +152,7 @@ def _extract_blocks(self):
block = []
block_name = "Comments"
- for before, now, after in zip([""]+lines, lines, lines[1:]):
+ for before, now, after in zip([""] + lines, lines, lines[1:]):
new_block_name = self._is_block_separator(before, now, after)
if new_block_name:
if block_name:
@@ -212,42 +216,54 @@ def get_page(self, name, partial=False):
return None
+
#
# Specific programming languages LearnXY cheat sheets configurations
# Contains much code for the moment; should contain data only
# ideally should be replaced with YAML
#
+
class LearnAwkAdapter(LearnXYAdapter):
"Learn AWK in Y Minutes"
+
prefix = "awk"
_filename = "awk.html.markdown"
_splitted = False
+
class LearnBashAdapter(LearnXYAdapter):
"Learn Bash in Y Minutes"
+
prefix = "bash"
_filename = "bash.html.markdown"
_splitted = False
+
class LearnBfAdapter(LearnXYAdapter):
"Learn Brainfuck in Y Minutes"
+
prefix = "bf"
_filename = "bf.html.markdown"
_splitted = False
+
class LearnCAdapter(LearnXYAdapter):
"Learn C in Y Minutes"
+
prefix = "c"
_filename = "c.html.markdown"
_splitted = False
+
class LearnChapelAdapter(LearnXYAdapter):
"Learn Chapel in Y Minutes"
+
prefix = "chapel"
_filename = "chapel.html.markdown"
_splitted = False
+
class LearnClojureAdapter(LearnXYAdapter):
"""
Learn Clojure in Y Minutes
@@ -257,11 +273,15 @@ class LearnClojureAdapter(LearnXYAdapter):
_filename = "clojure.html.markdown"
def _is_block_separator(self, before, now, after):
- if (re.match(r'\s*$', before)
- and re.match(r';\s*', now)
- and re.match(r';;;;;;+', after)):
- block_name = re.sub(r';\s*', '', now)
- block_name = '_'.join([x.strip(",&:") for x in block_name.strip(", ").split()])
+ if (
+ re.match(r"\s*$", before)
+ and re.match(r";\s*", now)
+ and re.match(r";;;;;;+", after)
+ ):
+ block_name = re.sub(r";\s*", "", now)
+ block_name = "_".join(
+ [x.strip(",&:") for x in block_name.strip(", ").split()]
+ )
return block_name
return None
@@ -269,18 +289,21 @@ def _is_block_separator(self, before, now, after):
def _cut_block(block, start_block=False):
if not start_block:
answer = block[2:]
- if answer[0].split() == '':
+ if answer[0].split() == "":
answer = answer[1:]
- if answer[-1].split() == '':
+ if answer[-1].split() == "":
answer = answer[:1]
return answer
+
class LearnCoffeeScriptAdapter(LearnXYAdapter):
"Learn coffeescript in Y Minutes"
+
prefix = "coffee"
_filename = "coffeescript.html.markdown"
_splitted = False
+
class LearnCppAdapter(LearnXYAdapter):
"""
Learn C++ in Y Minutes
@@ -289,17 +312,19 @@ class LearnCppAdapter(LearnXYAdapter):
prefix = "cpp"
_filename = "c++.html.markdown"
_replace_with = {
- 'More_about_Objects': 'Prototypes',
+ "More_about_Objects": "Prototypes",
}
def _is_block_separator(self, before, now, after):
- if (re.match(r'////////*', before)
- and re.match(r'// ', now)
- and re.match(r'////////*', after)):
- block_name = re.sub(r'//\s*', '', now).replace('(', '').replace(')', '')
- block_name = '_'.join(block_name.strip(", ").split())
- for character in '/,':
- block_name = block_name.replace(character, '')
+ if (
+ re.match(r"////////*", before)
+ and re.match(r"// ", now)
+ and re.match(r"////////*", after)
+ ):
+ block_name = re.sub(r"//\s*", "", now).replace("(", "").replace(")", "")
+ block_name = "_".join(block_name.strip(", ").split())
+ for character in "/,":
+ block_name = block_name.replace(character, "")
for k in self._replace_with:
if k in block_name:
block_name = self._replace_with[k]
@@ -311,54 +336,69 @@ def _cut_block(block, start_block=False):
answer = block[2:-1]
if answer == []:
return answer
- if answer[0].split() == '':
+ if answer[0].split() == "":
answer = answer[1:]
- if answer[-1].split() == '':
+ if answer[-1].split() == "":
answer = answer[:1]
return answer
+
class LearnCsharpAdapter(LearnXYAdapter):
"Learn C# in Y Minutes"
+
prefix = "csharp"
_filename = "csharp.html.markdown"
_splitted = False
+
class LearnDAdapter(LearnXYAdapter):
"Learn D in Y Minutes"
+
prefix = "d"
_filename = "d.html.markdown"
_splitted = False
+
class LearnDartAdapter(LearnXYAdapter):
"Learn Dart in Y Minutes"
+
prefix = "dart"
_filename = "dart.html.markdown"
_splitted = False
+
class LearnFactorAdapter(LearnXYAdapter):
"Learn Factor in Y Minutes"
+
prefix = "factor"
_filename = "factor.html.markdown"
_splitted = False
+
class LearnForthAdapter(LearnXYAdapter):
"Learn Forth in Y Minutes"
+
prefix = "forth"
_filename = "forth.html.markdown"
_splitted = False
+
class LearnFsharpAdapter(LearnXYAdapter):
"Learn F# in Y Minutes"
+
prefix = "fsharp"
_filename = "fsharp.html.markdown"
_splitted = False
+
class LearnElispAdapter(LearnXYAdapter):
"Learn Elisp in Y Minutes"
+
prefix = "elisp"
_filename = "elisp.html.markdown"
_splitted = False
+
class LearnElixirAdapter(LearnXYAdapter):
"""
Learn Elixir in Y Minutes
@@ -367,17 +407,19 @@ class LearnElixirAdapter(LearnXYAdapter):
prefix = "elixir"
_filename = "elixir.html.markdown"
_replace_with = {
- 'More_about_Objects': 'Prototypes',
+ "More_about_Objects": "Prototypes",
}
def _is_block_separator(self, before, now, after):
- if (re.match(r'## ---*', before)
- and re.match(r'## --', now)
- and re.match(r'## ---*', after)):
- block_name = re.sub(r'## --\s*', '', now)
- block_name = '_'.join(block_name.strip(", ").split())
- for character in '/,':
- block_name = block_name.replace(character, '')
+ if (
+ re.match(r"## ---*", before)
+ and re.match(r"## --", now)
+ and re.match(r"## ---*", after)
+ ):
+ block_name = re.sub(r"## --\s*", "", now)
+ block_name = "_".join(block_name.strip(", ").split())
+ for character in "/,":
+ block_name = block_name.replace(character, "")
for k in self._replace_with:
if k in block_name:
block_name = self._replace_with[k]
@@ -387,12 +429,13 @@ def _is_block_separator(self, before, now, after):
@staticmethod
def _cut_block(block, start_block=False):
answer = block[2:-1]
- if answer[0].split() == '':
+ if answer[0].split() == "":
answer = answer[1:]
- if answer[-1].split() == '':
+ if answer[-1].split() == "":
answer = answer[:1]
return answer
+
class LearnElmAdapter(LearnXYAdapter):
"""
Learn Elm in Y Minutes
@@ -401,18 +444,20 @@ class LearnElmAdapter(LearnXYAdapter):
prefix = "elm"
_filename = "elm.html.markdown"
_replace_with = {
- 'More_about_Objects': 'Prototypes',
+ "More_about_Objects": "Prototypes",
}
def _is_block_separator(self, before, now, after):
- if (re.match(r'\s*', before)
- and re.match(r'\{--.*--\}', now)
- and re.match(r'\s*', after)):
- block_name = re.sub(r'\{--+\s*', '', now)
- block_name = re.sub(r'--\}', '', block_name)
- block_name = '_'.join(block_name.strip(", ").split())
- for character in '/,':
- block_name = block_name.replace(character, '')
+ if (
+ re.match(r"\s*", before)
+ and re.match(r"\{--.*--\}", now)
+ and re.match(r"\s*", after)
+ ):
+ block_name = re.sub(r"\{--+\s*", "", now)
+ block_name = re.sub(r"--\}", "", block_name)
+ block_name = "_".join(block_name.strip(", ").split())
+ for character in "/,":
+ block_name = block_name.replace(character, "")
for k in self._replace_with:
if k in block_name:
block_name = self._replace_with[k]
@@ -422,12 +467,13 @@ def _is_block_separator(self, before, now, after):
@staticmethod
def _cut_block(block, start_block=False):
answer = block[2:-1]
- if answer[0].split() == '':
+ if answer[0].split() == "":
answer = answer[1:]
- if answer[-1].split() == '':
+ if answer[-1].split() == "":
answer = answer[:1]
return answer
+
class LearnErlangAdapter(LearnXYAdapter):
"""
Learn Erlang in Y Minutes
@@ -437,63 +483,77 @@ class LearnErlangAdapter(LearnXYAdapter):
_filename = "erlang.html.markdown"
def _is_block_separator(self, before, now, after):
- if (re.match('%%%%%%+', before)
- and re.match(r'%%\s+[0-9]+\.', now)
- and re.match('%%%%%%+', after)):
- block_name = re.sub(r'%%+\s+[0-9]+\.\s*', '', now)
- block_name = '_'.join(block_name.strip('.').strip().split())
+ if (
+ re.match("%%%%%%+", before)
+ and re.match(r"%%\s+[0-9]+\.", now)
+ and re.match("%%%%%%+", after)
+ ):
+ block_name = re.sub(r"%%+\s+[0-9]+\.\s*", "", now)
+ block_name = "_".join(block_name.strip(".").strip().split())
return block_name
return None
@staticmethod
def _cut_block(block, start_block=False):
answer = block[2:-1]
- if answer[0].split() == '':
+ if answer[0].split() == "":
answer = answer[1:]
- if answer[-1].split() == '':
+ if answer[-1].split() == "":
answer = answer[:1]
return answer
+
class LearnFortranAdapter(LearnXYAdapter):
"Learn Fortran in Y Minutes"
+
prefix = "fortran"
_filename = "fortran95.html.markdown"
_splitted = False
+
class LearnGoAdapter(LearnXYAdapter):
"Learn Go in Y Minutes"
+
prefix = "go"
_filename = "go.html.markdown"
_splitted = False
+
class LearnGroovyAdapter(LearnXYAdapter):
"Learn Groovy in Y Minutes"
+
prefix = "groovy"
_filename = "groovy.html.markdown"
_splitted = False
+
class LearnJavaAdapter(LearnXYAdapter):
"Learn Java in Y Minutes"
+
prefix = "java"
_filename = "java.html.markdown"
_splitted = False
+
class LearnJavaScriptAdapter(LearnXYAdapter):
"""
Learn JavaScript in Y Minutes
"""
+
prefix = "js"
_filename = "javascript.html.markdown"
_replace_with = {
- 'More_about_Objects': 'Prototypes',
+ "More_about_Objects": "Prototypes",
}
def _is_block_separator(self, before, now, after):
- if (re.match('//////+', before)
- and re.match(r'//+\s+[0-9]+\.', now)
- and re.match(r'\s*', after)):
- block_name = re.sub(r'//+\s+[0-9]+\.\s*', '', now)
- block_name = '_'.join(block_name.strip(", ").split())
+ if (
+ re.match("//////+", before)
+ and re.match(r"//+\s+[0-9]+\.", now)
+ and re.match(r"\s*", after)
+ ):
+ block_name = re.sub(r"//+\s+[0-9]+\.\s*", "", now)
+ block_name = "_".join(block_name.strip(", ").split())
for k in self._replace_with:
if k in block_name:
block_name = self._replace_with[k]
@@ -503,53 +563,61 @@ def _is_block_separator(self, before, now, after):
@staticmethod
def _cut_block(block, start_block=False):
answer = block[2:-1]
- if answer[0].split() == '':
+ if answer[0].split() == "":
answer = answer[1:]
- if answer[-1].split() == '':
+ if answer[-1].split() == "":
answer = answer[:1]
return answer
+
class LearnJuliaAdapter(LearnXYAdapter):
"""
Learn Julia in Y Minutes
"""
+
prefix = "julia"
_filename = "julia.html.markdown"
def _is_block_separator(self, before, now, after):
- if (re.match('####+', before)
- and re.match(r'##\s*', now)
- and re.match('####+', after)):
- block_name = re.sub(r'##\s+[0-9]+\.\s*', '', now)
- block_name = '_'.join(block_name.strip(", ").split())
+ if (
+ re.match("####+", before)
+ and re.match(r"##\s*", now)
+ and re.match("####+", after)
+ ):
+ block_name = re.sub(r"##\s+[0-9]+\.\s*", "", now)
+ block_name = "_".join(block_name.strip(", ").split())
return block_name
return None
@staticmethod
def _cut_block(block, start_block=False):
answer = block[2:-1]
- if answer[0].split() == '':
+ if answer[0].split() == "":
answer = answer[1:]
- if answer[-1].split() == '':
+ if answer[-1].split() == "":
answer = answer[:1]
return answer
+
class LearnHaskellAdapter(LearnXYAdapter):
"""
Learn Haskell in Y Minutes
"""
+
prefix = "haskell"
_filename = "haskell.html.markdown"
_replace_with = {
- 'More_about_Objects': 'Prototypes',
+ "More_about_Objects": "Prototypes",
}
def _is_block_separator(self, before, now, after):
- if (re.match('------+', before)
- and re.match(r'--+\s+[0-9]+\.', now)
- and re.match('------+', after)):
- block_name = re.sub(r'--+\s+[0-9]+\.\s*', '', now)
- block_name = '_'.join(block_name.strip(", ").split())
+ if (
+ re.match("------+", before)
+ and re.match(r"--+\s+[0-9]+\.", now)
+ and re.match("------+", after)
+ ):
+ block_name = re.sub(r"--+\s+[0-9]+\.\s*", "", now)
+ block_name = "_".join(block_name.strip(", ").split())
for k in self._replace_with:
if k in block_name:
block_name = self._replace_with[k]
@@ -559,36 +627,42 @@ def _is_block_separator(self, before, now, after):
@staticmethod
def _cut_block(block, start_block=False):
answer = block[2:-1]
- if answer[0].split() == '':
+ if answer[0].split() == "":
answer = answer[1:]
- if answer[-1].split() == '':
+ if answer[-1].split() == "":
answer = answer[:1]
return answer
+
class LearnLispAdapter(LearnXYAdapter):
"Learn Lisp in Y Minutes"
+
prefix = "lisp"
_filename = "common-lisp.html.markdown"
_splitted = False
+
class LearnLuaAdapter(LearnXYAdapter):
"""
Learn Lua in Y Minutes
"""
+
prefix = "lua"
_filename = "lua.html.markdown"
_replace_with = {
- '1_Metatables_and_metamethods': 'Metatables',
- '2_Class-like_tables_and_inheritance': 'Class-like_tables',
- 'Variables_and_flow_control': 'Flow_control',
+ "1_Metatables_and_metamethods": "Metatables",
+ "2_Class-like_tables_and_inheritance": "Class-like_tables",
+ "Variables_and_flow_control": "Flow_control",
}
def _is_block_separator(self, before, now, after):
- if (re.match('-----+', before)
- and re.match('-------+', after)
- and re.match(r'--\s+[0-9]+\.', now)):
- block_name = re.sub(r'--+\s+[0-9]+\.\s*', '', now)
- block_name = '_'.join(block_name.strip('.').strip().split())
+ if (
+ re.match("-----+", before)
+ and re.match("-------+", after)
+ and re.match(r"--\s+[0-9]+\.", now)
+ ):
+ block_name = re.sub(r"--+\s+[0-9]+\.\s*", "", now)
+ block_name = "_".join(block_name.strip(".").strip().split())
if block_name in self._replace_with:
block_name = self._replace_with[block_name]
return block_name
@@ -597,78 +671,94 @@ def _is_block_separator(self, before, now, after):
@staticmethod
def _cut_block(block, start_block=False):
answer = block[2:-1]
- if answer[0].split() == '':
+ if answer[0].split() == "":
answer = answer[1:]
- if answer[-1].split() == '':
+ if answer[-1].split() == "":
answer = answer[:1]
return answer
+
class LearnMathematicaAdapter(LearnXYAdapter):
"Learn Mathematica in Y Minutes"
+
prefix = "mathematica"
_filename = "wolfram.html.markdown"
_splitted = False
+
class LearnMatlabAdapter(LearnXYAdapter):
"Learn Matlab in Y Minutes"
+
prefix = "matlab"
_filename = "matlab.html.markdown"
_splitted = False
+
class LearnOctaveAdapter(LearnXYAdapter):
"Learn Octave in Y Minutes"
+
prefix = "octave"
_filename = "matlab.html.markdown"
_splitted = False
+
class LearnKotlinAdapter(LearnXYAdapter):
"""
Learn Kotlin in Y Minutes
"""
+
prefix = "kotlin"
_filename = "kotlin.html.markdown"
def _is_block_separator(self, before, now, after):
- if (re.match('#######+', before)
- and re.match('#######+', after)
- and re.match(r'#+\s+[0-9]+\.', now)):
- block_name = re.sub(r'#+\s+[0-9]+\.\s*', '', now)
- block_name = '_'.join(block_name.strip().split())
+ if (
+ re.match("#######+", before)
+ and re.match("#######+", after)
+ and re.match(r"#+\s+[0-9]+\.", now)
+ ):
+ block_name = re.sub(r"#+\s+[0-9]+\.\s*", "", now)
+ block_name = "_".join(block_name.strip().split())
return block_name
return None
@staticmethod
def _cut_block(block, start_block=False):
answer = block[2:-1]
- if answer[0].split() == '':
+ if answer[0].split() == "":
answer = answer[1:]
- if answer[-1].split() == '':
+ if answer[-1].split() == "":
answer = answer[:1]
return answer
+
class LearnObjectiveCAdapter(LearnXYAdapter):
"Learn Objective C in Y Minutes"
+
prefix = "objective-c"
_filename = "objective-c.html.markdown"
_splitted = False
+
class LearnOCamlAdapter(LearnXYAdapter):
"""
Learn OCaml in Y Minutes
"""
+
prefix = "ocaml"
_filename = "ocaml.html.markdown"
_replace_with = {
- 'More_about_Objects': 'Prototypes',
+ "More_about_Objects": "Prototypes",
}
def _is_block_separator(self, before, now, after):
- if (re.match(r'\s*', before)
- and re.match(r'\(\*\*\*+', now)
- and re.match(r'\s*', after)):
- block_name = re.sub(r'\(\*\*\*+\s*', '', now)
- block_name = re.sub(r'\s*\*\*\*\)', '', block_name)
- block_name = '_'.join(block_name.strip(", ").split())
+ if (
+ re.match(r"\s*", before)
+ and re.match(r"\(\*\*\*+", now)
+ and re.match(r"\s*", after)
+ ):
+ block_name = re.sub(r"\(\*\*\*+\s*", "", now)
+ block_name = re.sub(r"\s*\*\*\*\)", "", block_name)
+ block_name = "_".join(block_name.strip(", ").split())
for k in self._replace_with:
if k in block_name:
block_name = self._replace_with[k]
@@ -678,29 +768,31 @@ def _is_block_separator(self, before, now, after):
@staticmethod
def _cut_block(block, start_block=False):
answer = block[2:-1]
- if answer[0].split() == '':
+ if answer[0].split() == "":
answer = answer[1:]
- if answer[-1].split() == '':
+ if answer[-1].split() == "":
answer = answer[:1]
return answer
+
class LearnPerlAdapter(LearnXYAdapter):
"""
Learn Perl in Y Minutes
"""
+
prefix = "perl"
_filename = "perl.html.markdown"
_replace_with = {
- 'Conditional_and_looping_constructs': 'Control_Flow',
- 'Perl_variable_types': 'Types',
- 'Files_and_I/O': 'Files',
- 'Writing_subroutines': 'Subroutines',
+ "Conditional_and_looping_constructs": "Control_Flow",
+ "Perl_variable_types": "Types",
+ "Files_and_I/O": "Files",
+ "Writing_subroutines": "Subroutines",
}
def _is_block_separator(self, before, now, after):
- if re.match(r'####+\s+', now):
- block_name = re.sub(r'#+\s', '', now)
- block_name = '_'.join(block_name.strip().split())
+ if re.match(r"####+\s+", now):
+ block_name = re.sub(r"#+\s", "", now)
+ block_name = "_".join(block_name.strip().split())
if block_name in self._replace_with:
block_name = self._replace_with[block_name]
return block_name
@@ -713,32 +805,38 @@ def _cut_block(block, start_block=False):
answer = block[2:]
if answer == []:
return answer
- if answer[0].split() == '':
+ if answer[0].split() == "":
answer = answer[1:]
- if answer[-1].split() == '':
+ if answer[-1].split() == "":
answer = answer[:1]
return answer
+
class LearnPerl6Adapter(LearnXYAdapter):
"Learn Perl 6 in Y Minutes"
+
prefix = "perl6"
_filename = "perl6.html.markdown"
_splitted = False
+
class LearnPHPAdapter(LearnXYAdapter):
"""
Learn PHP in Y Minutes
"""
+
prefix = "php"
_filename = "php.html.markdown"
def _is_block_separator(self, before, now, after):
- if (re.match(r'/\*\*\*\*\*+', before)
- and re.match(r'\s*\*/', after)
- and re.match(r'\s*\*\s*', now)):
- block_name = re.sub(r'\s*\*\s*', '', now)
- block_name = re.sub(r'&', '', block_name)
- block_name = '_'.join(block_name.strip().split())
+ if (
+ re.match(r"/\*\*\*\*\*+", before)
+ and re.match(r"\s*\*/", after)
+ and re.match(r"\s*\*\s*", now)
+ ):
+ block_name = re.sub(r"\s*\*\s*", "", now)
+ block_name = re.sub(r"&", "", block_name)
+ block_name = "_".join(block_name.strip().split())
return block_name
return None
@@ -746,49 +844,60 @@ def _is_block_separator(self, before, now, after):
def _cut_block(block, start_block=False):
return block[2:]
+
class LearnPythonAdapter(LearnXYAdapter):
"""
Learn Python in Y Minutes
"""
+
prefix = "python"
_filename = "python.html.markdown"
def _is_block_separator(self, before, now, after):
- if (re.match('#######+', before)
- and re.match('#######+', after)
- and re.match(r'#+\s+[0-9]+\.', now)):
- block_name = re.sub(r'#+\s+[0-9]+\.\s*', '', now)
- block_name = '_'.join(block_name.strip().split())
+ if (
+ re.match("#######+", before)
+ and re.match("#######+", after)
+ and re.match(r"#+\s+[0-9]+\.", now)
+ ):
+ block_name = re.sub(r"#+\s+[0-9]+\.\s*", "", now)
+ block_name = "_".join(block_name.strip().split())
return block_name
return None
@staticmethod
def _cut_block(block, start_block=False):
answer = block[2:-1]
- if answer[0].split() == '':
+ if answer[0].split() == "":
answer = answer[1:]
- if answer[-1].split() == '':
+ if answer[-1].split() == "":
answer = answer[:1]
return answer
+
class LearnPython3Adapter(LearnXYAdapter):
"Learn Python 3 in Y Minutes"
+
prefix = "python3"
_filename = "python3.html.markdown"
_splitted = False
+
class LearnRAdapter(LearnXYAdapter):
"Learn R in Y Minutes"
+
prefix = "r"
_filename = "r.html.markdown"
_splitted = False
+
class LearnRacketAdapter(LearnXYAdapter):
"Learn Racket in Y Minutes"
+
prefix = "racket"
_filename = "racket.html.markdown"
_splitted = False
+
class LearnRubyAdapter(LearnXYAdapter):
"""
Learn Ruby in Y Minutes
@@ -798,85 +907,109 @@ class LearnRubyAdapter(LearnXYAdapter):
if number of extracted cheat sheets is suddenly became 1,
one should check the markup
"""
+
prefix = "ruby"
_filename = "ruby.html.markdown"
def _is_block_separator(self, before, now, after):
- if (re.match('#######+', before)
- and re.match('#######+', after)
- and re.match(r'#+\s+[0-9]+\.', now)):
- block_name = re.sub(r'#+\s+[0-9]+\.\s*', '', now)
- block_name = '_'.join(block_name.strip().split())
+ if (
+ re.match("#######+", before)
+ and re.match("#######+", after)
+ and re.match(r"#+\s+[0-9]+\.", now)
+ ):
+ block_name = re.sub(r"#+\s+[0-9]+\.\s*", "", now)
+ block_name = "_".join(block_name.strip().split())
return block_name
return None
@staticmethod
def _cut_block(block, start_block=False):
answer = block[2:-1]
- if answer[0].split() == '':
+ if answer[0].split() == "":
answer = answer[1:]
- if answer[-1].split() == '':
+ if answer[-1].split() == "":
answer = answer[:1]
return answer
+
class LearnRustAdapter(LearnXYAdapter):
"Learn Rust in Y Minutes"
+
prefix = "rust"
_filename = "rust.html.markdown"
_splitted = False
+
class LearnSolidityAdapter(LearnXYAdapter):
"Learn Solidity in Y Minutes"
+
prefix = "solidity"
_filename = "solidity.html.markdown"
_splitted = False
+
class LearnSwiftAdapter(LearnXYAdapter):
"Learn Swift in Y Minutes"
+
prefix = "swift"
_filename = "swift.html.markdown"
_splitted = False
+
class LearnTclAdapter(LearnXYAdapter):
"Learn Tcl in Y Minutes"
+
prefix = "tcl"
_filename = "tcl.html.markdown"
_splitted = False
+
class LearnTcshAdapter(LearnXYAdapter):
"Learn Tcsh in Y Minutes"
+
prefix = "tcsh"
_filename = "tcsh.html.markdown"
_splitted = False
+
class LearnVisualBasicAdapter(LearnXYAdapter):
"Learn Visual Basic in Y Minutes"
+
prefix = "vb"
_filename = "visualbasic.html.markdown"
_splitted = False
+
class LearnCMakeAdapter(LearnXYAdapter):
"Learn CMake in Y Minutes"
+
prefix = "cmake"
_filename = "cmake.html.markdown"
_splitted = False
+
class LearnNimAdapter(LearnXYAdapter):
"Learn Nim in Y Minutes"
+
prefix = "nim"
_filename = "nim.html.markdown"
_splitted = False
+
class LearnGitAdapter(LearnXYAdapter):
"Learn Git in Y Minutes"
+
prefix = "git"
_filename = "git.html.markdown"
_splitted = False
+
class LearnLatexAdapter(LearnXYAdapter):
"Learn Nim in Y Minutes"
+
prefix = "latex"
_filename = "latex.html.markdown"
_splitted = False
-_ADAPTERS = {cls.prefix: cls() for cls in vars()['LearnXYAdapter'].__subclasses__()}
+
+_ADAPTERS = {cls.prefix: cls() for cls in vars()["LearnXYAdapter"].__subclasses__()}
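
Each LearnXY adapter above recognizes section banners by applying three regexes to the previous, current, and next line, then turns the banner into a block name; a toy sketch of that splitting for the C-style banners:

    import re

    def is_block_separator(before, now, after):
        # A block starts where a "// <title>" line sits between two ///// banners.
        if (
            re.match(r"////////*", before)
            and re.match(r"// ", now)
            and re.match(r"////////*", after)
        ):
            return "_".join(re.sub(r"//\s*", "", now).strip(", ").split())
        return None

    lines = [
        "///////////////////////////////////",
        "// 1. Primitive Datatypes and Operators",
        "///////////////////////////////////",
        "int x = 3;",
    ]
    for before, now, after in zip([""] + lines, lines, lines[1:]):
        name = is_block_separator(before, now, after)
        if name:
            print(name)  # 1._Primitive_Datatypes_and_Operators
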
diff --git a/lib/adapter/question.py b/lib/adapter/question.py
index 994156d4..8153a4c9 100644
--- a/lib/adapter/question.py
+++ b/lib/adapter/question.py
@@ -38,8 +38,8 @@
github.com/chubin/cheat.sh or ping @igor_chubin
"""
-class Question(UpstreamAdapter):
+class Question(UpstreamAdapter):
"""
Answer to a programming language question, using Stackoverflow
as the main data source. Heavy lifting is done by an external
@@ -62,55 +62,65 @@ def _get_page(self, topic, request_options=None):
if not os.path.exists(CONFIG["path.internal.bin.upstream"]):
# if the upstream program is not found, use normal upstream adapter
self._output_format = "ansi"
- return UpstreamAdapter._get_page(self, topic, request_options=request_options)
+ return UpstreamAdapter._get_page(
+ self, topic, request_options=request_options
+ )
- topic = topic.replace('+', ' ')
+ topic = topic.replace("+", " ")
# if there is a language name in the section name,
# cut it off (de:python => python)
- if '/' in topic:
- section_name, topic = topic.split('/', 1)
- if ':' in section_name:
- _, section_name = section_name.split(':', 1)
+ if "/" in topic:
+ section_name, topic = topic.split("/", 1)
+ if ":" in section_name:
+ _, section_name = section_name.split(":", 1)
section_name = SO_NAME.get(section_name, section_name)
topic = "%s/%s" % (section_name, topic)
# some clients send queries with - instead of + so we have to rewrite them to
- topic = re.sub(r"(? 2 \
- or supposed_lang in ['az', 'ru', 'uk', 'de', 'fr', 'es', 'it', 'nl']:
+ if len(topic_words) > 2 or supposed_lang in [
+ "az",
+ "ru",
+ "uk",
+ "de",
+ "fr",
+ "es",
+ "it",
+ "nl",
+ ]:
lang = supposed_lang
- if supposed_lang.startswith('zh_') or supposed_lang == 'zh':
- lang = 'zh'
- elif supposed_lang.startswith('pt_'):
- lang = 'pt'
- if supposed_lang in ['ja', 'ko']:
+ if supposed_lang.startswith("zh_") or supposed_lang == "zh":
+ lang = "zh"
+ elif supposed_lang.startswith("pt_"):
+ lang = "pt"
+ if supposed_lang in ["ja", "ko"]:
lang = supposed_lang
except UnknownLanguage:
print("Unknown language (%s)" % query_text)
- if lang != 'en':
- topic = ['--human-language', lang, topic]
+ if lang != "en":
+ topic = ["--human-language", lang, topic]
else:
topic = [topic]
cmd = [CONFIG["path.internal.bin.upstream"]] + topic
proc = Popen(cmd, stdin=open(os.devnull, "r"), stdout=PIPE, stderr=PIPE)
- answer = proc.communicate()[0].decode('utf-8')
+ answer = proc.communicate()[0].decode("utf-8")
if not answer:
return NOT_FOUND_MESSAGE
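
The question adapter above strips a human-language prefix such as de:python down to its StackOverflow section name before shelling out to the upstream helper; a small sketch of that rewrite, with SO_NAME reduced to a hypothetical two-entry mapping:

    # Hypothetical subset of the SO_NAME mapping referenced above.
    SO_NAME = {"js": "javascript", "golang": "go"}

    def normalize(topic):
        topic = topic.replace("+", " ")
        if "/" in topic:
            section_name, rest = topic.split("/", 1)
            if ":" in section_name:
                # de:js/... -> js/... (drop the human-language prefix)
                _, section_name = section_name.split(":", 1)
            section_name = SO_NAME.get(section_name, section_name)
            topic = "%s/%s" % (section_name, rest)
        return topic

    print(normalize("de:js/copy+list+to+array"))  # javascript/copy list to array
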
diff --git a/lib/adapter/rosetta.py b/lib/adapter/rosetta.py
index 2ed23b49..f90d2269 100644
--- a/lib/adapter/rosetta.py
+++ b/lib/adapter/rosetta.py
@@ -15,8 +15,8 @@
from .git_adapter import GitRepositoryAdapter
from .cheat_sheets import CheatSheets
-class Rosetta(GitRepositoryAdapter):
+class Rosetta(GitRepositoryAdapter):
"""
Adapter for RosettaCode
"""
@@ -36,17 +36,19 @@ def __init__(self):
def _load_rosetta_code_names():
answer = {}
- lang_files_location = CheatSheets.local_repository_location(cheat_sheets_location=True)
- for filename in glob.glob(os.path.join(lang_files_location, '*/_info.yaml')):
- text = open(filename, 'r').read()
+ lang_files_location = CheatSheets.local_repository_location(
+ cheat_sheets_location=True
+ )
+ for filename in glob.glob(os.path.join(lang_files_location, "*/_info.yaml")):
+ text = open(filename, "r").read()
data = yaml.load(text, Loader=yaml.SafeLoader)
if data is None:
continue
lang = os.path.basename(os.path.dirname(filename))
- if lang.startswith('_'):
+ if lang.startswith("_"):
lang = lang[1:]
- if 'rosetta' in data:
- answer[lang] = data['rosetta']
+ if "rosetta" in data:
+ answer[lang] = data["rosetta"]
return answer
def _rosetta_get_list(self, query, task=None):
@@ -56,9 +58,13 @@ def _rosetta_get_list(self, query, task=None):
lang = self._rosetta_code_name[query]
answer = []
if task:
- glob_path = os.path.join(self.local_repository_location(), 'Lang', lang, task, '*')
+ glob_path = os.path.join(
+ self.local_repository_location(), "Lang", lang, task, "*"
+ )
else:
- glob_path = os.path.join(self.local_repository_location(), 'Lang', lang, '*')
+ glob_path = os.path.join(
+ self.local_repository_location(), "Lang", lang, "*"
+ )
for filename in glob.glob(glob_path):
taskname = os.path.basename(filename)
answer.append(taskname)
@@ -68,8 +74,8 @@ def _rosetta_get_list(self, query, task=None):
@staticmethod
def _parse_query(query):
- if '/' in query:
- task, subquery = query.split('/', 1)
+ if "/" in query:
+ task, subquery = query.split("/", 1)
else:
task, subquery = query, None
return task, subquery
@@ -80,9 +86,9 @@ def _get_task(self, lang, query):
task, subquery = self._parse_query(query)
- if task == ':list':
+ if task == ":list":
return self._rosetta_get_list(lang)
- if subquery == ':list':
+ if subquery == ":list":
return self._rosetta_get_list(lang, task=task)
# if it is not a number or the number is too big, just ignore it
@@ -95,41 +101,43 @@ def _get_task(self, lang, query):
lang_name = self._rosetta_code_name[lang]
- tasks = sorted(glob.glob(
- os.path.join(self.local_repository_location(), 'Lang', lang_name, task, '*')))
+ tasks = sorted(
+ glob.glob(
+ os.path.join(
+ self.local_repository_location(), "Lang", lang_name, task, "*"
+ )
+ )
+ )
if not tasks:
return ""
if len(tasks) < index or index < 1:
index = 1
- answer_filename = tasks[index-1]
- answer = open(answer_filename, 'r').read()
+ answer_filename = tasks[index - 1]
+ answer = open(answer_filename, "r").read()
return answer
def _starting_page(self, query):
number_of_pages = self._rosetta_get_list(query)
- answer = (
- "# %s pages available\n"
- "# use /:list to list"
- ) % number_of_pages
+ answer = ("# %s pages available\n" "# use /:list to list") % number_of_pages
return answer
def _get_page(self, topic, request_options=None):
- if '/' not in topic:
+ if "/" not in topic:
return self._rosetta_get_list(topic)
- lang, topic = topic.split('/', 1)
+ lang, topic = topic.split("/", 1)
# this part should be generalized
# currently we just remove the name of the adapter from the path
if topic == self.__section_name:
return self._starting_page(topic)
- if topic.startswith(self.__section_name + '/'):
- topic = topic[len(self.__section_name + '/'):]
+ if topic.startswith(self.__section_name + "/"):
+ topic = topic[len(self.__section_name + "/") :]
return self._get_task(lang, topic)
@@ -139,7 +147,7 @@ def _get_list(self, prefix=None):
def get_list(self, prefix=None):
answer = [self.__section_name]
for i in self._rosetta_code_name:
- answer.append('%s/%s/' % (i, self.__section_name))
+ answer.append("%s/%s/" % (i, self.__section_name))
return answer
def is_found(self, _):
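
The Rosetta adapter splits a query into a task and an optional sub-query, and `:list` in either position asks for a listing. A minimal sketch of that parsing, using the same logic as `_parse_query` above:

    def parse_query(query):
        # task/subquery, where the subquery part is optional
        if "/" in query:
            task, subquery = query.split("/", 1)
        else:
            task, subquery = query, None
        return task, subquery

    print(parse_query("Quine"))         # ('Quine', None)
    print(parse_query("Quine/:list"))   # ('Quine', ':list')
    # task == ":list" lists all tasks; subquery == ":list" lists variants of one task
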
diff --git a/lib/adapter/tldr.py b/lib/adapter/tldr.py
index 9b32434e..849ab51a 100644
--- a/lib/adapter/tldr.py
+++ b/lib/adapter/tldr.py
@@ -14,8 +14,8 @@
from .git_adapter import GitRepositoryAdapter
-class Tldr(GitRepositoryAdapter):
+class Tldr(GitRepositoryAdapter):
"""
tldr-pages/tldr adapter
"""
@@ -41,7 +41,7 @@ def _format_page(text):
skip_empty = False
header = 2
for line in text.splitlines():
- if line.strip() == '':
+ if line.strip() == "":
if skip_empty and not header:
continue
if header == 1:
@@ -51,17 +51,17 @@ def _format_page(text):
else:
skip_empty = False
- if line.startswith('-'):
- line = '# '+line[2:]
+ if line.startswith("-"):
+ line = "# " + line[2:]
skip_empty = True
- elif line.startswith('> '):
+ elif line.startswith("> "):
if header == 2:
header = 1
- line = '# '+line[2:]
+ line = "# " + line[2:]
skip_empty = True
- elif line.startswith('`') and line.endswith('`'):
+ elif line.startswith("`") and line.endswith("`"):
line = line[1:-1]
- line = re.sub(r'{{(.*?)}}', r'\1', line)
+ line = re.sub(r"{{(.*?)}}", r"\1", line)
answer.append(line)
@@ -73,23 +73,22 @@ def _get_page(self, topic, request_options=None):
and as soon as anything is found, format and return it.
"""
- search_order = ['common', 'linux', 'osx', 'sunos', 'windows', "android"]
+ search_order = ["common", "linux", "osx", "sunos", "windows", "android"]
local_rep = self.local_repository_location()
ext = self._cheatsheet_files_extension
filename = None
for subdir in search_order:
- _filename = os.path.join(
- local_rep, 'pages', subdir, "%s%s" % (topic, ext))
+ _filename = os.path.join(local_rep, "pages", subdir, "%s%s" % (topic, ext))
if os.path.exists(_filename):
filename = _filename
break
if filename:
- answer = self._format_page(open(filename, 'r').read())
+ answer = self._format_page(open(filename, "r").read())
else:
# though it should not happen
- answer = ''
+ answer = ""
return answer
@@ -104,5 +103,5 @@ def get_updates_list(cls, updated_files_list):
for entry in updated_files_list:
if entry.endswith(ext):
- answer.append(entry.split('/')[-1][:-len(ext)])
+ answer.append(entry.split("/")[-1][: -len(ext)])
return answer
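
`_format_page` turns tldr markup into the plain cheat-sheet style: `- description` and `> summary` lines become `#` comments, single-backtick command lines are unwrapped, and `{{placeholders}}` lose their braces. A simplified per-line sketch of those rules (the real method also collapses blank lines and treats the header specially):

    import re

    def format_line(line):
        if line.startswith("-"):
            line = "# " + line[2:]
        elif line.startswith("> "):
            line = "# " + line[2:]
        elif line.startswith("`") and line.endswith("`"):
            line = line[1:-1]
        # strip tldr placeholders: {{path/to/file}} -> path/to/file
        return re.sub(r"{{(.*?)}}", r"\1", line)

    print(format_line("- Print a file:"))          # "# Print a file:"
    print(format_line("`cat {{path/to/file}}`"))   # "cat path/to/file"
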
diff --git a/lib/adapter/upstream.py b/lib/adapter/upstream.py
index 786d9b22..f24eb8c4 100644
--- a/lib/adapter/upstream.py
+++ b/lib/adapter/upstream.py
@@ -15,6 +15,7 @@
from config import CONFIG
from .adapter import Adapter
+
def _are_you_offline():
return textwrap.dedent(
"""
@@ -32,10 +33,11 @@ def _are_you_offline():
|____|_______|____| the authors to develop it as soon as possible
.
- """)
+ """
+ )
-class UpstreamAdapter(Adapter):
+class UpstreamAdapter(Adapter):
"""
Connect to the upstream server `CONFIG["upstream.url"]` and fetch
response from it. The response is supposed to have the "ansi" format.
@@ -52,15 +54,21 @@ class UpstreamAdapter(Adapter):
def _get_page(self, topic, request_options=None):
- options_string = "&".join(["%s=%s" % (x, y) for (x, y) in request_options.items()])
- url = CONFIG["upstream.url"].rstrip('/') \
- + '/' + topic.lstrip('/') \
- + "?" + options_string
+ options_string = "&".join(
+ ["%s=%s" % (x, y) for (x, y) in request_options.items()]
+ )
+ url = (
+ CONFIG["upstream.url"].rstrip("/")
+ + "/"
+ + topic.lstrip("/")
+ + "?"
+ + options_string
+ )
try:
response = requests.get(url, timeout=CONFIG["upstream.timeout"])
answer = {"cache": False, "answer": response.text}
except requests.exceptions.ConnectionError:
- answer = {"cache": False, "answer":_are_you_offline()}
+ answer = {"cache": False, "answer": _are_you_offline()}
return answer
def _get_list(self, prefix=None):
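
The reformatted `_get_page` above simply assembles a URL from the configured upstream, the topic, and the request options. A standalone sketch of that assembly (the upstream URL is only an example value):

    def upstream_url(base, topic, request_options):
        options_string = "&".join("%s=%s" % (k, v) for k, v in request_options.items())
        return base.rstrip("/") + "/" + topic.lstrip("/") + "?" + options_string

    print(upstream_url("https://cheat.sh", "/python/lambda", {"style": "native"}))
    # https://cheat.sh/python/lambda?style=native
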
diff --git a/lib/buttons.py b/lib/buttons.py
index 8c577361..17ed7f0f 100644
--- a/lib/buttons.py
+++ b/lib/buttons.py
@@ -17,4 +17,3 @@
"""
-
diff --git a/lib/cache.py b/lib/cache.py
index 8e2b1263..16608e51 100644
--- a/lib/cache.py
+++ b/lib/cache.py
@@ -17,17 +17,20 @@
from config import CONFIG
_REDIS = None
-if CONFIG['cache.type'] == 'redis':
+if CONFIG["cache.type"] == "redis":
import redis
+
_REDIS = redis.Redis(
- host=CONFIG['cache.redis.host'],
- port=CONFIG['cache.redis.port'],
- db=CONFIG['cache.redis.db'])
+ host=CONFIG["cache.redis.host"],
+ port=CONFIG["cache.redis.port"],
+ db=CONFIG["cache.redis.db"],
+ )
-_REDIS_PREFIX = ''
+_REDIS_PREFIX = ""
if CONFIG.get("cache.redis.prefix", ""):
_REDIS_PREFIX = CONFIG["cache.redis.prefix"] + ":"
+
def put(key, value):
"""
Save `value` with `key`, and serialize it if needed
@@ -42,6 +45,7 @@ def put(key, value):
_REDIS.set(key, value)
+
def get(key):
"""
Read `value` by `key`, and deserialize it if needed
@@ -59,6 +63,7 @@ def get(key):
return value
return None
+
def delete(key):
"""
Remove `key` from the database
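
When `cache.type` is `redis`, keys are stored under an optional prefix taken from `cache.redis.prefix`. A minimal sketch of how the prefixed key is derived (the prefix value here is illustrative; the real module also serializes non-string values before storing them):

    REDIS_PREFIX = "cheat.sh:"   # illustrative; built from CONFIG["cache.redis.prefix"] + ":"

    def prefixed(key):
        # every cache operation addresses Redis through the prefixed key
        return REDIS_PREFIX + key if REDIS_PREFIX else key

    print(prefixed("adapter:tldr:tar"))   # cheat.sh:adapter:tldr:tar
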
diff --git a/lib/cheat_wrapper.py b/lib/cheat_wrapper.py
index fdcaff67..0e47b2ac 100644
--- a/lib/cheat_wrapper.py
+++ b/lib/cheat_wrapper.py
@@ -19,19 +19,21 @@
import frontend.html
import frontend.ansi
+
def _add_section_name(query):
    # temporary solution until we find a better one
- if ' ' not in query and '+' not in query:
+ if " " not in query and "+" not in query:
return query
- if '/' in query:
+ if "/" in query:
return query
- if ' ' in query:
- return re.sub(r' +', '/', query, count=1)
- if '+' in query:
+ if " " in query:
+ return re.sub(r" +", "/", query, count=1)
+ if "+" in query:
# replace only single + to avoid catching g++ and friends
- return re.sub(r'([^\+])\+([^\+])', r'\1/\2', query, count=1)
+ return re.sub(r"([^\+])\+([^\+])", r"\1/\2", query, count=1)
+
-def cheat_wrapper(query, request_options=None, output_format='ansi'):
+def cheat_wrapper(query, request_options=None, output_format="ansi"):
"""
Function that delivers cheat sheet for `query`.
If `html` is True, the answer is formatted as HTML.
@@ -39,8 +41,8 @@ def cheat_wrapper(query, request_options=None, output_format='ansi'):
"""
def _rewrite_aliases(word):
- if word == ':bash.completion':
- return ':bash_completion'
+ if word == ":bash.completion":
+ return ":bash_completion"
return word
def _rewrite_section_name(query):
@@ -49,22 +51,22 @@ def _rewrite_section_name(query):
* EDITOR:NAME => emacs:go-mode
"""
- if '/' not in query:
+ if "/" not in query:
return query
- section_name, rest = query.split('/', 1)
+ section_name, rest = query.split("/", 1)
- if ':' in section_name:
+ if ":" in section_name:
section_name = rewrite_editor_section_name(section_name)
section_name = LANGUAGE_ALIAS.get(section_name, section_name)
return "%s/%s" % (section_name, rest)
def _sanitize_query(query):
- return re.sub('[<>"]', '', query)
+ return re.sub('[<>"]', "", query)
def _strip_hyperlink(query):
- return re.sub('(,[0-9]+)+$', '', query)
+ return re.sub("(,[0-9]+)+$", "", query)
def _parse_query(query):
topic = query
@@ -72,16 +74,16 @@ def _parse_query(query):
search_options = ""
keyword = None
- if '~' in query:
+ if "~" in query:
topic = query
- pos = topic.index('~')
- keyword = topic[pos+1:]
+ pos = topic.index("~")
+ keyword = topic[pos + 1 :]
topic = topic[:pos]
- if '/' in keyword:
+ if "/" in keyword:
search_options = keyword[::-1]
- search_options = search_options[:search_options.index('/')]
- keyword = keyword[:-len(search_options)-1]
+ search_options = search_options[: search_options.index("/")]
+ keyword = keyword[: -len(search_options) - 1]
return topic, keyword, search_options
@@ -97,25 +99,27 @@ def _parse_query(query):
if keyword:
answers = find_answers_by_keyword(
- topic, keyword, options=search_options, request_options=request_options)
+ topic, keyword, options=search_options, request_options=request_options
+ )
else:
answers = get_answers(topic, request_options=request_options)
answers = [
postprocessing.postprocess(
- answer, keyword, search_options, request_options=request_options)
+ answer, keyword, search_options, request_options=request_options
+ )
for answer in answers
]
answer_data = {
- 'query': query,
- 'keyword': keyword,
- 'answers': answers,
- }
+ "query": query,
+ "keyword": keyword,
+ "answers": answers,
+ }
- if output_format == 'html':
- answer_data['topics_list'] = get_topics_list()
+ if output_format == "html":
+ answer_data["topics_list"] = get_topics_list()
return frontend.html.visualize(answer_data, request_options)
- elif output_format == 'json':
+ elif output_format == "json":
return json.dumps(answer_data, indent=4)
return frontend.ansi.visualize(answer_data, request_options)
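
`_add_section_name` inserts a `/` between language and topic, replacing only the first run of spaces or a single `+`, so queries like `g++` survive untouched. A small demonstration of the two regexes shown above:

    import re

    def add_section_name(query):
        if " " not in query and "+" not in query:
            return query
        if "/" in query:
            return query
        if " " in query:
            return re.sub(r" +", "/", query, count=1)
        if "+" in query:
            # replace only a single "+" so that g++ and friends survive
            return re.sub(r"([^\+])\+([^\+])", r"\1/\2", query, count=1)

    print(add_section_name("python read file"))   # python/read file
    print(add_section_name("g++ -O1"))            # g++/-O1
    print(add_section_name("python+json"))        # python/json
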
diff --git a/lib/cheat_wrapper_test.py b/lib/cheat_wrapper_test.py
index 72449aba..fd6b6043 100644
--- a/lib/cheat_wrapper_test.py
+++ b/lib/cheat_wrapper_test.py
@@ -30,10 +30,11 @@
g++/-O1
"""
+
def test_header_split():
for inp in unchanged.strip().splitlines():
assert inp == _add_section_name(inp)
- for test in split.strip().split('\n\n'):
- inp, outp = test.split('\n')
+ for test in split.strip().split("\n\n"):
+ inp, outp = test.split("\n")
assert outp == _add_section_name(inp)
diff --git a/lib/config.py b/lib/config.py
index 25c9785b..fbb11824 100644
--- a/lib/config.py
+++ b/lib/config.py
@@ -45,12 +45,14 @@
import os
from pygments.styles import get_all_styles
-#def get_all_styles():
+
+# def get_all_styles():
# return []
_ENV_VAR_PREFIX = "CHEATSH"
-_MYDIR = os.path.abspath(os.path.join(__file__, '..', '..'))
+_MYDIR = os.path.abspath(os.path.join(__file__, "..", ".."))
+
def _config_locations():
"""
@@ -59,17 +61,24 @@ def _config_locations():
* `_WORKDIR`, `_CONF_FILE_WORKDIR`, `_CONF_FILE_MYDIR`
"""
- var = _ENV_VAR_PREFIX + '_PATH_WORKDIR'
- workdir = os.environ[var] if var in os.environ \
- else os.path.join(os.environ['HOME'], '.cheat.sh')
-
- var = _ENV_VAR_PREFIX + '_CONFIG'
- conf_file_workdir = os.environ[var] if var in os.environ \
- else os.path.join(workdir, 'etc/config.yaml')
-
- conf_file_mydir = os.path.join(_MYDIR, 'etc/config.yaml')
+ var = _ENV_VAR_PREFIX + "_PATH_WORKDIR"
+ workdir = (
+ os.environ[var]
+ if var in os.environ
+ else os.path.join(os.environ["HOME"], ".cheat.sh")
+ )
+
+ var = _ENV_VAR_PREFIX + "_CONFIG"
+ conf_file_workdir = (
+ os.environ[var]
+ if var in os.environ
+ else os.path.join(workdir, "etc/config.yaml")
+ )
+
+ conf_file_mydir = os.path.join(_MYDIR, "etc/config.yaml")
return workdir, conf_file_workdir, conf_file_mydir
+
_WORKDIR, _CONF_FILE_WORKDIR, _CONF_FILE_MYDIR = _config_locations()
_CONFIG = {
@@ -87,10 +96,10 @@ def _config_locations():
"rfc",
"oeis",
"chmod",
- ],
+ ],
"adapters.mandatory": [
"search",
- ],
+ ],
"cache.redis.db": 0,
"cache.redis.host": "localhost",
"cache.redis.port": 6379,
@@ -101,7 +110,9 @@ def _config_locations():
"path.internal.ansi2html": os.path.join(_MYDIR, "share/ansi2html.sh"),
"path.internal.bin": os.path.join(_MYDIR, "bin"),
"path.internal.bin.upstream": os.path.join(_MYDIR, "bin", "upstream"),
- "path.internal.malformed": os.path.join(_MYDIR, "share/static/malformed-response.html"),
+ "path.internal.malformed": os.path.join(
+ _MYDIR, "share/static/malformed-response.html"
+ ),
"path.internal.pages": os.path.join(_MYDIR, "share"),
"path.internal.static": os.path.join(_MYDIR, "share/static"),
"path.internal.templates": os.path.join(_MYDIR, "share/templates"),
@@ -121,7 +132,7 @@ def _config_locations():
("^:", "internal"),
("/:list$", "internal"),
("/$", "cheat.sheets dir"),
- ],
+ ],
"routing.main": [
("", "cheat.sheets"),
("", "cheat"),
@@ -133,14 +144,15 @@ def _config_locations():
"routing.post": [
("^[^/ +]*$", "unknown"),
("^[a-z][a-z]-[a-z][a-z]$", "translation"),
- ],
+ ],
"routing.default": "question",
"upstream.url": "https://cheat.sh",
"upstream.timeout": 5,
"search.limit": 20,
"server.bind": "0.0.0.0",
"server.port": 8002,
- }
+}
+
class Config(dict):
"""
@@ -149,16 +161,16 @@ class Config(dict):
"""
def _absolute_path(self, val):
- if val.startswith('/'):
+ if val.startswith("/"):
return val
- return os.path.join(self['path.workdir'], val)
+ return os.path.join(self["path.workdir"], val)
def __init__(self, *args, **kwargs):
dict.__init__(self)
self.update(*args, **kwargs)
def __setitem__(self, key, val):
- if key.startswith('path.') and not val.startswith('/'):
+ if key.startswith("path.") and not val.startswith("/"):
val = self._absolute_path(val)
dict.__setitem__(self, key, val)
@@ -170,12 +182,13 @@ def update(self, *args, **kwargs):
"""
newdict = dict(*args, **kwargs)
- if 'path.workdir' in newdict:
- self['path.workdir'] = newdict['path.workdir']
+ if "path.workdir" in newdict:
+ self["path.workdir"] = newdict["path.workdir"]
for key, val in newdict.items():
self[key] = val
+
def _load_config_from_environ(config):
update = {}
@@ -183,7 +196,7 @@ def _load_config_from_environ(config):
if not isinstance(val, str) or isinstance(val, int):
continue
- env_var = _ENV_VAR_PREFIX + '_' + key.replace('.', '_').upper()
+ env_var = _ENV_VAR_PREFIX + "_" + key.replace(".", "_").upper()
if not env_var in os.environ:
continue
@@ -198,6 +211,7 @@ def _load_config_from_environ(config):
return update
+
def _get_nested(data, key):
"""
    Return value for a hierarchical key (like a.b.c).
@@ -215,12 +229,12 @@ def _get_nested(data, key):
if not data or not isinstance(data, dict):
return None
- if '.' not in key:
+ if "." not in key:
return data.get(key)
if key in data:
return data[key]
- parts = key.split('.')
+ parts = key.split(".")
for i in range(len(parts))[::-1]:
prefix = ".".join(parts[:i])
if prefix in data:
@@ -228,6 +242,7 @@ def _get_nested(data, key):
return None
+
def _load_config_from_file(default_config, filename):
import yaml
@@ -252,6 +267,7 @@ def _load_config_from_file(default_config, filename):
return update
+
CONFIG = Config()
CONFIG.update(_CONFIG)
CONFIG.update(_load_config_from_file(_CONFIG, _CONF_FILE_MYDIR))
@@ -261,4 +277,5 @@ def _load_config_from_file(default_config, filename):
if __name__ == "__main__":
import doctest
+
doctest.testmod()
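
Configuration keys can be overridden from the environment; the variable name is the key upper-cased, with dots replaced by underscores and the `CHEATSH_` prefix prepended, exactly as in `_load_config_from_environ` above. A small sketch of that mapping (the helper name is illustrative):

    _ENV_VAR_PREFIX = "CHEATSH"

    def env_var_name(key):
        # server.port -> CHEATSH_SERVER_PORT
        return _ENV_VAR_PREFIX + "_" + key.replace(".", "_").upper()

    print(env_var_name("server.port"))       # CHEATSH_SERVER_PORT
    print(env_var_name("cache.redis.host"))  # CHEATSH_CACHE_REDIS_HOST
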
diff --git a/lib/fetch.py b/lib/fetch.py
index bc210473..ee1862b8 100644
--- a/lib/fetch.py
+++ b/lib/fetch.py
@@ -24,6 +24,7 @@
from config import CONFIG
+
def _log(*message):
logging.info(*message)
if len(message) > 1:
@@ -31,15 +32,18 @@ def _log(*message):
else:
message = message[0].rstrip("\n")
- sys.stdout.write(message+"\n")
+ sys.stdout.write(message + "\n")
+
def _run_cmd(cmd):
shell = isinstance(cmd, str)
process = subprocess.Popen(
- cmd, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ cmd, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
+ )
output = process.communicate()[0]
return process.returncode, output
+
def fetch_all(skip_existing=True):
"""
Fetch all known repositories mentioned in the adapters
@@ -58,8 +62,11 @@ def _fetch_locations(known_location):
sys.stdout.flush()
try:
process = subprocess.Popen(
- cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
- universal_newlines=True)
+ cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ universal_newlines=True,
+ )
except OSError:
print("\nERROR: %s" % cmd)
raise
@@ -76,10 +83,14 @@ def _fetch_locations(known_location):
location = adptr.local_repository_location()
if not location:
continue
- if location in known_location \
- and adptr.repository_url() != known_location[location].repository_url():
- fatal("Duplicate location: %s for %s and %s"
- % (location, adptr, known_location[location]))
+ if (
+ location in known_location
+ and adptr.repository_url() != known_location[location].repository_url()
+ ):
+ fatal(
+ "Duplicate location: %s for %s and %s"
+ % (location, adptr, known_location[location])
+ )
known_location[location] = adptr
# Parent directories creation
@@ -101,9 +112,12 @@ def _fetch_locations(known_location):
os.makedirs(parent)
- known_location = {k:v for k, v in known_location.items() if k not in existing_locations}
+ known_location = {
+ k: v for k, v in known_location.items() if k not in existing_locations
+ }
_fetch_locations(known_location)
+
def _update_adapter(adptr):
"""
Update implementation.
@@ -118,7 +132,10 @@ def _update_adapter(adptr):
errorcode, output = _run_cmd(cmd)
if errorcode:
- _log("\nERROR:\n---%s\n" % output.decode("utf-8") + "\n---\nCould not update %s" % adptr)
+ _log(
+ "\nERROR:\n---%s\n" % output.decode("utf-8")
+ + "\n---\nCould not update %s" % adptr
+ )
return False
# Getting current repository state
@@ -129,7 +146,11 @@ def _update_adapter(adptr):
if cmd:
errorcode, state = _run_cmd(cmd)
if errorcode:
- _log("\nERROR:\n---\n" + state + "\n---\nCould not get repository state: %s" % adptr)
+ _log(
+ "\nERROR:\n---\n"
+ + state
+ + "\n---\nCould not get repository state: %s" % adptr
+ )
return False
state = state.strip()
@@ -141,7 +162,11 @@ def _update_adapter(adptr):
errorcode, output = _run_cmd(cmd)
output = output.decode("utf-8")
if errorcode:
- _log("\nERROR:\n---\n" + output + "\n---\nCould not get list of pages to be updated: %s" % adptr)
+ _log(
+ "\nERROR:\n---\n"
+ + output
+ + "\n---\nCould not get list of pages to be updated: %s" % adptr
+ )
return False
updates = output.splitlines()
@@ -161,6 +186,7 @@ def _update_adapter(adptr):
adptr.save_state(state)
return True
+
def update_all():
"""
Update all known repositories, mentioned in the adapters
@@ -177,14 +203,18 @@ def update_all():
_update_adapter(adptr)
+
def update_by_name(name):
"""
Find adapter by its `name` and update only it.
"""
pass
+
def _show_usage():
- sys.stdout.write(textwrap.dedent("""
+ sys.stdout.write(
+ textwrap.dedent(
+ """
Usage:
python lib/fetch.py [command]
@@ -195,7 +225,10 @@ def _show_usage():
update [name] -- update repository of the adapter `name`
fetch-all -- fetch all configured repositories
- """))
+ """
+ )
+ )
+
def main(args):
"""
@@ -213,17 +246,19 @@ def main(args):
logging.basicConfig(
filename=CONFIG["path.log.fetch"],
level=logging.DEBUG,
- format='%(asctime)s %(message)s')
+ format="%(asctime)s %(message)s",
+ )
- if args[0] == 'fetch-all':
+ if args[0] == "fetch-all":
fetch_all()
- elif args[0] == 'update':
+ elif args[0] == "update":
update_by_name(sys.argv[1])
- elif args[0] == 'update-all':
+ elif args[0] == "update-all":
update_all()
else:
_show_usage()
sys.exit(0)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main(sys.argv[1:])
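
`_run_cmd` runs either a shell string or an argv list and returns the exit code together with the combined output, which the fetch code then checks. A standalone sketch of the same helper and a typical error check:

    import subprocess
    import sys

    def run_cmd(cmd):
        shell = isinstance(cmd, str)
        process = subprocess.Popen(
            cmd, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
        )
        output = process.communicate()[0]
        return process.returncode, output

    errorcode, output = run_cmd([sys.executable, "--version"])
    if errorcode:
        print("ERROR:\n---\n%s\n---" % output.decode("utf-8"))
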
diff --git a/lib/fmt/comments.py b/lib/fmt/comments.py
index 8bd122d7..49a413b7 100644
--- a/lib/fmt/comments.py
+++ b/lib/fmt/comments.py
@@ -33,29 +33,33 @@
from languages_data import VIM_NAME
import cache
-FNULL = open(os.devnull, 'w')
+FNULL = open(os.devnull, "w")
TEXT = 0
CODE = 1
UNDEFINED = -1
CODE_WHITESPACE = -2
+
+
def _language_name(name):
return VIM_NAME.get(name, name)
def _remove_empty_lines_from_beginning(lines):
start = 0
- while start < len(lines) and lines[start].strip() == '':
+ while start < len(lines) and lines[start].strip() == "":
start += 1
lines = lines[start:]
return lines
+
def _remove_empty_lines_from_end(lines):
end = len(lines) - 1
- while end >= 0 and lines[end].strip() == '':
+ while end >= 0 and lines[end].strip() == "":
end -= 1
- lines = lines[:end+1]
+ lines = lines[: end + 1]
return lines
+
def _cleanup_lines(lines):
"""
Cleanup `lines` a little bit: remove empty lines at the beginning
@@ -66,9 +70,14 @@ def _cleanup_lines(lines):
if lines == []:
return lines
# remove repeating empty lines
- lines = list(chain.from_iterable(
- [(list(x[1]) if x[0] else [''])
- for x in groupby(lines, key=lambda x: x.strip() != '')]))
+ lines = list(
+ chain.from_iterable(
+ [
+ (list(x[1]) if x[0] else [""])
+ for x in groupby(lines, key=lambda x: x.strip() != "")
+ ]
+ )
+ )
return lines
@@ -89,31 +98,32 @@ def _line_type(line):
or if it is the first/last line and it has
code on the other side.
"""
- if line.strip() == '':
+ if line.strip() == "":
return UNDEFINED
# some line may start with spaces but still be not code.
# we need some heuristics here, but for the moment just
# whitelist such cases:
- if line.strip().startswith('* ') or re.match(r'[0-9]+\.', line.strip()):
+ if line.strip().startswith("* ") or re.match(r"[0-9]+\.", line.strip()):
return TEXT
- if line.startswith(' '):
+ if line.startswith(" "):
return CODE
return TEXT
+
def _classify_lines(lines):
line_types = [_line_type(line) for line in lines]
# pass 2:
# adding empty code lines to the code
for i in range(len(line_types) - 1):
- if line_types[i] == CODE and line_types[i+1] == UNDEFINED:
- line_types[i+1] = CODE_WHITESPACE
+ if line_types[i] == CODE and line_types[i + 1] == UNDEFINED:
+ line_types[i + 1] = CODE_WHITESPACE
changed = True
for i in range(len(line_types) - 1)[::-1]:
- if line_types[i] == UNDEFINED and line_types[i+1] == CODE:
+ if line_types[i] == UNDEFINED and line_types[i + 1] == CODE:
line_types[i] = CODE_WHITESPACE
changed = True
line_types = [CODE if x == CODE_WHITESPACE else x for x in line_types]
@@ -127,12 +137,12 @@ def _classify_lines(lines):
# changing all lines types that are near the text
for i in range(len(line_types) - 1):
- if line_types[i] == TEXT and line_types[i+1] == UNDEFINED:
- line_types[i+1] = TEXT
+ if line_types[i] == TEXT and line_types[i + 1] == UNDEFINED:
+ line_types[i + 1] = TEXT
changed = True
for i in range(len(line_types) - 1)[::-1]:
- if line_types[i] == UNDEFINED and line_types[i+1] == TEXT:
+ if line_types[i] == UNDEFINED and line_types[i + 1] == TEXT:
line_types[i] = TEXT
changed = True
@@ -140,15 +150,17 @@ def _classify_lines(lines):
line_types = [CODE if x == UNDEFINED else x for x in line_types]
return line_types
+
def _unindent_code(line, shift=0):
- if shift == -1 and line != '':
- return ' ' + line
+ if shift == -1 and line != "":
+ return " " + line
- if shift > 0 and line.startswith(' '*shift):
+ if shift > 0 and line.startswith(" " * shift):
return line[shift:]
return line
+
def _wrap_lines(lines_classes, unindent_code=False):
"""
Wrap classified lines. Add the split lines to the stream.
@@ -169,6 +181,7 @@ def _wrap_lines(lines_classes, unindent_code=False):
return result
+
def _run_vim_script(script_lines, text_lines):
"""
Apply `script_lines` to `lines_classes`
@@ -185,34 +198,43 @@ def _run_vim_script(script_lines, text_lines):
textfile.file.close()
my_env = os.environ.copy()
- my_env['HOME'] = CONFIG["path.internal.vim"]
+ my_env["HOME"] = CONFIG["path.internal.vim"]
- cmd = ["script", "-q", "-c",
- "vim -S %s %s" % (script_vim.name, textfile.name)]
+ cmd = ["script", "-q", "-c", "vim -S %s %s" % (script_vim.name, textfile.name)]
- Popen(cmd, shell=False,
- stdin=open(os.devnull, 'r'),
- stdout=FNULL, stderr=FNULL, env=my_env).communicate()
+ Popen(
+ cmd,
+ shell=False,
+ stdin=open(os.devnull, "r"),
+ stdout=FNULL,
+ stderr=FNULL,
+ env=my_env,
+ ).communicate()
return open(textfile.name, "r").read()
+
def _commenting_script(lines_blocks, filetype):
script_lines = []
block_start = 1
for block in lines_blocks:
lines = list(block[1])
- block_end = block_start + len(lines)-1
+ block_end = block_start + len(lines) - 1
if block[0] == 0:
- comment_type = 'sexy'
- if block_end - block_start < 1 or filetype == 'ruby':
- comment_type = 'comment'
-
- script_lines.insert(0, "%s,%s call NERDComment(1, '%s')"
- % (block_start, block_end, comment_type))
- script_lines.insert(0, "%s,%s call NERDComment(1, 'uncomment')"
- % (block_start, block_end))
+ comment_type = "sexy"
+ if block_end - block_start < 1 or filetype == "ruby":
+ comment_type = "comment"
+
+ script_lines.insert(
+ 0,
+ "%s,%s call NERDComment(1, '%s')"
+ % (block_start, block_end, comment_type),
+ )
+ script_lines.insert(
+ 0, "%s,%s call NERDComment(1, 'uncomment')" % (block_start, block_end)
+ )
block_start = block_end + 1
@@ -221,6 +243,7 @@ def _commenting_script(lines_blocks, filetype):
return script_lines
+
def _beautify(text, filetype, add_comments=False, remove_text=False):
"""
Main function that actually does the whole beautification job.
@@ -230,7 +253,7 @@ def _beautify(text, filetype, add_comments=False, remove_text=False):
# or remove the text completely. Otherwise the code has to remain aligned
unindent_code = add_comments or remove_text
- lines = [x.decode("utf-8").rstrip('\n') for x in text.splitlines()]
+ lines = [x.decode("utf-8").rstrip("\n") for x in text.splitlines()]
lines = _cleanup_lines(lines)
lines_classes = zip(_classify_lines(lines), lines)
lines_classes = _wrap_lines(lines_classes, unindent_code=unindent_code)
@@ -239,34 +262,33 @@ def _beautify(text, filetype, add_comments=False, remove_text=False):
lines = [line[1] for line in lines_classes if line[0] == 1]
lines = _cleanup_lines(lines)
output = "\n".join(lines)
- if not output.endswith('\n'):
+ if not output.endswith("\n"):
output += "\n"
elif not add_comments:
output = "\n".join(line[1] for line in lines_classes)
else:
lines_blocks = groupby(lines_classes, key=lambda x: x[0])
script_lines = _commenting_script(lines_blocks, filetype)
- output = _run_vim_script(
- script_lines,
- [line for (_, line) in lines_classes])
+ output = _run_vim_script(script_lines, [line for (_, line) in lines_classes])
return output
+
def code_blocks(text, wrap_lines=False, unindent_code=False):
"""
Split `text` into blocks of text and code.
Return list of tuples TYPE, TEXT
"""
- text = text.encode('utf-8')
+ text = text.encode("utf-8")
- lines = [x.rstrip('\n') for x in text.splitlines()]
+ lines = [x.rstrip("\n") for x in text.splitlines()]
lines_classes = zip(_classify_lines(lines), lines)
if wrap_lines:
lines_classes = _wrap_lines(lines_classes, unindent_code=unindent_code)
lines_blocks = groupby(lines_classes, key=lambda x: x[0])
- answer = [(x[0], "\n".join([y[1] for y in x[1]])+"\n") for x in lines_blocks]
+ answer = [(x[0], "\n".join([y[1] for y in x[1]]) + "\n") for x in lines_blocks]
return answer
@@ -279,21 +301,22 @@ def beautify(text, lang, options):
"""
options = options or {}
- beauty_options = dict((k, v) for k, v in options.items() if k in
- ['add_comments', 'remove_text'])
+ beauty_options = dict(
+ (k, v) for k, v in options.items() if k in ["add_comments", "remove_text"]
+ )
- mode = ''
- if beauty_options.get('add_comments'):
- mode += 'c'
- if beauty_options.get('remove_text'):
- mode += 'q'
+ mode = ""
+ if beauty_options.get("add_comments"):
+ mode += "c"
+ if beauty_options.get("remove_text"):
+ mode += "q"
if beauty_options == {}:
# if mode is unknown, just don't transform the text at all
return text
if isinstance(text, str):
- text = text.encode('utf-8')
+ text = text.encode("utf-8")
digest = "t:%s:%s:%s" % (hashlib.md5(text).hexdigest(), lang, mode)
# temporary added line that removes invalid cache entries
@@ -309,6 +332,7 @@ def beautify(text, lang, options):
return answer
+
def __main__():
text = sys.stdin.read()
filetype = sys.argv[1]
@@ -321,5 +345,6 @@ def __main__():
result = beautify(text, filetype, options)
sys.stdout.write(result)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
__main__()
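
The classifier above marks each cheat-sheet line as TEXT, CODE, or UNDEFINED and then lets code blocks absorb neighbouring blank lines. A trimmed-down sketch of the first pass, using the same indentation heuristic as `_line_type`:

    import re

    TEXT, CODE, UNDEFINED = 0, 1, -1

    def line_type(line):
        if line.strip() == "":
            return UNDEFINED
        # bullet lists and numbered lists are text even when indented
        if line.strip().startswith("* ") or re.match(r"[0-9]+\.", line.strip()):
            return TEXT
        if line.startswith(" "):
            return CODE
        return TEXT

    sheet = ["Download a file:", "", "   curl -O https://example.com/file"]
    print([line_type(line) for line in sheet])   # [0, -1, 1]
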
diff --git a/lib/fmt/internal.py b/lib/fmt/internal.py
index a30ab951..115378bd 100644
--- a/lib/fmt/internal.py
+++ b/lib/fmt/internal.py
@@ -16,7 +16,7 @@
1: {
1: Fore.CYAN,
2: Fore.GREEN,
- 3: colored.fg('orange_3'),
+ 3: colored.fg("orange_3"),
4: Style.DIM,
5: Style.DIM,
},
@@ -27,12 +27,9 @@
}
-
def _reverse_palette(code):
- return {
- 1 : Fore.BLACK + _back_color(code),
- 2 : Style.DIM
- }
+ return {1: Fore.BLACK + _back_color(code), 2: Style.DIM}
+
def _back_color(code):
if code == 0 or (isinstance(code, str) and code.lower() == "white"):
@@ -44,6 +41,7 @@ def _back_color(code):
return Back.WHITE
+
def colorize_internal(text, palette_number=1):
"""
Colorize `text`, use `palette`
@@ -51,26 +49,27 @@ def colorize_internal(text, palette_number=1):
palette = PALETTES[palette_number]
palette_reverse = _reverse_palette(palette_number)
+
def _process_text(text):
text = text.group()[1:-1]
factor = 1
- if text.startswith('-'):
+ if text.startswith("-"):
text = text[1:]
factor = -1
- stripped = text.lstrip('0123456789')
+ stripped = text.lstrip("0123456789")
return (text, stripped, factor)
def _extract_color_number(text, stripped, factor=1):
- return int(text[:len(text)-len(stripped)])*factor
+ return int(text[: len(text) - len(stripped)]) * factor
def _colorize_curlies_block(text):
text, stripped, factor = _process_text(text)
color_number = _extract_color_number(text, stripped, factor)
- if stripped.startswith('='):
+ if stripped.startswith("="):
stripped = stripped[1:]
- reverse = (color_number < 0)
+ reverse = color_number < 0
if reverse:
color_number = -color_number
@@ -82,10 +81,10 @@ def _colorize_curlies_block(text):
return stripped
def _colorize_headers(text):
- if text.group(0).endswith('\n'):
- newline = '\n'
+ if text.group(0).endswith("\n"):
+ newline = "\n"
else:
- newline = ''
+ newline = ""
color_number = 3
return palette[color_number] + text.group(0).strip() + Style.RESET_ALL + newline
@@ -94,6 +93,7 @@ def _colorize_headers(text):
text = re.sub("#(.*?)\n", _colorize_headers, text)
return text
+
def colorize_internal_firstpage_v1(answer):
"""
Colorize "/:firstpage-v1".
@@ -101,28 +101,39 @@ def colorize_internal_firstpage_v1(answer):
"""
def _colorize_line(line):
- if line.startswith('T'):
- line = colored.fg("grey_62") + line + colored.attr('reset')
- line = re.sub(r"\{(.*?)\}", colored.fg("orange_3") + r"\1"+colored.fg('grey_35'), line)
+ if line.startswith("T"):
+ line = colored.fg("grey_62") + line + colored.attr("reset")
+ line = re.sub(
+ r"\{(.*?)\}",
+ colored.fg("orange_3") + r"\1" + colored.fg("grey_35"),
+ line,
+ )
return line
- line = re.sub(r"\[(F.*?)\]",
- colored.bg("black") + colored.fg("cyan") + r"[\1]"+colored.attr('reset'),
- line)
- line = re.sub(r"\[(g.*?)\]",
- colored.bg("dark_gray")+colored.fg("grey_0")+r"[\1]"+colored.attr('reset'),
- line)
- line = re.sub(r"\{(.*?)\}",
- colored.fg("orange_3") + r"\1"+colored.attr('reset'),
- line)
- line = re.sub(r"<(.*?)>",
- colored.fg("cyan") + r"\1"+colored.attr('reset'),
- line)
+ line = re.sub(
+ r"\[(F.*?)\]",
+ colored.bg("black") + colored.fg("cyan") + r"[\1]" + colored.attr("reset"),
+ line,
+ )
+ line = re.sub(
+ r"\[(g.*?)\]",
+ colored.bg("dark_gray")
+ + colored.fg("grey_0")
+ + r"[\1]"
+ + colored.attr("reset"),
+ line,
+ )
+ line = re.sub(
+ r"\{(.*?)\}", colored.fg("orange_3") + r"\1" + colored.attr("reset"), line
+ )
+ line = re.sub(
+ r"<(.*?)>", colored.fg("cyan") + r"\1" + colored.attr("reset"), line
+ )
return line
lines = answer.splitlines()
answer_lines = lines[:9]
- answer_lines.append(colored.fg('grey_35')+lines[9]+colored.attr('reset'))
+ answer_lines.append(colored.fg("grey_35") + lines[9] + colored.attr("reset"))
for line in lines[10:]:
answer_lines.append(_colorize_line(line))
answer = "\n".join(answer_lines) + "\n"
diff --git a/lib/fmt/markdown.py b/lib/fmt/markdown.py
index e120a8cf..793cdb77 100644
--- a/lib/fmt/markdown.py
+++ b/lib/fmt/markdown.py
@@ -11,6 +11,7 @@
import ansiwrap
import colored
+
def format_text(text, config=None, highlighter=None):
"""
Renders `text` according to markdown rules.
@@ -19,38 +20,42 @@ def format_text(text, config=None, highlighter=None):
"""
return _format_section(text, config=config, highlighter=highlighter)
+
def _split_into_paragraphs(text):
- return re.split('\n\n+', text)
+ return re.split("\n\n+", text)
+
def _colorize(text):
- return \
+ return re.sub(
+ r"`(.*?)`",
+ colored.bg("dark_gray")
+ + colored.fg("white")
+ + " "
+ + r"\1"
+ + " "
+ + colored.attr("reset"),
re.sub(
- r"`(.*?)`",
- colored.bg("dark_gray") \
- + colored.fg("white") \
- + " " + r"\1" + " " \
- + colored.attr('reset'),
- re.sub(
- r"\*\*(.*?)\*\*",
- colored.attr('bold') \
- + colored.fg("white") \
- + r"\1" \
- + colored.attr('reset'),
- text))
+ r"\*\*(.*?)\*\*",
+ colored.attr("bold") + colored.fg("white") + r"\1" + colored.attr("reset"),
+ text,
+ ),
+ )
+
def _format_section(section_text, config=None, highlighter=None):
- answer = ''
+ answer = ""
# cut code blocks
block_number = 0
while True:
section_text, replacements = re.subn(
- '^```.*?^```',
- 'MULTILINE_BLOCK_%s' % block_number,
+ "^```.*?^```",
+ "MULTILINE_BLOCK_%s" % block_number,
section_text,
1,
- flags=re.S | re.MULTILINE)
+ flags=re.S | re.MULTILINE,
+ )
block_number += 1
if not replacements:
break
@@ -58,32 +63,33 @@ def _format_section(section_text, config=None, highlighter=None):
# cut links
links = []
while True:
- regexp = re.compile(r'\[(.*?)\]\((.*?)\)')
+ regexp = re.compile(r"\[(.*?)\]\((.*?)\)")
match = regexp.search(section_text)
if match:
links.append(match.group(0))
text = match.group(1)
# links are not yet supported
#
- text = '\x1B]8;;%s\x1B\\\\%s\x1B]8;;\x1B\\\\' % (match.group(2), match.group(1))
+ text = "\x1b]8;;%s\x1b\\\\%s\x1b]8;;\x1b\\\\" % (
+ match.group(2),
+ match.group(1),
+ )
else:
break
-
section_text, replacements = regexp.subn(
- text, # 'LINK_%s' % len(links),
- section_text,
- 1)
+ text, section_text, 1 # 'LINK_%s' % len(links),
+ )
block_number += 1
if not replacements:
break
for paragraph in _split_into_paragraphs(section_text):
- answer += "\n".join(
- ansiwrap.fill(_colorize(line)) + "\n"
- for line in paragraph.splitlines()) + "\n"
-
- return {
- 'ansi': answer,
- 'links': links
- }
+ answer += (
+ "\n".join(
+ ansiwrap.fill(_colorize(line)) + "\n" for line in paragraph.splitlines()
+ )
+ + "\n"
+ )
+
+ return {"ansi": answer, "links": links}
diff --git a/lib/frontend/ansi.py b/lib/frontend/ansi.py
index 276d58ba..9540f235 100644
--- a/lib/frontend/ansi.py
+++ b/lib/frontend/ansi.py
@@ -28,78 +28,107 @@
import colored
from pygments import highlight as pygments_highlight
-from pygments.formatters import Terminal256Formatter # pylint: disable=no-name-in-module
- # pylint: disable=wrong-import-position
-sys.path.append(os.path.abspath(os.path.join(__file__, '..')))
+from pygments.formatters import (
+ Terminal256Formatter,
+) # pylint: disable=no-name-in-module
+
+# pylint: disable=wrong-import-position
+sys.path.append(os.path.abspath(os.path.join(__file__, "..")))
from config import CONFIG
-import languages_data # pylint: enable=wrong-import-position
+import languages_data # pylint: enable=wrong-import-position
import fmt.internal
import fmt.comments
+
def visualize(answer_data, request_options):
"""
Renders `answer_data` as ANSI output.
"""
- answers = answer_data['answers']
- return _visualize(answers, request_options, search_mode=bool(answer_data['keyword']))
+ answers = answer_data["answers"]
+ return _visualize(
+ answers, request_options, search_mode=bool(answer_data["keyword"])
+ )
+
+
+ANSI_ESCAPE = re.compile(r"(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]")
+
-ANSI_ESCAPE = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]')
def remove_ansi(sometext):
"""
Remove ANSI sequences from `sometext` and convert it into plaintext.
"""
- return ANSI_ESCAPE.sub('', sometext)
+ return ANSI_ESCAPE.sub("", sometext)
-def _limited_answer(answer):
- return colored.bg('dark_goldenrod') + colored.fg('yellow_1') \
- + ' ' + answer + ' ' \
- + colored.attr('reset') + "\n"
-def _colorize_ansi_answer(topic, answer, color_style, # pylint: disable=too-many-arguments
- highlight_all=True, highlight_code=False,
- unindent_code=False, language=None):
+def _limited_answer(answer):
+ return (
+ colored.bg("dark_goldenrod")
+ + colored.fg("yellow_1")
+ + " "
+ + answer
+ + " "
+ + colored.attr("reset")
+ + "\n"
+ )
+
+
+def _colorize_ansi_answer(
+ topic,
+ answer,
+ color_style, # pylint: disable=too-many-arguments
+ highlight_all=True,
+ highlight_code=False,
+ unindent_code=False,
+ language=None,
+):
color_style = color_style or "native"
- lexer_class = languages_data.LEXER['bash']
- if '/' in topic:
+ lexer_class = languages_data.LEXER["bash"]
+ if "/" in topic:
if language is None:
- section_name = topic.split('/', 1)[0].lower()
+ section_name = topic.split("/", 1)[0].lower()
else:
section_name = language
section_name = languages_data.get_lexer_name(section_name)
lexer_class = languages_data.LEXER.get(section_name, lexer_class)
- if section_name == 'php':
+ if section_name == "php":
answer = "\n%s?>\n" % answer
if highlight_all:
- highlight = lambda answer: pygments_highlight(
- answer, lexer_class(), Terminal256Formatter(style=color_style)).strip('\n')+'\n'
+ highlight = (
+ lambda answer: pygments_highlight(
+ answer, lexer_class(), Terminal256Formatter(style=color_style)
+ ).strip("\n")
+ + "\n"
+ )
else:
highlight = lambda x: x
if highlight_code:
blocks = fmt.comments.code_blocks(
- answer, wrap_lines=True, unindent_code=(4 if unindent_code else False))
+ answer, wrap_lines=True, unindent_code=(4 if unindent_code else False)
+ )
highlighted_blocks = []
for block in blocks:
if block[0] == 1:
this_block = highlight(block[1])
else:
- this_block = block[1].strip('\n')+'\n'
+ this_block = block[1].strip("\n") + "\n"
highlighted_blocks.append(this_block)
result = "\n".join(highlighted_blocks)
else:
- result = highlight(answer).lstrip('\n')
+ result = highlight(answer).lstrip("\n")
return result
+
def _visualize(answers, request_options, search_mode=False):
- highlight = not bool(request_options and request_options.get('no-terminal'))
- color_style = (request_options or {}).get('style', '')
- if color_style not in CONFIG['frontend.styles']:
- color_style = ''
+ highlight = not bool(request_options and request_options.get("no-terminal"))
+ color_style = (request_options or {}).get("style", "")
+ if color_style not in CONFIG["frontend.styles"]:
+ color_style = ""
# if there is more than one answer,
# show the source of the answer
@@ -108,39 +137,51 @@ def _visualize(answers, request_options, search_mode=False):
found = True
result = ""
for answer_dict in answers:
- topic = answer_dict['topic']
- topic_type = answer_dict['topic_type']
- answer = answer_dict['answer']
- found = found and not topic_type == 'unknown'
+ topic = answer_dict["topic"]
+ topic_type = answer_dict["topic_type"]
+ answer = answer_dict["answer"]
+ found = found and not topic_type == "unknown"
- if multiple_answers and topic != 'LIMITED':
+ if multiple_answers and topic != "LIMITED":
section_name = f"{topic_type}:{topic}"
if not highlight:
result += f"#[{section_name}]\n"
else:
- result += "".join([
- "\n", colored.bg('dark_gray'), colored.attr("res_underlined"),
- f" {section_name} ",
- colored.attr("res_underlined"), colored.attr('reset'), "\n"])
-
- if answer_dict['format'] in ['ansi', 'text']:
+ result += "".join(
+ [
+ "\n",
+ colored.bg("dark_gray"),
+ colored.attr("res_underlined"),
+ f" {section_name} ",
+ colored.attr("res_underlined"),
+ colored.attr("reset"),
+ "\n",
+ ]
+ )
+
+ if answer_dict["format"] in ["ansi", "text"]:
result += answer
- elif topic == ':firstpage-v1':
+ elif topic == ":firstpage-v1":
result += fmt.internal.colorize_internal_firstpage_v1(answer)
- elif topic == 'LIMITED':
+ elif topic == "LIMITED":
result += _limited_answer(topic)
else:
result += _colorize_ansi_answer(
- topic, answer, color_style,
+ topic,
+ answer,
+ color_style,
highlight_all=highlight,
- highlight_code=(topic_type == 'question'
- and not request_options.get('add_comments')
- and not request_options.get('remove_text')),
- language=answer_dict.get("filetype"))
-
- if request_options.get('no-terminal'):
+ highlight_code=(
+ topic_type == "question"
+ and not request_options.get("add_comments")
+ and not request_options.get("remove_text")
+ ),
+ language=answer_dict.get("filetype"),
+ )
+
+ if request_options.get("no-terminal"):
result = remove_ansi(result)
- result = result.strip('\n') + "\n"
+ result = result.strip("\n") + "\n"
return result, found
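
The ANSI frontend highlights each answer with Pygments using a 256-colour terminal formatter and a lexer picked from the section name. A minimal sketch of that highlighting step with the lexer fixed to bash and an example style:

    from pygments import highlight
    from pygments.formatters import Terminal256Formatter
    from pygments.lexers import BashLexer

    def colorize_answer(answer, color_style="native"):
        # same call shape as _colorize_ansi_answer above, with a fixed lexer
        return highlight(
            answer, BashLexer(), Terminal256Formatter(style=color_style)
        ).strip("\n") + "\n"

    print(colorize_answer("tar -xzf archive.tar.gz   # extract a gzip archive"))
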
diff --git a/lib/frontend/html.py b/lib/frontend/html.py
index 43469d34..79469b69 100644
--- a/lib/frontend/html.py
+++ b/lib/frontend/html.py
@@ -10,7 +10,7 @@
import re
from subprocess import Popen, PIPE
-MYDIR = os.path.abspath(os.path.join(__file__, '..', '..'))
+MYDIR = os.path.abspath(os.path.join(__file__, "..", ".."))
sys.path.append("%s/lib/" % MYDIR)
# pylint: disable=wrong-import-position
@@ -22,37 +22,44 @@
# temporary having it here, but actually we have the same data
# in the adapter module
GITHUB_REPOSITORY = {
- "late.nz" : 'chubin/late.nz',
- "cheat.sheets" : 'chubin/cheat.sheets',
- "cheat.sheets dir" : 'chubin/cheat.sheets',
- "tldr" : 'tldr-pages/tldr',
- "cheat" : 'chrisallenlane/cheat',
- "learnxiny" : 'adambard/learnxinyminutes-docs',
- "internal" : '',
- "search" : '',
- "unknown" : '',
+ "late.nz": "chubin/late.nz",
+ "cheat.sheets": "chubin/cheat.sheets",
+ "cheat.sheets dir": "chubin/cheat.sheets",
+ "tldr": "tldr-pages/tldr",
+ "cheat": "chrisallenlane/cheat",
+ "learnxiny": "adambard/learnxinyminutes-docs",
+ "internal": "",
+ "search": "",
+ "unknown": "",
}
+
def visualize(answer_data, request_options):
- query = answer_data['query']
- answers = answer_data['answers']
- topics_list = answer_data['topics_list']
- editable = (len(answers) == 1 and answers[0]['topic_type'] == 'cheat.sheets')
+ query = answer_data["query"]
+ answers = answer_data["answers"]
+ topics_list = answer_data["topics_list"]
+ editable = len(answers) == 1 and answers[0]["topic_type"] == "cheat.sheets"
- repository_button = ''
+ repository_button = ""
if len(answers) == 1:
- repository_button = _github_button(answers[0]['topic_type'])
+ repository_button = _github_button(answers[0]["topic_type"])
result, found = frontend.ansi.visualize(answer_data, request_options)
- return _render_html(query, result, editable, repository_button, topics_list, request_options), found
+ return (
+ _render_html(
+ query, result, editable, repository_button, topics_list, request_options
+ ),
+ found,
+ )
+
def _github_button(topic_type):
- full_name = GITHUB_REPOSITORY.get(topic_type, '')
+ full_name = GITHUB_REPOSITORY.get(topic_type, "")
if not full_name:
- return ''
+ return ""
- short_name = full_name.split('/', 1)[1] # pylint: disable=unused-variable
+ short_name = full_name.split("/", 1)[1] # pylint: disable=unused-variable
button = (
""
@@ -66,62 +73,78 @@ def _github_button(topic_type):
) % locals()
return button
-def _render_html(query, result, editable, repository_button, topics_list, request_options):
+
+def _render_html(
+ query, result, editable, repository_button, topics_list, request_options
+):
def _html_wrapper(data):
"""
Convert ANSI text `data` to HTML
"""
- cmd = ["bash", CONFIG['path.internal.ansi2html'], "--palette=solarized", "--bg=dark"]
+ cmd = [
+ "bash",
+ CONFIG["path.internal.ansi2html"],
+ "--palette=solarized",
+ "--bg=dark",
+ ]
try:
proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
except FileNotFoundError:
print("ERROR: %s" % cmd)
raise
- data = data.encode('utf-8')
+ data = data.encode("utf-8")
stdout, stderr = proc.communicate(data)
if proc.returncode != 0:
- error((stdout + stderr).decode('utf-8'))
- return stdout.decode('utf-8')
-
+ error((stdout + stderr).decode("utf-8"))
+ return stdout.decode("utf-8")
result = result + "\n$"
result = _html_wrapper(result)
title = "cheat.sh/%s" % query
- submit_button = ('')
- topic_list = (''
- % ("\n".join("" % x for x in topics_list)))
+ submit_button = (
+ ''
+ )
+ topic_list = '' % (
+ "\n".join("" % x for x in topics_list)
+ )
curl_line = "$ curl cheat.sh/"
- if query == ':firstpage':
+ if query == ":firstpage":
query = ""
- form_html = ('') \
- % (submit_button, curl_line, query, topic_list)
-
- edit_button = ''
+ form_html = (
+ '"
+ ) % (submit_button, curl_line, query, topic_list)
+
+ edit_button = ""
if editable:
# It's possible that topic directory starts with omitted underscore
- if '/' in query:
- query = '_' + query
- edit_page_link = 'https://github.com/chubin/cheat.sheets/edit/master/sheets/' + query
+ if "/" in query:
+ query = "_" + query
+ edit_page_link = (
+ "https://github.com/chubin/cheat.sheets/edit/master/sheets/" + query
+ )
edit_button = (
''
'[edit]'
- '') % edit_page_link
+ ""
+ ) % edit_page_link
result = re.sub("", edit_button + form_html + "", result)
result = re.sub("", "" + title, result)
- if not request_options.get('quiet'):
- result = result.replace('',
- TWITTER_BUTTON \
- + GITHUB_BUTTON \
- + repository_button \
- + GITHUB_BUTTON_FOOTER \
- + '')
+ if not request_options.get("quiet"):
+ result = result.replace(
+ "