From dfd04948e0770c516057e5c15de8373965745174 Mon Sep 17 00:00:00 2001
From: xxyzz
Date: Sat, 26 Aug 2023 23:42:23 +0800
Subject: [PATCH] Break long lines

---
 config.py        | 15 +++++---
 custom_lemmas.py | 16 ++++++---
 database.py      | 15 ++++++--
 deps.py          | 18 +++++++---
 dump_lemmas.py   | 23 +++++++++---
 epub.py          | 92 +++++++++++++++++++++++++++++++++++++-----------
 error_dialogs.py | 55 +++++++++++++++++++----------
 import_lemmas.py | 11 ++++--
 mediawiki.py     | 53 +++++++++++++++++++++-------
 tests/convert.py |  3 +-
 utils.py         | 39 ++++++++++----------
 11 files changed, 247 insertions(+), 93 deletions(-)

diff --git a/config.py b/config.py
index 0608210..24c00d9 100644
--- a/config.py
+++ b/config.py
@@ -107,7 +107,8 @@ def __init__(self):
         )
         self.search_people_box.setToolTip(
             _(
-                "Enable this option for nonfiction books and novels that have character pages on Wikipedia/Fandom"
+                "Enable this option for nonfiction books and novels that have character"
+                " pages on Wikipedia/Fandom"
             )
         )
         self.search_people_box.setChecked(prefs["search_people"])
@@ -122,7 +123,8 @@ def __init__(self):
         self.use_gpu_box = QCheckBox(_("Run spaCy with GPU(requires CUDA)"))
         self.use_gpu_box.setToolTip(
             _(
-                "GPU will be used when creating X-Ray file if spaCy has transformer model for the book language with ner component."
+                "GPU will be used when creating the X-Ray file if spaCy has a"
+                " transformer model for the book language with an ner component."
             )
         )
         self.use_gpu_box.setChecked(prefs["use_gpu"])
@@ -157,7 +159,8 @@ def __init__(self):
         minimal_x_ray_label = QLabel(_("Minimal X-Ray occurrences"))
         minimal_x_ray_label.setToolTip(
             _(
-                "X-Ray entities that appear less then this number and don't have description from Wikipedia/Fandom will be removed"
+                "X-Ray entities that appear less than this number and don't have a "
+                "description from Wikipedia/Fandom will be removed"
             )
         )
         form_layout.addRow(minimal_x_ray_label, self.minimal_x_ray_count)
@@ -423,7 +426,8 @@ def __init__(self, formats: list[str]) -> None:
 
         message = QLabel(
             _(
-                "This book has multiple supported formats. Choose the format you want to use."
+                "This book has multiple supported formats. Choose the format "
+                "you want to use."
             )
         )
         vl.addWidget(message)
@@ -486,7 +490,8 @@ def __init__(self, parent: QObject, is_kindle: bool):
         wiktionary_gloss_label = QLabel(_("Use Wiktionary definition"))
         wiktionary_gloss_label.setToolTip(
             _(
-                "Change Word Wise language to Chinese on your Kindle device to view definition from Wiktionary"
+                "Change Word Wise language to Chinese on your Kindle device to "
+                "view definition from Wiktionary"
             )
         )
         form_layout.addRow(wiktionary_gloss_label, self.use_wiktionary_box)
diff --git a/custom_lemmas.py b/custom_lemmas.py
index 96baa45..317e8e2 100644
--- a/custom_lemmas.py
+++ b/custom_lemmas.py
@@ -163,7 +163,8 @@ def init_wiktionary_buttons(
         difficulty_label = QLabel(_("Difficulty limit"))
         difficulty_label.setToolTip(
             _(
-                "Difficult words have lower value. Words have difficulty value higher than this value are disabled."
+                "Difficult words have a lower value. Words with a difficulty value "
+                "higher than this value are disabled."
             )
         )
         self.difficulty_limit_box = QComboBox()
@@ -227,7 +228,12 @@ def check_empty_kindle_gloss(self) -> None:
         klld_conn = sqlite3.connect(klld_path)
 
         for sense_id, short_def, full_def, example in klld_conn.execute(
-            "SELECT senses.id, short_def, full_def, example_sentence FROM lemmas JOIN senses ON lemmas.id = display_lemma_id WHERE (full_def IS NOT NULL OR short_def IS NOT NULL) AND lemma NOT like '-%'"
+            """
+            SELECT senses.id, short_def, full_def, example_sentence
+            FROM lemmas JOIN senses ON lemmas.id = display_lemma_id
+            WHERE (full_def IS NOT NULL OR short_def IS NOT NULL)
+            AND lemma NOT like '-%'
+            """
         ):
             short_def = base64.b64decode(short_def if short_def else full_def).decode(
                 "utf-8"
@@ -235,7 +241,8 @@ def check_empty_kindle_gloss(self) -> None:
             full_def = base64.b64decode(full_def).decode("utf-8") if full_def else ""
             example = base64.b64decode(example).decode("utf-8") if example else ""
             custom_db_conn.execute(
-                "UPDATE senses SET short_def = ?, full_def = ?, example = ? WHERE id = ?",
+                "UPDATE senses SET short_def = ?, full_def = ?, example = ? "
+                "WHERE id = ?",
                 (short_def, full_def, example, sense_id),
             )
         klld_conn.close()
@@ -441,7 +448,8 @@ def __init__(self, parent):
 
         text = QLabel(
             _(
-                'Export text separated by tab, can be imported to Anki.\n"Allow HTML in fields" option needs to be enabled in Anki.'
+                "Export text separated by tab, can be imported to Anki.\n"
+                '"Allow HTML in fields" option needs to be enabled in Anki.'
             )
         )
         vl.addWidget(text)
diff --git a/database.py b/database.py
index a1769e7..b293730 100644
--- a/database.py
+++ b/database.py
@@ -52,7 +52,10 @@ def create_lang_layer(
 
 def insert_lemma(ll_conn: sqlite3.Connection, data: tuple[int, int, int, int]) -> None:
     ll_conn.execute(
-        "INSERT INTO glosses (start, end, difficulty, sense_id, low_confidence) VALUES (?, ?, ?, ?, 0)",
+        """
+        INSERT INTO glosses (start, end, difficulty, sense_id, low_confidence)
+        VALUES (?, ?, ?, ?, 0)
+        """,
         data,
     )
@@ -150,7 +153,10 @@ def create_x_ray_db(
     str_list.append([22, "en", f"{prefs['fandom']}/wiki/%s" if prefs["fandom"] else ""])
     x_ray_conn.execute(
-        "INSERT INTO source (id, label, url, license_label, license_url) VALUES(2, 4, 22, 7, 8)"
+        """
+        INSERT INTO source (id, label, url, license_label, license_url)
+        VALUES(2, 4, 22, 7, 8)
+        """
     )
     x_ray_conn.executemany("INSERT INTO string VALUES(?, ?, ?)", str_list)
     return x_ray_conn, db_path
@@ -176,7 +182,10 @@ def insert_x_entities(
     conn: sqlite3.Connection, data: Iterator[tuple[int, str, int, int]]
 ) -> None:
     conn.executemany(
-        "INSERT INTO entity (id, label, type, count, has_info_card) VALUES(?, ?, ?, ?, 1)",
+        """
+        INSERT INTO entity (id, label, type, count, has_info_card)
+        VALUES(?, ?, ?, ?, 1)
+        """,
         data,
     )
diff --git a/deps.py b/deps.py
index 91278c1..3d6c560 100644
--- a/deps.py
+++ b/deps.py
@@ -17,9 +17,9 @@
     custom_lemmas_folder,
     get_plugin_path,
     get_wiktionary_klld_path,
-    mac_bin_path,
     kindle_db_path,
     load_plugin_json,
+    mac_bin_path,
     run_subprocess,
     use_kindle_ww_db,
     wiktionary_db_path,
@@ -50,13 +50,17 @@ def install_deps(pkg: str, notif: Any) -> None:
         model_version = dep_versions[
             "spacy_trf_model" if pkg.endswith("_trf") else "spacy_cpu_model"
         ]
-        url = f"https://github.com/explosion/spacy-models/releases/download/{pkg}-{model_version}/{pkg}-{model_version}-py3-none-any.whl"
+        url = (
+            "https://github.com/explosion/spacy-models/releases/download/"
+            f"{pkg}-{model_version}/{pkg}-{model_version}-py3-none-any.whl"
+        )
         pip_install(pkg, model_version, url=url, notif=notif)
         if pkg.endswith("_trf"):
             from .config import prefs
 
             pip_install("cupy-wheel", dep_versions["cupy"], notif=notif)
-            # PyTorch's Windows package on pypi.org is CPU build version, reintall the CUDA build version
+            # PyTorch's Windows package on pypi.org is the CPU build version,
+            # reinstall the CUDA build version
             if iswindows or prefs["cuda"] == "cu118":
                 pip_install(
                     "torch",
@@ -159,7 +163,8 @@ def download_word_wise_file(
     notifications.put(
         (
             0,
-            f"Downloading {lemma_lang}-{gloss_lang} {'Kindle' if is_kindle else 'Wiktionary'} file",
+            f"Downloading {lemma_lang}-{gloss_lang} "
+            f"{'Kindle' if is_kindle else 'Wiktionary'} file",
         )
     )
     plugin_path = get_plugin_path()
@@ -179,7 +184,10 @@ def download_word_wise_file(
     if is_kindle:
         klld_path = get_wiktionary_klld_path(plugin_path, lemma_lang, gloss_lang)
         if not klld_path.exists():
-            url = f"{PROFICIENCY_RELEASE_URL}/kll.{lemma_lang}.{gloss_lang}_v{PROFICIENCY_VERSION}.klld.bz2"
+            url = (
+                PROFICIENCY_RELEASE_URL
+                + f"/kll.{lemma_lang}.{gloss_lang}_v{PROFICIENCY_VERSION}.klld.bz2"
+            )
             download_and_extract(url, extract_folder)
diff --git a/dump_lemmas.py b/dump_lemmas.py
index ce3f299..eb94742 100644
--- a/dump_lemmas.py
+++ b/dump_lemmas.py
@@ -35,7 +35,9 @@ def spacy_doc_path(
         is_kindle = False
     py_version = ".".join(platform.python_version_tuple()[:2])
     path = custom_lemmas_folder(plugin_path).joinpath(
-        f"{lemma_lang}/{spacy_model}_{'kindle' if is_kindle else 'wiktionary'}_{gloss_lang}_{model_version}_{py_version}"
+        f"{lemma_lang}/{spacy_model}_"
+        f"{'kindle' if is_kindle else 'wiktionary'}"
+        f"_{gloss_lang}_{model_version}_{py_version}"
     )
     if prefs["use_pos"]:
         if is_phrase:
@@ -135,7 +137,11 @@ def save_spacy_docs(
 
 def create_lemma_patterns_with_pos(lemma_lang, conn, nlp, difficulty_limit):
-    query_sql = "SELECT DISTINCT lemma, lemma_id FROM senses JOIN lemmas ON senses.lemma_id = lemmas.id WHERE enabled = 1"
+    query_sql = """
+    SELECT DISTINCT lemma, lemma_id
+    FROM senses JOIN lemmas ON senses.lemma_id = lemmas.id
+    WHERE enabled = 1
+    """
     if difficulty_limit is not None:
         query_sql += f" AND difficulty <= {difficulty_limit}"
     for lemma, lemma_id in conn.execute(query_sql):
@@ -148,13 +154,22 @@ def create_lemma_patterns_with_pos(lemma_lang, conn, nlp, difficulty_limit):
 
 def create_lemma_patterns_without_pos(conn, nlp, difficulty_limit):
-    query_sql = "SELECT DISTINCT lemma FROM senses JOIN lemmas ON senses.lemma_id = lemmas.id WHERE enabled = 1"
+    query_sql = """
+    SELECT DISTINCT lemma
+    FROM senses JOIN lemmas ON senses.lemma_id = lemmas.id
+    WHERE enabled = 1
+    """
     if difficulty_limit is not None:
         query_sql += f" AND difficulty <= {difficulty_limit}"
     for (lemma,) in conn.execute(query_sql):
         yield nlp.make_doc(lemma)
 
-    query_sql = "SELECT DISTINCT form FROM senses JOIN forms ON senses.lemma_id = forms.lemma_id AND senses.pos = forms.pos WHERE enabled = 1"
+    query_sql = """
+    SELECT DISTINCT form
+    FROM senses JOIN forms
+    ON senses.lemma_id = forms.lemma_id AND senses.pos = forms.pos
+    WHERE enabled = 1
+    """
     if difficulty_limit is not None:
         query_sql += f" AND difficulty <= {difficulty_limit}"
     for (form,) in conn.execute(query_sql):
diff --git a/epub.py b/epub.py
index b899c06..c098a9c 100644
--- a/epub.py
+++ b/epub.py
@@ -265,7 +265,10 @@ def insert_anchor_elements(self, lang: str) -> None:
                 continue
             new_xhtml_str += xhtml_str[last_end:start]
             if isinstance(entity_id, int):
-                new_xhtml_str += f'{entity}'
+                new_xhtml_str += (
+                    f'{entity}'
+                )
             else:
                 new_xhtml_str += self.build_word_wise_tag(entity_id, entity, lang)
             last_end = end
@@ -282,7 +285,9 @@ def insert_anchor_elements(self, lang: str) -> None:
             if self.lemmas:
                 new_xhtml_str = new_xhtml_str.replace(
                     "",
-                    "",
+                    ""
+                    "",
                 )
             f.write(new_xhtml_str)
@@ -297,9 +302,16 @@ def build_word_wise_tag(self, word: str, origin_word: str, lang: str) -> str:
         len_ratio = 3 if lang in CJK_LANGS else 2.5
         word_id = self.lemmas[word]
         if len(short_def) / len(origin_word) > len_ratio:
-            return f'{origin_word}'
+            return (
+                '{origin_word}'
+            )
         else:
-            return f'{origin_word}({short_def})'
+            return (
+                '{origin_word}({short_def}'
+                ")"
+            )
 
     def split_p_tags(self, intro: str) -> str:
         intro = escape(intro)
@@ -327,24 +339,35 @@ def create_x_ray_footnotes(self, prefs: Prefs, lang: str) -> None:
         for entity, data in self.entities.items():
             if custom_data := self.custom_x_ray.get(entity):
                 custom_desc, custom_source_id, _ = custom_data
-                s += f'"
+                )
         return tag_str
 
     def modify_opf(self) -> None:
@@ -417,10 +452,16 @@ def modify_opf(self) -> None:
             image_prefix = f"{self.image_folder.name}/"
         manifest = self.opf_root.find("opf:manifest", NAMESPACES)
         if self.entities:
-            s = f''
+            s = (
+                f''
+            )
             manifest.append(etree.fromstring(s))
         if self.lemmas:
-            s = f''
+            s = (
+                f''
+            )
             manifest.append(etree.fromstring(s))
         for filename in self.image_filenames:
             filename_lower = filename.lower()
@@ -434,7 +475,10 @@ def modify_opf(self) -> None:
                 media_type = "webp"
             else:
                 media_type = Path(filename).suffix.replace(".", "")
-            s = f''
+            s = (
+                f''
+            )
             manifest.append(etree.fromstring(s))
         spine = self.opf_root.find("opf:spine", NAMESPACES)
         if self.entities:
@@ -477,14 +521,18 @@ def query_gloss_with_pos(
         if " " in lemma:
             for data in self.lemmas_conn.execute(  # type: ignore
                 sql
-                + "JOIN forms ON senses.lemma_id = forms.lemma_id AND senses.pos = forms.pos WHERE form = ?",
+                + "JOIN forms ON "
+                + "senses.lemma_id = forms.lemma_id AND senses.pos = forms.pos "
+                + "WHERE form = ?",
                 (lemma,),
             ):
                 lemmas_data.append(data)
         elif lang == "zh":
             for data in self.lemmas_conn.execute(  # type: ignore
                 sql
-                + "JOIN forms ON senses.lemma_id = forms.lemma_id AND senses.pos = forms.pos WHERE form = ? AND forms.pos = ?",
+                + "JOIN forms "
+                + "ON senses.lemma_id = forms.lemma_id AND senses.pos = forms.pos "
+                + "WHERE form = ? AND forms.pos = ?",
                 (lemma, pos),
             ):
                 lemmas_data.append(data)
@@ -499,7 +547,9 @@ def query_gloss_without_pos(
             return [data]
         for data in self.lemmas_conn.execute(  # type: ignore
             sql
-            + "JOIN forms ON senses.lemma_id = forms.lemma_id AND senses.pos = forms.pos WHERE form = ? AND enabled = 1 LIMIT 1",
+            + "JOIN forms "
+            + "ON senses.lemma_id = forms.lemma_id AND senses.pos = forms.pos "
+            + "WHERE form = ? AND enabled = 1 LIMIT 1",
             (lemma,),
         ):
             return [data]

Please read the friendly of how to install pip.

-

If you still have this error, make sure you installed calibre with the binary install command but not from Flathub or Snap Store.

""" + "

Please read the friendly of how to install pip." + "

If you still have this error, make sure you installed calibre " + "with the binary " + "install command but not from Flathub or Snap Store.

" ).format(INSTALL_PYTHON_DOC), job.details + exception, parent, @@ -85,7 +94,8 @@ def subprocess_error(job: Any, parent: Any) -> None: error_dialog( _("Can't find CUDA"), _( - "'Run spaCy with GPU' feature requires CUDA" + "'Run spaCy with GPU' feature requires " + "CUDA" ), job.details + exception, parent, @@ -108,16 +118,20 @@ def module_not_found_error(error: str, parent: Any) -> None: def check_network_error(error: str, parent: Any) -> None: - CALIBRE_PROXY_FAQ = "https://manual.calibre-ebook.com/faq.html#how-do-i-get-calibre-to-use-my-http-proxy" + CALIBRE_PROXY_FAQ = ( + "https://manual.calibre-ebook.com/faq.html" + "#how-do-i-get-calibre-to-use-my-http-proxy" + ) if "check_hostname requires server_hostname" in error or "SSLError" in error: error_dialog( "Cyberspace is not a place beyond the rule of law", _( - """

Read calibre FAQ first then check your proxy environment variables, they should be set by these commands:

-

$ export HTTP_PROXY='http://host:port'

-

$ export HTTPS_PROXY='http://host:port'

-

If you're allergic to terminal, close your proxy and use a VPN.

""" + "

Read calibre FAQ first then check your proxy " + "environment variables, they should be set by these commands:

" + "

$ export HTTP_PROXY='http://host:port'

" + "

$ export HTTPS_PROXY='http://host:port'

" + "

If you're allergic to terminal, close your proxy and use a VPN.

" ).format(CALIBRE_PROXY_FAQ), error, parent, @@ -126,7 +140,8 @@ def check_network_error(error: str, parent: Any) -> None: error_dialog( "It was a pleasure to burn", _( - "Is GitHub/Wikipedia/Fandom blocked by your ISP? You might need to bypass Internet censorship. Please read calibre FAQ." + "Is GitHub/Wikipedia/Fandom blocked by your ISP? You might need to " + "bypass Internet censorship. Please read calibre FAQ." ).format(CALIBRE_PROXY_FAQ), error, parent, @@ -135,7 +150,8 @@ def check_network_error(error: str, parent: Any) -> None: error_dialog( "Tonnerre de Brest!", _( - 'An error occurred, please copy error message then report bug at GitHub.' + "An error occurred, please copy error message then report bug at " + 'GitHub.' ).format(GITHUB_URL), error, parent, @@ -163,7 +179,8 @@ def device_not_found_dialog(parent: Any) -> None: warning_dialog( _("Device not found"), _( - "Definition data will be added when Kindle or Android(requires adb) device is connected." + "Definition data will be added when Kindle or Android(requires adb) " + "device is connected." ), parent, ) @@ -173,7 +190,8 @@ def ww_db_not_found_dialog(parent: Any) -> None: warning_dialog( _("Word Wise database not found"), _( - "Can't find Word Wise database on your device, open a Word Wise enabled book to download this file." + "Can't find Word Wise database on your device, open a Word Wise enabled " + "book to download this file." ), parent, ) @@ -183,7 +201,8 @@ def kindle_epub_dialog(parent: Any) -> None: warning_dialog( _("Kindle doesn't support EPUB"), _( - "Kindle doesn't support EPUB format natively, please convert the book format then try again." + "Kindle doesn't support EPUB format natively, please convert the book " + "format then try again." ), parent, ) diff --git a/import_lemmas.py b/import_lemmas.py index 76aef9e..56b87e1 100644 --- a/import_lemmas.py +++ b/import_lemmas.py @@ -62,7 +62,11 @@ def query_vocabulary_builder(lang: str, db_path: Path) -> dict[str, int]: conn = sqlite3.connect(db_path) words = {} for stem, category, lookups in conn.execute( - "SELECT stem, category, count(*) FROM WORDS JOIN LOOKUPS ON LOOKUPS.word_key = WORDS.id WHERE lang = ? GROUP BY stem", + """ + SELECT stem, category, count(*) + FROM WORDS JOIN LOOKUPS ON LOOKUPS.word_key = WORDS.id + WHERE lang = ? GROUP BY stem + """, (lang,), ): words[stem] = lookups_to_difficulty(lookups, category) @@ -146,7 +150,10 @@ def export_lemmas_job( query_sql = f", {prefs['zh_ipa']}" else: query_sql = ", ipa" - query_sql += " FROM senses JOIN lemmas ON senses.lemma_id = lemmas.id WHERE difficulty <= ?" + query_sql += ( + " FROM senses JOIN lemmas ON senses.lemma_id = lemmas.id " + "WHERE difficulty <= ?" 
+ ) if only_enabled: query_sql += " AND enabled = 1" diff --git a/mediawiki.py b/mediawiki.py index 0baf7f6..31eae4c 100644 --- a/mediawiki.py +++ b/mediawiki.py @@ -43,8 +43,11 @@ def init_db(self, plugin_path: Path, lang: str) -> sqlite3.Connection: db_conn = sqlite3.connect(db_path) db_conn.executescript( """ - CREATE TABLE IF NOT EXISTS titles (title TEXT PRIMARY KEY COLLATE NOCASE, desc_id INTEGER); - CREATE TABLE IF NOT EXISTS descriptions (id INTEGER PRIMARY KEY, description TEXT, wikidata_item TEXT); + CREATE TABLE IF NOT EXISTS titles + (title TEXT PRIMARY KEY COLLATE NOCASE, desc_id INTEGER); + + CREATE TABLE IF NOT EXISTS descriptions + (id INTEGER PRIMARY KEY, description TEXT, wikidata_item TEXT); """ ) return db_conn @@ -68,7 +71,8 @@ def close(self): def add_cache(self, title: str, intro: str, wikidata_item: str | None) -> int: desc_id = 0 for (new_desc_id,) in self.db_conn.execute( - "INSERT INTO descriptions (description, wikidata_item) VALUES(?, ?) RETURNING id", + "INSERT INTO descriptions (description, wikidata_item) " + "VALUES(?, ?) RETURNING id", (intro, wikidata_item), ): desc_id = new_desc_id @@ -82,7 +86,11 @@ def has_cache(self, title: str) -> bool: def get_cache(self, title: str) -> WikipediaCache | None: for desc, wikidata_item in self.db_conn.execute( - "SELECT description, wikidata_item FROM titles JOIN descriptions ON titles.desc_id = descriptions.id WHERE title = ?", + """ + SELECT description, wikidata_item + FROM titles JOIN descriptions ON titles.desc_id = descriptions.id + WHERE title = ? + """, (title,), ): return {"intro": desc, "item_id": wikidata_item} @@ -97,7 +105,11 @@ def redirected_titles(self, title: str) -> list[str]: return [ other_title for (other_title,) in self.db_conn.execute( - "SELECT title FROM titles WHERE desc_id = (SELECT desc_id FROM titles WHERE title = ?) AND title != ?", + """ + SELECT title FROM titles + WHERE title != ? AND + desc_id = (SELECT desc_id FROM titles WHERE title = ?) + """, (title, title), ) ] @@ -227,8 +239,11 @@ def init_db(self, plugin_path: Path, fandom_url: str) -> sqlite3.Connection: db_conn = sqlite3.connect(db_path) db_conn.executescript( """ - CREATE TABLE IF NOT EXISTS titles (title TEXT PRIMARY KEY COLLATE NOCASE, desc_id INTEGER); - CREATE TABLE IF NOT EXISTS descriptions (id INTEGER PRIMARY KEY, description TEXT); + CREATE TABLE IF NOT EXISTS titles + (title TEXT PRIMARY KEY COLLATE NOCASE, desc_id INTEGER); + + CREATE TABLE IF NOT EXISTS descriptions + (id INTEGER PRIMARY KEY, description TEXT); """ ) return db_conn @@ -274,7 +289,10 @@ def has_cache(self, title: str) -> bool: def get_cache(self, title: str) -> str | None: for (desc,) in self.db_conn.execute( - "SELECT description FROM titles JOIN descriptions ON titles.desc_id = descriptions.id WHERE title = ?", + """ + SELECT description FROM titles JOIN descriptions + ON titles.desc_id = descriptions.id WHERE title = ? + """, (title,), ): return desc @@ -289,7 +307,11 @@ def redirected_titles(self, title: str) -> list[str]: return [ other_title for (other_title,) in self.db_conn.execute( - "SELECT title FROM titles WHERE desc_id = (SELECT desc_id FROM titles WHERE title = ?) AND title != ?", + """ + SELECT title FROM titles + WHERE title != ? AND + desc_id = (SELECT desc_id FROM titles WHERE title = ?) 
+ """, (title, title), ) ] @@ -334,7 +356,8 @@ def query(self, page: str, from_disambiguation_title: str | None = None) -> None html = etree.HTML(text) # Remove infobox, quote, references, error for e in html.xpath( - "//table | //aside | //dl | //*[contains(@class, 'reference')] | //span[contains(@class, 'error')]" + "//table | //aside | //dl | //*[contains(@class, 'reference')] | " + "//span[contains(@class, 'error')]" ): e.getparent().remove(e) intro = html.xpath("string()").strip() @@ -395,7 +418,10 @@ def init_db(self, db_path: Path) -> None: self.db_conn = sqlite3.connect(db_path) if create_db: self.db_conn.execute( - "CREATE TABLE wikidata (item TEXT PRIMARY KEY, map_filename TEXT, inception TEXT)" + """ + CREATE TABLE wikidata + (item TEXT PRIMARY KEY, map_filename TEXT, inception TEXT) + """ ) def close(self): @@ -467,7 +493,10 @@ def inception_text(inception_str: str) -> str: inception = datetime.fromisoformat(inception_str) # Python 3.11: datetime.now(timezone.utc) - inception years = (datetime.now() - inception).days // 365 - return f"Inception: {inception.strftime('%d %B %Y').lstrip('0')}({years} years ago)" + return ( + f"Inception: {inception.strftime('%d %B %Y').lstrip('0')}" + f"({years} years ago)" + ) def query_mediawiki( diff --git a/tests/convert.py b/tests/convert.py index 12514eb..087b351 100644 --- a/tests/convert.py +++ b/tests/convert.py @@ -41,7 +41,8 @@ def convert(path, table_sql_list): table_sql_list = [ ( "glosses", - f"SELECT start, difficulty, sense_id FROM glosses ORDER BY start LIMIT {LIMIT}", + "SELECT start, difficulty, sense_id FROM glosses " + f"ORDER BY start LIMIT {LIMIT}", ), ("count", "SELECT count(*) FROM glosses"), ("metadata", "SELECT * FROM metadata"), diff --git a/utils.py b/utils.py index 75220ed..8dcbde2 100644 --- a/utils.py +++ b/utils.py @@ -7,7 +7,6 @@ import sys import webbrowser import zipfile - from pathlib import Path from typing import Any, TypedDict @@ -58,7 +57,11 @@ def run_subprocess( input=input_str, check=True, capture_output=True, - creationflags=subprocess.CREATE_NO_WINDOW if platform.system() == "Windows" else 0, # type: ignore + creationflags=( + subprocess.CREATE_NO_WINDOW # type: ignore + if platform.system() == "Windows" + else 0 + ), ) @@ -86,44 +89,43 @@ def insert_lib_path(path: str) -> None: def insert_installed_libs(plugin_path: Path) -> None: py_v = ".".join(platform.python_version_tuple()[:2]) - insert_lib_path(str(plugin_path.parent.joinpath(f"worddumb-libs-py{py_v}"))) + insert_lib_path(str(plugin_path.parent / f"worddumb-libs-py{py_v}")) def get_plugin_path() -> Path: from calibre.utils.config import config_dir - return Path(config_dir).joinpath("plugins/WordDumb.zip") + return Path(config_dir) / "plugins/WordDumb.zip" def custom_lemmas_folder(plugin_path: Path) -> Path: - return plugin_path.parent.joinpath("worddumb-lemmas") + return plugin_path.parent / "worddumb-lemmas" def use_kindle_ww_db(lemma_lang: str, prefs: Prefs) -> bool: return ( lemma_lang == "en" - and prefs["kindle_gloss_lang"] - in [ - "en", - "zh", - "zh_cn", - ] + and prefs["kindle_gloss_lang"] in ["en", "zh", "zh_cn"] and not prefs["use_wiktionary_for_kindle"] ) def kindle_db_path(plugin_path: Path, lemma_lang: str, prefs: Prefs) -> Path: if use_kindle_ww_db(lemma_lang, prefs): - return custom_lemmas_folder(plugin_path).joinpath( - f"{lemma_lang}/kindle_en_en_v{PROFICIENCY_MAJOR_VERSION}.db" + return ( + custom_lemmas_folder(plugin_path) + / lemma_lang + / f"kindle_en_en_v{PROFICIENCY_MAJOR_VERSION}.db" ) else: return 
wiktionary_db_path(plugin_path, lemma_lang, prefs["kindle_gloss_lang"]) def wiktionary_db_path(plugin_path: Path, lemma_lang: str, gloss_lang: str) -> Path: - return custom_lemmas_folder(plugin_path).joinpath( - f"{lemma_lang}/wiktionary_{lemma_lang}_{gloss_lang}_v{PROFICIENCY_MAJOR_VERSION}.db" + return ( + custom_lemmas_folder(plugin_path) + / lemma_lang + / f"wiktionary_{lemma_lang}_{gloss_lang}_v{PROFICIENCY_MAJOR_VERSION}.db" ) @@ -139,9 +141,10 @@ def get_klld_path(plugin_path: Path) -> Path | None: def get_wiktionary_klld_path( plugin_path: Path, lemma_lang: str, gloss_lang: str ) -> Path: - custom_folder = custom_lemmas_folder(plugin_path) - return custom_folder.joinpath( - f"{lemma_lang}/kll.{lemma_lang}.{gloss_lang}_v{PROFICIENCY_MAJOR_VERSION}.klld" + return ( + custom_lemmas_folder(plugin_path) + / lemma_lang + / f"kll.{lemma_lang}.{gloss_lang}_v{PROFICIENCY_MAJOR_VERSION}.klld" )
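
Note for reviewers (not part of the patch): every hunk above rewraps a long line either by splitting a string into adjacent literals inside parentheses or by moving SQL into a triple-quoted string. Python concatenates adjacent string literals at compile time, so the first pattern changes nothing at runtime as long as the whitespace at each break point is preserved; the triple-quoted SQL does change the string value (it gains newlines and indentation), but SQLite ignores that extra whitespace. A minimal sketch of both string patterns, using hypothetical example values:

    # Adjacent string literals are concatenated at compile time,
    # so the wrapped form builds exactly the same string.
    one_liner = "This book has multiple supported formats. Choose the format you want to use."
    wrapped = (
        "This book has multiple supported formats. Choose the format "
        "you want to use."
    )
    assert one_liner == wrapped

    # Plain literals and f-strings mix freely inside one parenthesized
    # expression; pkg and version are hypothetical values for illustration.
    pkg, version = "en_core_web_trf", "3.6.1"
    url = (
        "https://github.com/explosion/spacy-models/releases/download/"
        f"{pkg}-{version}/{pkg}-{version}-py3-none-any.whl"
    )

The main thing to check in a rewrap like this is the whitespace at each split: dropping the trailing space in a piece such as "Choose the format " would silently fuse two words in the user-visible string.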