From f7523ddb887e226c41105f0e96d8ff2f2c955305 Mon Sep 17 00:00:00 2001
From: Elliott Sales de Andrade
Date: Mon, 26 Aug 2024 00:35:50 -0400
Subject: [PATCH 1/2] Remove redundant definition of _has_webpage_extension

---
 gramps/plugins/webreport/calendar.py | 13 ++-----------
 gramps/plugins/webreport/common.py   |  4 ++--
 gramps/plugins/webreport/webcal.py   |  6 +++---
 3 files changed, 7 insertions(+), 16 deletions(-)

diff --git a/gramps/plugins/webreport/calendar.py b/gramps/plugins/webreport/calendar.py
index 0b17cfdf621..ea725224a70 100644
--- a/gramps/plugins/webreport/calendar.py
+++ b/gramps/plugins/webreport/calendar.py
@@ -57,7 +57,7 @@
 import gramps.plugins.lib.libholiday as libholiday
 from gramps.plugins.webreport.basepage import BasePage
-from gramps.plugins.webreport.common import do_we_have_holidays, _WEB_EXT
+from gramps.plugins.webreport.common import do_we_have_holidays, _has_webpage_extension
 from gramps.plugins.lib.libhtml import Html  # , xml_lang
 from gramps.gui.pluginmanager import GuiPluginManager
@@ -281,7 +281,7 @@ def month_navigation(self, nr_up, year, currentsection):
                     url = url_fname
                     add_subdirs = False
                     if not (url.startswith("http:") or url.startswith("/")):
-                        add_subdirs = not any(url.endswith(ext) for ext in _WEB_EXT)
+                        add_subdirs = not _has_webpage_extension(url)

                     # whether to add subdirs or not???
                     if add_subdirs:
@@ -1348,15 +1348,6 @@ def get_first_day_of_month(year, month):
     return current_date, current_ord, monthinfo


-def _has_webpage_extension(url):
-    """
-    determine if a filename has an extension or not...
-
-    url = filename to be checked
-    """
-    return any(url.endswith(ext) for ext in _WEB_EXT)
-
-
 def get_day_list(event_date, holiday_list, bday_anniv_list, rlocale=glocale):
     """
     Will fill day_list and return it to its caller: calendar_build()
diff --git a/gramps/plugins/webreport/common.py b/gramps/plugins/webreport/common.py
index e5eac1c33ab..df8e233b5da 100644
--- a/gramps/plugins/webreport/common.py
+++ b/gramps/plugins/webreport/common.py
@@ -78,7 +78,7 @@
 # define clear blank line for proper styling
 FULLCLEAR = Html("div", class_="fullclear", inline=True)
 # define all possible web page filename extensions
-_WEB_EXT = [".html", ".htm", ".shtml", ".php", ".cgi"]
+_WEB_EXT = (".html", ".htm", ".shtml", ".php", ".cgi")
 # used to select secured web site or not
 HTTP = "http://"
 HTTPS = "https://"
@@ -708,7 +708,7 @@ def _has_webpage_extension(url):

     @param: url -- filename to be checked
     """
-    return any(url.endswith(ext) for ext in _WEB_EXT)
+    return url.endswith(_WEB_EXT)


 def add_birthdate(dbase, ppl_handle_list, rlocale):
diff --git a/gramps/plugins/webreport/webcal.py b/gramps/plugins/webreport/webcal.py
index d9170e5cd06..a1d7eccdcaa 100644
--- a/gramps/plugins/webreport/webcal.py
+++ b/gramps/plugins/webreport/webcal.py
@@ -98,7 +98,7 @@
 FULLCLEAR = Html("div", class_="fullclear", inline=True)

 # Web page filename extensions
-_WEB_EXT = [".html", ".htm", ".shtml", ".php", ".php3", ".cgi"]
+_WEB_EXT = (".html", ".htm", ".shtml", ".php", ".php3", ".cgi")

 # Calendar stylesheet names
 _CALENDARSCREEN = "calendar-screen.css"
@@ -710,7 +710,7 @@ def month_navigation(self, nr_up, year, currentsection):
                     url = url_fname
                     add_subdirs = False
                     if not (url.startswith("http:") or url.startswith("/")):
-                        add_subdirs = not any(url.endswith(ext) for ext in _WEB_EXT)
+                        add_subdirs = not _has_webpage_extension(url)

                     # whether to add subdirs or not???
                     if add_subdirs:
@@ -2238,7 +2238,7 @@ def _has_webpage_extension(url):

     url = filename to be checked
     """
-    return any(url.endswith(ext) for ext in _WEB_EXT)
+    return url.endswith(_WEB_EXT)


 def get_day_list(event_date, holiday_list, bday_anniv_list, rlocale=glocale):
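
Note: str.startswith() and str.endswith() accept either a single str or a
tuple of str, but not a list, which is why the _WEB_EXT constants above also
change from lists to tuples. A minimal REPL sketch of the equivalence
(illustrative only, not part of the patch):

    >>> _WEB_EXT = (".html", ".htm", ".shtml", ".php", ".cgi")
    >>> any("index.html".endswith(ext) for ext in _WEB_EXT)
    True
    >>> "index.html".endswith(_WEB_EXT)
    True

Passing a list instead of a tuple raises a TypeError.
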
From c3c30f5ffc9cf5d120e374bf159427d0a15535aa Mon Sep 17 00:00:00 2001
From: Elliott Sales de Andrade
Date: Mon, 26 Aug 2024 00:42:23 -0400
Subject: [PATCH 2/2] Simplify multiple calls to str.startswith / str.endswith

These methods have supported tuples to mean "any of the prefixes /
suffixes" since Python 2.5, which can greatly simplify a bunch of `or`'d
conditions.
---
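
Illustration (not part of the diff) of the rewrite applied throughout, using a
hypothetical path value; the doubled parentheses matter, since the inner pair
is what builds the tuple of prefixes:

    path = "https://example.org/addons/SomeAddon.tgz"

    # chained form, as before this patch
    if path.startswith("http://") or path.startswith("https://"):
        print("remote")

    # tuple form, as after this patch
    if path.startswith(("http://", "https://")):
        print("remote")

Without the inner parentheses, path.startswith("http://", "https://") would
pass "https://" as the optional start index and raise a TypeError.
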
 gramps/gen/plug/utils.py             | 11 +++--------
 gramps/gen/utils/grampslocale.py     |  2 +-
 gramps/gui/plug/_windows.py          |  9 ++-------
 gramps/plugins/lib/libgedcom.py      |  8 +++-----
 gramps/plugins/webreport/basepage.py |  6 ++----
 gramps/plugins/webreport/calendar.py |  2 +-
 gramps/plugins/webreport/webcal.py   |  2 +-
 gramps/test/test_util.py             |  2 +-
 po/update_po.py                      | 10 +++++-----
 9 files changed, 19 insertions(+), 33 deletions(-)

diff --git a/gramps/gen/plug/utils.py b/gramps/gen/plug/utils.py
index 96d638d76a6..e8a5a7bb3f1 100644
--- a/gramps/gen/plug/utils.py
+++ b/gramps/gen/plug/utils.py
@@ -333,12 +333,7 @@ def load_addon_file(path, callback=None):
     """
     import tarfile

-    if (
-        path.startswith("http://")
-        or path.startswith("https://")
-        or path.startswith("ftp://")
-        or path.startswith("file://")
-    ):
+    if path.startswith(("http://", "https://", "ftp://", "file://")):
         try:
             fptr = urlopen_maybe_no_check_cert(path)
         except:
@@ -361,9 +356,9 @@ def load_addon_file(path, callback=None):
             return False
     fptr.close()
     # file_obj is either Zipfile or TarFile
-    if path.endswith(".zip") or path.endswith(".ZIP"):
+    if path.endswith((".zip", ".ZIP")):
         file_obj = Zipfile(buffer)
-    elif path.endswith(".tar.gz") or path.endswith(".tgz"):
+    elif path.endswith((".tar.gz", ".tgz")):
         try:
             file_obj = tarfile.open(None, fileobj=buffer)
         except:
diff --git a/gramps/gen/utils/grampslocale.py b/gramps/gen/utils/grampslocale.py
index 6ce2ffc90de..cc9e7776408 100644
--- a/gramps/gen/utils/grampslocale.py
+++ b/gramps/gen/utils/grampslocale.py
@@ -526,7 +526,7 @@ def _get_translation(self, domain=None, localedir=None, languages=None):
                 translator.lang = lang
                 return translator

-            if lang.startswith("en") or lang.startswith("C"):
+            if lang.startswith(("en", "C")):
                 translator = GrampsNullTranslations()
                 translator.lang = "en"
                 return translator
diff --git a/gramps/gui/plug/_windows.py b/gramps/gui/plug/_windows.py
index 5b88b6a671e..3be96dd3364 100644
--- a/gramps/gui/plug/_windows.py
+++ b/gramps/gui/plug/_windows.py
@@ -1254,7 +1254,7 @@ def __refresh_addon_list(self, obj):
         pm.set_pass(total=len(lines), header=_("Reading gramps-project.org..."))
         for line in lines:
             pm.step()
-            if line.startswith("|-") or line.startswith("|}"):
+            if line.startswith(("|-", "|}")):
                 if row != []:
                     rows.append(row)
                 state = "row"
@@ -1308,12 +1308,7 @@ def __refresh_addon_list(self, obj):
                 url = download[1:-1]
                 if " " in url:
                     url, text = url.split(" ", 1)
-                if (
-                    url.endswith(".zip")
-                    or url.endswith(".ZIP")
-                    or url.endswith(".tar.gz")
-                    or url.endswith(".tgz")
-                ):
+                if url.endswith((".zip", ".ZIP", ".tar.gz", ".tgz")):
                     # Then this is ok:
                     self.addon_model.append(
                         row=[
diff --git a/gramps/plugins/lib/libgedcom.py b/gramps/plugins/lib/libgedcom.py
index 5bead8826db..fe2f9edddde 100644
--- a/gramps/plugins/lib/libgedcom.py
+++ b/gramps/plugins/lib/libgedcom.py
@@ -1006,10 +1006,8 @@ def __readahead(self):
                 line_value = line[2].lstrip()
                 # Ignore meaningless @IDENT@ on CONT or CONC line
                 # as noted at http://www.tamurajones.net/IdentCONT.xhtml
-                if line_value.lstrip().startswith(
-                    "CONT "
-                ) or line_value.lstrip().startswith("CONC "):
-                    line = line_value.lstrip().partition(" ")
+                if line_value.startswith(("CONT ", "CONC ")):
+                    line = line_value.partition(" ")
                     tag = line[0]
                     line_value = line[2]
                 else:
@@ -4009,7 +4007,7 @@ def __parse_record(self):
                 self.__check_msgs(_("Top Level"), state, None)
             elif key in ("SOUR", "SOURCE"):
                 self.__parse_source(line.token_text, 1)
-            elif line.data.startswith("SOUR ") or line.data.startswith("SOURCE "):
+            elif line.data.startswith(("SOUR ", "SOURCE ")):
                 # A source formatted in a single line, for example:
                 # 0 @S62@ SOUR This is the title of the source
                 source = self.__find_or_create_source(self.sid_map[line.data])
diff --git a/gramps/plugins/webreport/basepage.py b/gramps/plugins/webreport/basepage.py
index 2f9c2f02c9a..e68f1ceb5a0 100644
--- a/gramps/plugins/webreport/basepage.py
+++ b/gramps/plugins/webreport/basepage.py
@@ -2746,15 +2746,13 @@ def display_url_list(self, urllist=None):

                     # Web Site address
                     elif _type == UrlType.WEB_HOME:
-                        if not (
-                            uri.startswith("http://") or uri.startswith("https://")
-                        ):
+                        if not uri.startswith(("http://", "https://")):
                             url = self.secure_mode
                             uri = url + "%(website)s" % {"website": uri}

                     # FTP server address
                     elif _type == UrlType.WEB_FTP:
-                        if not (uri.startswith("ftp://") or uri.startswith("ftps://")):
+                        if not uri.startswith(("ftp://", "ftps://")):
                             uri = "ftp://%(ftpsite)s" % {"ftpsite": uri}

                     descr = Html("p", html_escape(descr)) + (
diff --git a/gramps/plugins/webreport/calendar.py b/gramps/plugins/webreport/calendar.py
index ea725224a70..587d3358df7 100644
--- a/gramps/plugins/webreport/calendar.py
+++ b/gramps/plugins/webreport/calendar.py
@@ -280,7 +280,7 @@ def month_navigation(self, nr_up, year, currentsection):
                     url_fname = url_fname.lower()
                     url = url_fname
                     add_subdirs = False
-                    if not (url.startswith("http:") or url.startswith("/")):
+                    if not url.startswith(("http:", "/")):
                         add_subdirs = not _has_webpage_extension(url)

                     # whether to add subdirs or not???
diff --git a/gramps/plugins/webreport/webcal.py b/gramps/plugins/webreport/webcal.py
index a1d7eccdcaa..868df04d146 100644
--- a/gramps/plugins/webreport/webcal.py
+++ b/gramps/plugins/webreport/webcal.py
@@ -709,7 +709,7 @@ def month_navigation(self, nr_up, year, currentsection):
                     url_fname = url_fname.lower()
                     url = url_fname
                     add_subdirs = False
-                    if not (url.startswith("http:") or url.startswith("/")):
+                    if not url.startswith(("http:", "/")):
                         add_subdirs = not _has_webpage_extension(url)

                     # whether to add subdirs or not???
diff --git a/gramps/test/test_util.py b/gramps/test/test_util.py
index 09349f38a16..b79fd60ade5 100644
--- a/gramps/test/test_util.py
+++ b/gramps/test/test_util.py
@@ -174,7 +174,7 @@ def delete_tree(dir):
     sdir = os.path.abspath(dir)
     here = _caller_dir() + os.path.sep
     tmp = tempfile.gettempdir() + os.path.sep
-    if not (sdir.startswith(here) or sdir.startswith(tmp)):
+    if not sdir.startswith((here, tmp)):
         raise TestError("%r is not a subdir of here (%r) or %r" % (dir, here, tmp))
     shutil.rmtree(sdir)

diff --git a/po/update_po.py b/po/update_po.py
index a17eda5573a..5b859ea40d6 100644
--- a/po/update_po.py
+++ b/po/update_po.py
@@ -312,7 +312,7 @@ def create_filesfile():
         for filename in os.listdir(dirpath):
             name = os.path.split(filename)[1]
-            if name.endswith(".py") or name.endswith(".glade"):
+            if name.endswith((".py", ".glade")):
                 full_filename = os.path.join(dirpath, filename)
                 # Skip the file if in POTFILES.skip
                 if full_filename[lentopdir:] in notinfiles:
@@ -430,13 +430,13 @@ def xml_fragments():
         tokens = tokenize(fp.readline)
         in_string = False
         for _token, _text, _start, _end, _line in tokens:
-            if _text.startswith('"""') or _text.startswith("'''"):
+            if _text.startswith(('"""', "'''")):
                 _text = _text[3:]
-            elif _text.startswith('"') or _text.startswith("'"):
+            elif _text.startswith(('"', "'")):
                 _text = _text[1:]
-            if _text.endswith('"""') or _text.endswith("'''"):
+            if _text.endswith(('"""', "'''")):
                 _text = _text[:-3]
-            elif _text.endswith('"') or _text.endswith("'"):
+            elif _text.endswith(('"', "'")):
                 _text = _text[:-1]
             if _token == STRING and not in_string:
                 in_string = True