1
0
forked from 0ad/0ad

Use PEP 8 naming conventions for i18n tools

This commit is contained in:
Dunedan 2024-09-03 13:50:16 +02:00
parent 80f808df4a
commit 4a049c5f3a
Signed by untrusted user: Dunedan
GPG Key ID: 885B16854284E0B2
14 changed files with 341 additions and 335 deletions

View File

@ -133,16 +133,16 @@ pipeline {
stage("Update translations") { stage("Update translations") {
steps { steps {
ws("workspace/nightly-svn") { ws("workspace/nightly-svn") {
bat "cd source\\tools\\i18n && python updateTemplates.py" bat "cd source\\tools\\i18n && python update_templates.py"
withCredentials([string(credentialsId: 'TX_TOKEN', variable: 'TX_TOKEN')]) { withCredentials([string(credentialsId: 'TX_TOKEN', variable: 'TX_TOKEN')]) {
bat "cd source\\tools\\i18n && python pullTranslations.py" bat "cd source\\tools\\i18n && python pull_translations.py"
} }
bat "cd source\\tools\\i18n && python generateDebugTranslation.py --long" bat "cd source\\tools\\i18n && python generate_debug_translation.py --long"
bat "cd source\\tools\\i18n && python cleanTranslationFiles.py" bat "cd source\\tools\\i18n && python clean_translation_files.py"
script { if (!params.NEW_REPO) { script { if (!params.NEW_REPO) {
bat "python source\\tools\\i18n\\checkDiff.py --verbose" bat "python source\\tools\\i18n\\check_diff.py --verbose"
}} }}
bat "cd source\\tools\\i18n && python creditTranslators.py" bat "cd source\\tools\\i18n && python credit_translators.py"
} }
} }
} }

View File

@ -23,12 +23,12 @@ import os
import subprocess import subprocess
from typing import List from typing import List
from i18n_helper import projectRootDirectory from i18n_helper import PROJECT_ROOT_DIRECTORY
def get_diff(): def get_diff():
"""Return a diff using svn diff.""" """Return a diff using svn diff."""
os.chdir(projectRootDirectory) os.chdir(PROJECT_ROOT_DIRECTORY)
diff_process = subprocess.run(["svn", "diff", "binaries"], capture_output=True, check=False) diff_process = subprocess.run(["svn", "diff", "binaries"], capture_output=True, check=False)
if diff_process.returncode != 0: if diff_process.returncode != 0:

View File

@ -21,9 +21,9 @@ import os
import re import re
import sys import sys
from i18n_helper import l10nFolderName, projectRootDirectory from i18n_helper import L10N_FOLDER_NAME, PROJECT_ROOT_DIRECTORY
from i18n_helper.catalog import Catalog from i18n_helper.catalog import Catalog
from i18n_helper.globber import getCatalogs from i18n_helper.globber import get_catalogs
VERBOSE = 0 VERBOSE = 0
@ -36,76 +36,76 @@ class MessageChecker:
self.regex = re.compile(regex, re.IGNORECASE) self.regex = re.compile(regex, re.IGNORECASE)
self.human_name = human_name self.human_name = human_name
def check(self, inputFilePath, templateMessage, translatedCatalogs): def check(self, input_file_path, template_message, translated_catalogs):
patterns = set( patterns = set(
self.regex.findall( self.regex.findall(
templateMessage.id[0] if templateMessage.pluralizable else templateMessage.id template_message.id[0] if template_message.pluralizable else template_message.id
) )
) )
# As a sanity check, verify that the template message is coherent. # As a sanity check, verify that the template message is coherent.
# Note that these tend to be false positives. # Note that these tend to be false positives.
# TODO: the pssible tags are usually comments, we ought be able to find them. # TODO: the pssible tags are usually comments, we ought be able to find them.
if templateMessage.pluralizable: if template_message.pluralizable:
pluralUrls = set(self.regex.findall(templateMessage.id[1])) plural_urls = set(self.regex.findall(template_message.id[1]))
if pluralUrls.difference(patterns): if plural_urls.difference(patterns):
print( print(
f"{inputFilePath} - Different {self.human_name} in " f"{input_file_path} - Different {self.human_name} in "
f"singular and plural source strings " f"singular and plural source strings "
f"for '{templateMessage}' in '{inputFilePath}'" f"for '{template_message}' in '{input_file_path}'"
) )
for translationCatalog in translatedCatalogs: for translation_catalog in translated_catalogs:
translationMessage = translationCatalog.get( translation_message = translation_catalog.get(
templateMessage.id, templateMessage.context template_message.id, template_message.context
) )
if not translationMessage: if not translation_message:
continue continue
translatedPatterns = set( translated_patterns = set(
self.regex.findall( self.regex.findall(
translationMessage.string[0] translation_message.string[0]
if translationMessage.pluralizable if translation_message.pluralizable
else translationMessage.string else translation_message.string
) )
) )
unknown_patterns = translatedPatterns.difference(patterns) unknown_patterns = translated_patterns.difference(patterns)
if unknown_patterns: if unknown_patterns:
print( print(
f'{inputFilePath} - {translationCatalog.locale}: ' f'{input_file_path} - {translation_catalog.locale}: '
f'Found unknown {self.human_name} ' f'Found unknown {self.human_name} '
f'{", ".join(["`" + x + "`" for x in unknown_patterns])} ' f'{", ".join(["`" + x + "`" for x in unknown_patterns])} '
f'in the translation which do not match any of the URLs ' f'in the translation which do not match any of the URLs '
f'in the template: {", ".join(["`" + x + "`" for x in patterns])}' f'in the template: {", ".join(["`" + x + "`" for x in patterns])}'
) )
if templateMessage.pluralizable and translationMessage.pluralizable: if template_message.pluralizable and translation_message.pluralizable:
for indx, val in enumerate(translationMessage.string): for indx, val in enumerate(translation_message.string):
if indx == 0: if indx == 0:
continue continue
translatedPatternsMulti = set(self.regex.findall(val)) translated_patterns_multi = set(self.regex.findall(val))
unknown_patterns_multi = translatedPatternsMulti.difference(pluralUrls) unknown_patterns_multi = translated_patterns_multi.difference(plural_urls)
if unknown_patterns_multi: if unknown_patterns_multi:
print( print(
f'{inputFilePath} - {translationCatalog.locale}: ' f'{input_file_path} - {translation_catalog.locale}: '
f'Found unknown {self.human_name} ' f'Found unknown {self.human_name} '
f'{", ".join(["`" + x + "`" for x in unknown_patterns_multi])} ' f'{", ".join(["`" + x + "`" for x in unknown_patterns_multi])} '
f'in the pluralised translation which do not ' f'in the pluralised translation which do not '
f'match any of the URLs in the template: ' f'match any of the URLs in the template: '
f'{", ".join(["`" + x + "`" for x in pluralUrls])}' f'{", ".join(["`" + x + "`" for x in plural_urls])}'
) )
def check_translations(inputFilePath): def check_translations(input_file_path):
if VERBOSE: if VERBOSE:
print(f"Checking {inputFilePath}") print(f"Checking {input_file_path}")
templateCatalog = Catalog.readFrom(inputFilePath) template_catalog = Catalog.read_from(input_file_path)
# If language codes were specified on the command line, filter by those. # If language codes were specified on the command line, filter by those.
filters = sys.argv[1:] filters = sys.argv[1:]
# Load existing translation catalogs. # Load existing translation catalogs.
existingTranslationCatalogs = getCatalogs(inputFilePath, filters) existing_translation_catalogs = get_catalogs(input_file_path, filters)
spam = MessageChecker("url", r"https?://(?:[a-z0-9-_$@./&+]|(?:%[0-9a-fA-F][0-9a-fA-F]))+") spam = MessageChecker("url", r"https?://(?:[a-z0-9-_$@./&+]|(?:%[0-9a-fA-F][0-9a-fA-F]))+")
sprintf = MessageChecker("sprintf", r"%\([^)]+\)s") sprintf = MessageChecker("sprintf", r"%\([^)]+\)s")
@ -115,37 +115,37 @@ def check_translations(inputFilePath):
# Loop through all messages in the .POT catalog for URLs. # Loop through all messages in the .POT catalog for URLs.
# For each, check for the corresponding key in the .PO catalogs. # For each, check for the corresponding key in the .PO catalogs.
# If found, check that URLS in the .PO keys are the same as those in the .POT key. # If found, check that URLS in the .PO keys are the same as those in the .POT key.
for templateMessage in templateCatalog: for template_message in template_catalog:
spam.check(inputFilePath, templateMessage, existingTranslationCatalogs) spam.check(input_file_path, template_message, existing_translation_catalogs)
sprintf.check(inputFilePath, templateMessage, existingTranslationCatalogs) sprintf.check(input_file_path, template_message, existing_translation_catalogs)
tags.check(inputFilePath, templateMessage, existingTranslationCatalogs) tags.check(input_file_path, template_message, existing_translation_catalogs)
if VERBOSE: if VERBOSE:
print(f"Done checking {inputFilePath}") print(f"Done checking {input_file_path}")
def main(): def main():
print( print(
"\n\tWARNING: Remember to regenerate the POT files with “updateTemplates.py” " "\n\tWARNING: Remember to regenerate the POT files with “update_templates.py” "
"before you run this script.\n\tPOT files are not in the repository.\n" "before you run this script.\n\tPOT files are not in the repository.\n"
) )
foundPots = 0 found_pots = 0
for root, _folders, filenames in os.walk(projectRootDirectory): for root, _folders, filenames in os.walk(PROJECT_ROOT_DIRECTORY):
for filename in filenames: for filename in filenames:
if ( if (
len(filename) > 4 len(filename) > 4
and filename[-4:] == ".pot" and filename[-4:] == ".pot"
and os.path.basename(root) == l10nFolderName and os.path.basename(root) == L10N_FOLDER_NAME
): ):
foundPots += 1 found_pots += 1
multiprocessing.Process( multiprocessing.Process(
target=check_translations, args=(os.path.join(root, filename),) target=check_translations, args=(os.path.join(root, filename),)
).start() ).start()
if foundPots == 0: if found_pots == 0:
print( print(
"This script did not work because no '.pot' files were found. " "This script did not work because no '.pot' files were found. "
"Please run 'updateTemplates.py' to generate the '.pot' files, " "Please run 'update_templates.py' to generate the '.pot' files, "
"and run 'pullTranslations.py' to pull the latest translations from Transifex. " "and run 'pull_translations.py' to pull the latest translations from Transifex. "
"Then you can run this script to check for spam in translations." "Then you can run this script to check for spam in translations."
) )

View File

@ -33,19 +33,19 @@ import os
import re import re
import sys import sys
from i18n_helper import l10nFolderName, projectRootDirectory, transifexClientFolder from i18n_helper import L10N_FOLDER_NAME, PROJECT_ROOT_DIRECTORY, TRANSIFEX_CLIENT_FOLDER
def main(): def main():
translatorMatch = re.compile(r"^(#\s+[^,<]*)\s+<.*>(.*)") translator_match = re.compile(r"^(#\s+[^,<]*)\s+<.*>(.*)")
lastTranslatorMatch = re.compile(r"^(\"Last-Translator:[^,<]*)\s+<.*>(.*)") last_translator_match = re.compile(r"^(\"Last-Translator:[^,<]*)\s+<.*>(.*)")
for root, folders, _ in os.walk(projectRootDirectory): for root, folders, _ in os.walk(PROJECT_ROOT_DIRECTORY):
for folder in folders: for folder in folders:
if folder != l10nFolderName: if folder != L10N_FOLDER_NAME:
continue continue
if not os.path.exists(os.path.join(root, folder, transifexClientFolder)): if not os.path.exists(os.path.join(root, folder, TRANSIFEX_CLIENT_FOLDER)):
continue continue
path = os.path.join(root, folder, "*.po") path = os.path.join(root, folder, "*.po")
@ -59,16 +59,16 @@ def main():
if reached: if reached:
if line == "# \n": if line == "# \n":
line = "" line = ""
m = translatorMatch.match(line) m = translator_match.match(line)
if m: if m:
if m.group(1) in usernames: if m.group(1) in usernames:
line = "" line = ""
else: else:
line = m.group(1) + m.group(2) + "\n" line = m.group(1) + m.group(2) + "\n"
usernames.append(m.group(1)) usernames.append(m.group(1))
m2 = lastTranslatorMatch.match(line) m2 = last_translator_match.match(line)
if m2: if m2:
line = re.sub(lastTranslatorMatch, r"\1\2", line) line = re.sub(last_translator_match, r"\1\2", line)
elif line.strip() == "# Translators:": elif line.strip() == "# Translators:":
reached = True reached = True
sys.stdout.write(line) sys.stdout.write(line)

View File

@ -27,7 +27,7 @@ automatic deletion. This has not been needed so far. A possibility would be to a
optional boolean entry to the dictionary containing the name. optional boolean entry to the dictionary containing the name.
Translatable strings will be extracted from the generated file, so this should be run Translatable strings will be extracted from the generated file, so this should be run
once before updateTemplates.py. once before update_templates.py.
""" """
import json import json
@ -37,20 +37,20 @@ from collections import defaultdict
from pathlib import Path from pathlib import Path
from babel import Locale, UnknownLocaleError from babel import Locale, UnknownLocaleError
from i18n_helper import l10nFolderName, projectRootDirectory, transifexClientFolder from i18n_helper import L10N_FOLDER_NAME, PROJECT_ROOT_DIRECTORY, TRANSIFEX_CLIENT_FOLDER
poLocations = [] po_locations = []
for root, folders, _filenames in os.walk(projectRootDirectory): for root, folders, _filenames in os.walk(PROJECT_ROOT_DIRECTORY):
for folder in folders: for folder in folders:
if folder != l10nFolderName: if folder != L10N_FOLDER_NAME:
continue continue
if os.path.exists(os.path.join(root, folder, transifexClientFolder)): if os.path.exists(os.path.join(root, folder, TRANSIFEX_CLIENT_FOLDER)):
poLocations.append(os.path.join(root, folder)) po_locations.append(os.path.join(root, folder))
creditsLocation = os.path.join( credits_location = os.path.join(
projectRootDirectory, PROJECT_ROOT_DIRECTORY,
"binaries", "binaries",
"data", "data",
"mods", "mods",
@ -62,19 +62,19 @@ creditsLocation = os.path.join(
) )
# This dictionary will hold creditors lists for each language, indexed by code # This dictionary will hold creditors lists for each language, indexed by code
langsLists = defaultdict(list) langs_lists = defaultdict(list)
# Create the new JSON data # Create the new JSON data
newJSONData = {"Title": "Translators", "Content": []} new_json_data = {"Title": "Translators", "Content": []}
# Now go through the list of languages and search the .po files for people # Now go through the list of languages and search the .po files for people
# Prepare some regexes # Prepare some regexes
translatorMatch = re.compile(r"^#\s+([^,<]*)") translator_match = re.compile(r"^#\s+([^,<]*)")
deletedUsernameMatch = re.compile(r"[0-9a-f]{32}(_[0-9a-f]{7})?") deleted_username_match = re.compile(r"[0-9a-f]{32}(_[0-9a-f]{7})?")
# Search # Search
for location in poLocations: for location in po_locations:
files = Path(location).glob("*.po") files = Path(location).glob("*.po")
for file in files: for file in files:
@ -84,46 +84,46 @@ for location in poLocations:
if lang in ("debug", "long"): if lang in ("debug", "long"):
continue continue
with file.open(encoding="utf-8") as poFile: with file.open(encoding="utf-8") as po_file:
reached = False reached = False
for line in poFile: for line in po_file:
if reached: if reached:
m = translatorMatch.match(line) m = translator_match.match(line)
if not m: if not m:
break break
username = m.group(1) username = m.group(1)
if not deletedUsernameMatch.fullmatch(username): if not deleted_username_match.fullmatch(username):
langsLists[lang].append(username) langs_lists[lang].append(username)
if line.strip() == "# Translators:": if line.strip() == "# Translators:":
reached = True reached = True
# Sort translator names and remove duplicates # Sort translator names and remove duplicates
# Sorting should ignore case, but prefer versions of names starting # Sorting should ignore case, but prefer versions of names starting
# with an upper case letter to have a neat credits list. # with an upper case letter to have a neat credits list.
for lang in langsLists: for lang in langs_lists:
translators = {} translators = {}
for name in sorted(langsLists[lang], reverse=True): for name in sorted(langs_lists[lang], reverse=True):
if name.lower() not in translators or name.istitle(): if name.lower() not in translators or name.istitle():
translators[name.lower()] = name translators[name.lower()] = name
langsLists[lang] = sorted(translators.values(), key=lambda s: s.lower()) langs_lists[lang] = sorted(translators.values(), key=lambda s: s.lower())
# Now insert the new data into the new JSON file # Now insert the new data into the new JSON file
for langCode, langList in sorted(langsLists.items()): for lang_code, lang_list in sorted(langs_lists.items()):
try: try:
lang_name = Locale.parse(langCode).english_name lang_name = Locale.parse(lang_code).english_name
except UnknownLocaleError: except UnknownLocaleError:
lang_name = Locale.parse("en").languages.get(langCode) lang_name = Locale.parse("en").languages.get(lang_code)
if not lang_name: if not lang_name:
raise raise
translators = [{"name": name} for name in langList] translators = [{"name": name} for name in lang_list]
newJSONData["Content"].append({"LangName": lang_name, "List": translators}) new_json_data["Content"].append({"LangName": lang_name, "List": translators})
# Sort languages by their English names # Sort languages by their English names
newJSONData["Content"] = sorted(newJSONData["Content"], key=lambda x: x["LangName"]) new_json_data["Content"] = sorted(new_json_data["Content"], key=lambda x: x["LangName"])
# Save the JSON data to the credits file # Save the JSON data to the credits file
with open(creditsLocation, "w", encoding="utf-8") as creditsFile: with open(credits_location, "w", encoding="utf-8") as credits_file:
json.dump(newJSONData, creditsFile, indent=4) json.dump(new_json_data, credits_file, indent=4)

View File

@ -24,7 +24,7 @@
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import codecs import codecs
import json as jsonParser import json
import os import os
import re import re
import sys import sys
@ -54,8 +54,8 @@ def pathmatch(mask, path):
class Extractor: class Extractor:
def __init__(self, directoryPath, filemasks, options): def __init__(self, directory_path, filemasks, options):
self.directoryPath = directoryPath self.directoryPath = directory_path
self.options = options self.options = options
if isinstance(filemasks, dict): if isinstance(filemasks, dict):
@ -73,8 +73,8 @@ class Extractor:
:rtype: ``iterator`` :rtype: ``iterator``
""" """
empty_string_pattern = re.compile(r"^\s*$") empty_string_pattern = re.compile(r"^\s*$")
directoryAbsolutePath = os.path.abspath(self.directoryPath) directory_absolute_path = os.path.abspath(self.directoryPath)
for root, folders, filenames in os.walk(directoryAbsolutePath): for root, folders, filenames in os.walk(directory_absolute_path):
for subdir in folders: for subdir in folders:
if subdir.startswith((".", "_")): if subdir.startswith((".", "_")):
folders.remove(subdir) folders.remove(subdir)
@ -90,14 +90,14 @@ class Extractor:
else: else:
for filemask in self.includeMasks: for filemask in self.includeMasks:
if pathmatch(filemask, filename): if pathmatch(filemask, filename):
filepath = os.path.join(directoryAbsolutePath, filename) filepath = os.path.join(directory_absolute_path, filename)
for ( for (
message, message,
plural, plural,
context, context,
position, position,
comments, comments,
) in self.extractFromFile(filepath): ) in self.extract_from_file(filepath):
if empty_string_pattern.match(message): if empty_string_pattern.match(message):
continue continue
@ -105,7 +105,7 @@ class Extractor:
filename = "\u2068" + filename + "\u2069" filename = "\u2068" + filename + "\u2069"
yield message, plural, context, (filename, position), comments yield message, plural, context, (filename, position), comments
def extractFromFile(self, filepath): def extract_from_file(self, filepath):
"""Extract messages from a specific file. """Extract messages from a specific file.
:return: An iterator over ``(message, plural, context, position, comments)`` tuples. :return: An iterator over ``(message, plural, context, position, comments)`` tuples.
@ -113,7 +113,7 @@ class Extractor:
""" """
class javascript(Extractor): class JavascriptExtractor(Extractor):
"""Extract messages from JavaScript source code.""" """Extract messages from JavaScript source code."""
empty_msgid_warning = ( empty_msgid_warning = (
@ -121,7 +121,7 @@ class javascript(Extractor):
"returns the header entry with meta information, not the empty string." "returns the header entry with meta information, not the empty string."
) )
def extractJavascriptFromFile(self, fileObject): def extract_javascript_from_file(self, file_object):
from babel.messages.jslexer import tokenize, unquote_string from babel.messages.jslexer import tokenize, unquote_string
funcname = message_lineno = None funcname = message_lineno = None
@ -134,7 +134,7 @@ class javascript(Extractor):
comment_tags = self.options.get("commentTags", []) comment_tags = self.options.get("commentTags", [])
keywords = self.options.get("keywords", {}).keys() keywords = self.options.get("keywords", {}).keys()
for token in tokenize(fileObject.read(), dotted=False): for token in tokenize(file_object.read(), dotted=False):
if token.type == "operator" and ( if token.type == "operator" and (
token.value == "(" or (call_stack != -1 and (token.value in ("[", "{"))) token.value == "(" or (call_stack != -1 and (token.value in ("[", "{")))
): ):
@ -236,9 +236,11 @@ class javascript(Extractor):
last_token = token last_token = token
def extractFromFile(self, filepath): def extract_from_file(self, filepath):
with codecs.open(filepath, "r", encoding="utf-8-sig") as fileObject: with codecs.open(filepath, "r", encoding="utf-8-sig") as file_object:
for lineno, funcname, messages, comments in self.extractJavascriptFromFile(fileObject): for lineno, funcname, messages, comments in self.extract_javascript_from_file(
file_object
):
spec = self.options.get("keywords", {})[funcname] or (1,) if funcname else (1,) spec = self.options.get("keywords", {})[funcname] or (1,) if funcname else (1,)
if not isinstance(messages, (list, tuple)): if not isinstance(messages, (list, tuple)):
messages = [messages] messages = [messages]
@ -276,7 +278,7 @@ class javascript(Extractor):
if not messages[first_msg_index]: if not messages[first_msg_index]:
# An empty string msgid isn't valid, emit a warning # An empty string msgid isn't valid, emit a warning
where = "%s:%i" % ( where = "%s:%i" % (
hasattr(fileObject, "name") and fileObject.name or "(unknown)", hasattr(file_object, "name") and file_object.name or "(unknown)",
lineno, lineno,
) )
print(self.empty_msgid_warning % where, file=sys.stderr) print(self.empty_msgid_warning % where, file=sys.stderr)
@ -291,54 +293,54 @@ class javascript(Extractor):
yield message, plural, context, lineno, comments yield message, plural, context, lineno, comments
class cpp(javascript): class CppExtractor(JavascriptExtractor):
"""Extract messages from C++ source code.""" """Extract messages from C++ source code."""
class txt(Extractor): class TXTExtractor(Extractor):
"""Extract messages from plain text files.""" """Extract messages from plain text files."""
def extractFromFile(self, filepath): def extract_from_file(self, filepath):
with codecs.open(filepath, "r", encoding="utf-8-sig") as fileObject: with codecs.open(filepath, "r", encoding="utf-8-sig") as file_object:
for lineno, line in enumerate( for lineno, line in enumerate(
[line.strip("\n\r") for line in fileObject.readlines()], start=1 [line.strip("\n\r") for line in file_object.readlines()], start=1
): ):
if line: if line:
yield line, None, None, lineno, [] yield line, None, None, lineno, []
class json(Extractor): class JsonExtractor(Extractor):
"""Extract messages from JSON files.""" """Extract messages from JSON files."""
def __init__(self, directoryPath=None, filemasks=None, options=None): def __init__(self, directory_path=None, filemasks=None, options=None):
if options is None: if options is None:
options = {} options = {}
if filemasks is None: if filemasks is None:
filemasks = [] filemasks = []
super().__init__(directoryPath, filemasks, options) super().__init__(directory_path, filemasks, options)
self.keywords = self.options.get("keywords", {}) self.keywords = self.options.get("keywords", {})
self.context = self.options.get("context", None) self.context = self.options.get("context", None)
self.comments = self.options.get("comments", []) self.comments = self.options.get("comments", [])
def setOptions(self, options): def set_options(self, options):
self.options = options self.options = options
self.keywords = self.options.get("keywords", {}) self.keywords = self.options.get("keywords", {})
self.context = self.options.get("context", None) self.context = self.options.get("context", None)
self.comments = self.options.get("comments", []) self.comments = self.options.get("comments", [])
def extractFromFile(self, filepath): def extract_from_file(self, filepath):
with codecs.open(filepath, "r", "utf-8") as fileObject: with codecs.open(filepath, "r", "utf-8") as file_object:
for message, context in self.extractFromString(fileObject.read()): for message, context in self.extract_from_string(file_object.read()):
yield message, None, context, None, self.comments yield message, None, context, None, self.comments
def extractFromString(self, string): def extract_from_string(self, string):
jsonDocument = jsonParser.loads(string) json_document = json.loads(string)
if isinstance(jsonDocument, list): if isinstance(json_document, list):
for message, context in self.parseList(jsonDocument): for message, context in self.parse_list(json_document):
if message: # Skip empty strings. if message: # Skip empty strings.
yield message, context yield message, context
elif isinstance(jsonDocument, dict): elif isinstance(json_document, dict):
for message, context in self.parseDictionary(jsonDocument): for message, context in self.parse_dictionary(json_document):
if message: # Skip empty strings. if message: # Skip empty strings.
yield message, context yield message, context
else: else:
@ -347,22 +349,22 @@ class json(Extractor):
"You must extend the JSON extractor to support it." "You must extend the JSON extractor to support it."
) )
def parseList(self, itemsList): def parse_list(self, items_list):
for listItem in itemsList: for list_item in items_list:
if isinstance(listItem, list): if isinstance(list_item, list):
for message, context in self.parseList(listItem): for message, context in self.parse_list(list_item):
yield message, context yield message, context
elif isinstance(listItem, dict): elif isinstance(list_item, dict):
for message, context in self.parseDictionary(listItem): for message, context in self.parse_dictionary(list_item):
yield message, context yield message, context
def parseDictionary(self, dictionary): def parse_dictionary(self, dictionary):
for keyword in dictionary: for keyword in dictionary:
if keyword in self.keywords: if keyword in self.keywords:
if isinstance(dictionary[keyword], str): if isinstance(dictionary[keyword], str):
yield self.extractString(dictionary[keyword], keyword) yield self.extract_string(dictionary[keyword], keyword)
elif isinstance(dictionary[keyword], list): elif isinstance(dictionary[keyword], list):
for message, context in self.extractList(dictionary[keyword], keyword): for message, context in self.extract_list(dictionary[keyword], keyword):
yield message, context yield message, context
elif isinstance(dictionary[keyword], dict): elif isinstance(dictionary[keyword], dict):
extract = None extract = None
@ -370,22 +372,22 @@ class json(Extractor):
"extractFromInnerKeys" in self.keywords[keyword] "extractFromInnerKeys" in self.keywords[keyword]
and self.keywords[keyword]["extractFromInnerKeys"] and self.keywords[keyword]["extractFromInnerKeys"]
): ):
for message, context in self.extractDictionaryInnerKeys( for message, context in self.extract_dictionary_inner_keys(
dictionary[keyword], keyword dictionary[keyword], keyword
): ):
yield message, context yield message, context
else: else:
extract = self.extractDictionary(dictionary[keyword], keyword) extract = self.extract_dictionary(dictionary[keyword], keyword)
if extract: if extract:
yield extract yield extract
elif isinstance(dictionary[keyword], list): elif isinstance(dictionary[keyword], list):
for message, context in self.parseList(dictionary[keyword]): for message, context in self.parse_list(dictionary[keyword]):
yield message, context yield message, context
elif isinstance(dictionary[keyword], dict): elif isinstance(dictionary[keyword], dict):
for message, context in self.parseDictionary(dictionary[keyword]): for message, context in self.parse_dictionary(dictionary[keyword]):
yield message, context yield message, context
def extractString(self, string, keyword): def extract_string(self, string, keyword):
context = None context = None
if "tagAsContext" in self.keywords[keyword]: if "tagAsContext" in self.keywords[keyword]:
context = keyword context = keyword
@ -395,16 +397,16 @@ class json(Extractor):
context = self.context context = self.context
return string, context return string, context
def extractList(self, itemsList, keyword): def extract_list(self, items_list, keyword):
for listItem in itemsList: for list_item in items_list:
if isinstance(listItem, str): if isinstance(list_item, str):
yield self.extractString(listItem, keyword) yield self.extract_string(list_item, keyword)
elif isinstance(listItem, dict): elif isinstance(list_item, dict):
extract = self.extractDictionary(listItem[keyword], keyword) extract = self.extract_dictionary(list_item[keyword], keyword)
if extract: if extract:
yield extract yield extract
def extractDictionary(self, dictionary, keyword): def extract_dictionary(self, dictionary, keyword):
message = dictionary.get("_string", None) message = dictionary.get("_string", None)
if message and isinstance(message, str): if message and isinstance(message, str):
context = None context = None
@ -419,45 +421,47 @@ class json(Extractor):
return message, context return message, context
return None return None
def extractDictionaryInnerKeys(self, dictionary, keyword): def extract_dictionary_inner_keys(self, dictionary, keyword):
for innerKeyword in dictionary: for inner_keyword in dictionary:
if isinstance(dictionary[innerKeyword], str): if isinstance(dictionary[inner_keyword], str):
yield self.extractString(dictionary[innerKeyword], keyword) yield self.extract_string(dictionary[inner_keyword], keyword)
elif isinstance(dictionary[innerKeyword], list): elif isinstance(dictionary[inner_keyword], list):
yield from self.extractList(dictionary[innerKeyword], keyword) yield from self.extract_list(dictionary[inner_keyword], keyword)
elif isinstance(dictionary[innerKeyword], dict): elif isinstance(dictionary[inner_keyword], dict):
extract = self.extractDictionary(dictionary[innerKeyword], keyword) extract = self.extract_dictionary(dictionary[inner_keyword], keyword)
if extract: if extract:
yield extract yield extract
class xml(Extractor): class XmlExtractor(Extractor):
"""Extract messages from XML files.""" """Extract messages from XML files."""
def __init__(self, directoryPath, filemasks, options): def __init__(self, directory_path, filemasks, options):
super().__init__(directoryPath, filemasks, options) super().__init__(directory_path, filemasks, options)
self.keywords = self.options.get("keywords", {}) self.keywords = self.options.get("keywords", {})
self.jsonExtractor = None self.jsonExtractor = None
def getJsonExtractor(self): def get_json_extractor(self):
if not self.jsonExtractor: if not self.jsonExtractor:
self.jsonExtractor = json() self.jsonExtractor = JsonExtractor()
return self.jsonExtractor return self.jsonExtractor
def extractFromFile(self, filepath): def extract_from_file(self, filepath):
from lxml import etree from lxml import etree
with codecs.open(filepath, "r", encoding="utf-8-sig") as fileObject: with codecs.open(filepath, "r", encoding="utf-8-sig") as file_object:
xmlDocument = etree.parse(fileObject) xml_document = etree.parse(file_object)
for keyword in self.keywords: for keyword in self.keywords:
for element in xmlDocument.iter(keyword): for element in xml_document.iter(keyword):
lineno = element.sourceline lineno = element.sourceline
if element.text is not None: if element.text is not None:
comments = [] comments = []
if "extractJson" in self.keywords[keyword]: if "extractJson" in self.keywords[keyword]:
jsonExtractor = self.getJsonExtractor() json_extractor = self.get_json_extractor()
jsonExtractor.setOptions(self.keywords[keyword]["extractJson"]) json_extractor.set_options(self.keywords[keyword]["extractJson"])
for message, context in jsonExtractor.extractFromString(element.text): for message, context in json_extractor.extract_from_string(
element.text
):
yield message, None, context, lineno, comments yield message, None, context, lineno, comments
else: else:
context = None context = None
@ -474,12 +478,12 @@ class xml(Extractor):
) # Remove tabs, line breaks and unecessary spaces. ) # Remove tabs, line breaks and unecessary spaces.
comments.append(comment) comments.append(comment)
if "splitOnWhitespace" in self.keywords[keyword]: if "splitOnWhitespace" in self.keywords[keyword]:
for splitText in element.text.split(): for split_text in element.text.split():
# split on whitespace is used for token lists, there, a # split on whitespace is used for token lists, there, a
# leading '-' means the token has to be removed, so it's not # leading '-' means the token has to be removed, so it's not
# to be processed here either # to be processed here either
if splitText[0] != "-": if split_text[0] != "-":
yield str(splitText), None, context, lineno, comments yield str(split_text), None, context, lineno, comments
else: else:
yield str(element.text), None, context, lineno, comments yield str(element.text), None, context, lineno, comments
@ -500,14 +504,14 @@ class FakeSectionHeader:
return self.fp.readline() return self.fp.readline()
class ini(Extractor): class IniExtractor(Extractor):
"""Extract messages from INI files.""" """Extract messages from INI files."""
def __init__(self, directoryPath, filemasks, options): def __init__(self, directory_path, filemasks, options):
super().__init__(directoryPath, filemasks, options) super().__init__(directory_path, filemasks, options)
self.keywords = self.options.get("keywords", []) self.keywords = self.options.get("keywords", [])
def extractFromFile(self, filepath): def extract_from_file(self, filepath):
import ConfigParser import ConfigParser
config = ConfigParser.RawConfigParser() config = ConfigParser.RawConfigParser()

View File

@ -21,9 +21,9 @@ import multiprocessing
import os import os
import sys import sys
from i18n_helper import l10nFolderName, projectRootDirectory from i18n_helper import L10N_FOLDER_NAME, PROJECT_ROOT_DIRECTORY
from i18n_helper.catalog import Catalog from i18n_helper.catalog import Catalog
from i18n_helper.globber import getCatalogs from i18n_helper.globber import get_catalogs
DEBUG_PREFIX = "X_X " DEBUG_PREFIX = "X_X "
@ -41,7 +41,7 @@ def generate_long_strings(root_path, input_file_name, output_file_name, language
input_file_path = os.path.join(root_path, input_file_name) input_file_path = os.path.join(root_path, input_file_name)
output_file_path = os.path.join(root_path, output_file_name) output_file_path = os.path.join(root_path, output_file_name)
template_catalog = Catalog.readFrom(input_file_path) template_catalog = Catalog.read_from(input_file_path)
# Pretend we write English to get plurals. # Pretend we write English to get plurals.
long_string_catalog = Catalog(locale="en") long_string_catalog = Catalog(locale="en")
@ -55,7 +55,7 @@ def generate_long_strings(root_path, input_file_name, output_file_name, language
) )
# Load existing translation catalogs. # Load existing translation catalogs.
existing_translation_catalogs = getCatalogs(input_file_path, languages) existing_translation_catalogs = get_catalogs(input_file_path, languages)
# If any existing translation has more characters than the average expansion, use that instead. # If any existing translation has more characters than the average expansion, use that instead.
for translation_catalog in existing_translation_catalogs: for translation_catalog in existing_translation_catalogs:
@ -100,7 +100,7 @@ def generate_long_strings(root_path, input_file_name, output_file_name, language
longest_plural_string, longest_plural_string,
] ]
translation_message = long_string_catalog_message translation_message = long_string_catalog_message
long_string_catalog.writeTo(output_file_path) long_string_catalog.write_to(output_file_path)
def generate_debug(root_path, input_file_name, output_file_name): def generate_debug(root_path, input_file_name, output_file_name):
@ -114,7 +114,7 @@ def generate_debug(root_path, input_file_name, output_file_name):
input_file_path = os.path.join(root_path, input_file_name) input_file_path = os.path.join(root_path, input_file_name)
output_file_path = os.path.join(root_path, output_file_name) output_file_path = os.path.join(root_path, output_file_name)
template_catalog = Catalog.readFrom(input_file_path) template_catalog = Catalog.read_from(input_file_path)
# Pretend we write English to get plurals. # Pretend we write English to get plurals.
out_catalog = Catalog(locale="en") out_catalog = Catalog(locale="en")
@ -134,7 +134,7 @@ def generate_debug(root_path, input_file_name, output_file_name):
auto_comments=message.auto_comments, auto_comments=message.auto_comments,
) )
out_catalog.writeTo(output_file_path) out_catalog.write_to(output_file_path)
def main(): def main():
@ -159,12 +159,12 @@ def main():
sys.exit(0) sys.exit(0)
found_pot_files = 0 found_pot_files = 0
for root, _, filenames in os.walk(projectRootDirectory): for root, _, filenames in os.walk(PROJECT_ROOT_DIRECTORY):
for filename in filenames: for filename in filenames:
if ( if (
len(filename) > 4 len(filename) > 4
and filename[-4:] == ".pot" and filename[-4:] == ".pot"
and os.path.basename(root) == l10nFolderName and os.path.basename(root) == L10N_FOLDER_NAME
): ):
found_pot_files += 1 found_pot_files += 1
if args.debug: if args.debug:
@ -180,8 +180,8 @@ def main():
if found_pot_files == 0: if found_pot_files == 0:
print( print(
"This script did not work because no '.pot' files were found. " "This script did not work because no '.pot' files were found. "
"Please, run 'updateTemplates.py' to generate the '.pot' files, and run " "Please, run 'update_templates.py' to generate the '.pot' files, and run "
"'pullTranslations.py' to pull the latest translations from Transifex. " "'pull_translations.py' to pull the latest translations from Transifex. "
"Then you can run this script to generate '.po' files with obvious debug strings." "Then you can run this script to generate '.po' files with obvious debug strings."
) )

View File

@ -1,9 +1,9 @@
import os import os
l10nFolderName = "l10n" L10N_FOLDER_NAME = "l10n"
transifexClientFolder = ".tx" TRANSIFEX_CLIENT_FOLDER = ".tx"
l10nToolsDirectory = os.path.dirname(os.path.realpath(__file__)) L10N_TOOLS_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
projectRootDirectory = os.path.abspath( PROJECT_ROOT_DIRECTORY = os.path.abspath(
os.path.join(l10nToolsDirectory, os.pardir, os.pardir, os.pardir, os.pardir) os.path.join(L10N_TOOLS_DIRECTORY, os.pardir, os.pardir, os.pardir, os.pardir)
) )

View File

@ -44,10 +44,10 @@ class Catalog(BabelCatalog):
return [("Project-Id-Version", self._project), *headers] return [("Project-Id-Version", self._project), *headers]
@staticmethod @staticmethod
def readFrom(file_path, locale=None): def read_from(file_path, locale=None):
with open(file_path, "r+", encoding="utf-8") as fd: with open(file_path, "r+", encoding="utf-8") as fd:
return read_po(fd, locale=locale) return read_po(fd, locale=locale)
def writeTo(self, file_path): def write_to(self, file_path):
with open(file_path, "wb+") as fd: with open(file_path, "wb+") as fd:
return write_po(fileobj=fd, catalog=self, width=90, sort_by_file=True) return write_po(fileobj=fd, catalog=self, width=90, sort_by_file=True)

View File

@ -6,22 +6,22 @@ from typing import List, Optional
from i18n_helper.catalog import Catalog from i18n_helper.catalog import Catalog
def get_catalogs(input_file_path, filters: Optional[List[str]] = None) -> List[Catalog]:
    """Return a list of "real" catalogs (.po) in the given folder.

    :param input_file_path: path to a .pot template; its directory is scanned
        and its basename (without extension) selects matching .po files.
    :param filters: optional list of locale codes; when given, only catalogs
        whose locale is in the list are returned.
    :return: list of Catalog objects read from the matching .po files.
    """
    existing_translation_catalogs = []
    l10n_folder_path = os.path.dirname(input_file_path)
    input_file_name = os.path.basename(input_file_path)

    for filename in os.listdir(str(l10n_folder_path)):
        # "long.*" files are generated debug catalogs, not real translations.
        if filename.startswith("long") or not filename.endswith(".po"):
            continue
        # .po files are named "<locale>.<template-name>.po"; keep only the
        # ones belonging to this template.
        if filename.split(".")[1] != input_file_name.split(".")[0]:
            continue
        if not filters or filename.split(".")[0] in filters:
            existing_translation_catalogs.append(
                Catalog.read_from(
                    os.path.join(l10n_folder_path, filename), locale=filename.split(".")[0]
                )
            )
    return existing_translation_catalogs

View File

@ -19,16 +19,16 @@
import os import os
import subprocess import subprocess
from i18n_helper import l10nFolderName, projectRootDirectory, transifexClientFolder from i18n_helper import L10N_FOLDER_NAME, PROJECT_ROOT_DIRECTORY, TRANSIFEX_CLIENT_FOLDER
def main(): def main():
for root, folders, _ in os.walk(projectRootDirectory): for root, folders, _ in os.walk(PROJECT_ROOT_DIRECTORY):
for folder in folders: for folder in folders:
if folder != l10nFolderName: if folder != L10N_FOLDER_NAME:
continue continue
if os.path.exists(os.path.join(root, folder, transifexClientFolder)): if os.path.exists(os.path.join(root, folder, TRANSIFEX_CLIENT_FOLDER)):
path = os.path.join(root, folder) path = os.path.join(root, folder)
os.chdir(path) os.chdir(path)
print(f"INFO: Starting to pull translations in {path}...") print(f"INFO: Starting to pull translations in {path}...")

View File

@ -1,7 +1,7 @@
import io import io
import pytest import pytest
from checkDiff import check_diff from check_diff import check_diff
PATCHES = [ PATCHES = [

View File

@ -1,132 +0,0 @@
#!/usr/bin/env python3
#
# Copyright (C) 2022 Wildfire Games.
# This file is part of 0 A.D.
#
# 0 A.D. is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# 0 A.D. is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with 0 A.D. If not, see <http://www.gnu.org/licenses/>.
import json
import multiprocessing
import os
from importlib import import_module
from i18n_helper import l10nFolderName, projectRootDirectory
from i18n_helper.catalog import Catalog
# Per-mod configuration file describing how to extract that mod's messages.
messagesFilename = "messages.json"
def warnAboutUntouchedMods():
    """Warn about mods that are not properly configured to get their messages extracted."""
    modsRootFolder = os.path.join(projectRootDirectory, "binaries", "data", "mods")
    untouchedMods = {}
    for modFolder in os.listdir(modsRootFolder):
        # Folders starting with "_" or "." are not user-facing mods
        # (e.g. "_test.*" folders, ".svn") — skip them.
        if modFolder.startswith(("_", ".")):
            continue
        if not os.path.exists(os.path.join(modsRootFolder, modFolder, l10nFolderName)):
            untouchedMods[modFolder] = (
                f"There is no '{l10nFolderName}' folder in the root folder of this mod."
            )
        elif not os.path.exists(
            os.path.join(modsRootFolder, modFolder, l10nFolderName, messagesFilename)
        ):
            untouchedMods[modFolder] = (
                f"There is no '{messagesFilename}' file within the '{l10nFolderName}' folder "
                f"in the root folder of this mod."
            )
    if untouchedMods:
        print("Warning: No messages were extracted from the following mods:")
        for mod, reason in untouchedMods.items():
            print(f"{mod}: {reason}")
        print(
            f"For this script to extract messages from a mod folder, this mod folder must contain "
            f"a '{l10nFolderName}' folder, and this folder must contain a '{messagesFilename}' "
            f"file that describes how to extract messages for the mod. See the folder of the main "
            f"mod ('public') for an example, and see the documentation for more information."
        )
def generatePOT(templateSettings, rootPath):
    """Generate one .pot template file as configured by *templateSettings*.

    templateSettings is a single entry from a mod's messages.json; rootPath is
    the folder containing that messages.json. The resulting catalog is written
    to templateSettings["output"] inside rootPath.
    """
    if templateSettings.get("skip") == "yes":
        return

    inputRootPath = rootPath
    if "inputRoot" in templateSettings:
        inputRootPath = os.path.join(rootPath, templateSettings["inputRoot"])

    template = Catalog(
        project=templateSettings["project"],
        copyright_holder=templateSettings["copyrightHolder"],
        locale="en",
    )

    for rule in templateSettings["rules"]:
        if rule.get("skip") == "yes":
            # Skip only this rule. The previous "return" here aborted the
            # whole template, silently dropping all remaining rules and never
            # writing the output file.
            continue
        options = rule.get("options", {})
        extractorClass = getattr(import_module("extractors.extractors"), rule["extractor"])
        extractor = extractorClass(inputRootPath, rule["filemasks"], options)
        formatFlag = options.get("format")
        for message, plural, context, location, comments in extractor.run():
            message_id = (message, plural) if plural else message
            saved_message = template.get(message_id, context) or template.add(
                id=message_id,
                context=context,
                auto_comments=comments,
                # Only flag messages that actually contain a placeholder.
                flags=[formatFlag] if formatFlag and "%" in message else [],
            )
            saved_message.locations.append(location)
            # NOTE(review): presumably removes Babel's auto-detected
            # "python-format" flag because these strings use engine-specific
            # placeholders — confirm against the catalog writer.
            saved_message.flags.discard("python-format")

    template.writeTo(os.path.join(rootPath, templateSettings["output"]))
    print('Generated "{}" with {} messages.'.format(templateSettings["output"], len(template)))
def generateTemplatesForMessagesFile(messagesFilePath):
    """Generate all templates described by one messages.json, in parallel.

    Each template is built in its own process. The processes are joined so
    that the .pot files are guaranteed to exist once this function returns.
    """
    with open(messagesFilePath, encoding="utf-8") as fileObject:
        settings = json.load(fileObject)

    processes = []
    for templateSettings in settings:
        process = multiprocessing.Process(
            target=generatePOT, args=(templateSettings, os.path.dirname(messagesFilePath))
        )
        process.start()
        processes.append(process)

    # Previously the workers were fire-and-forget, so callers could observe
    # missing or partially written .pot files.
    for process in processes:
        process.join()
def main():
    """Walk the project (or --scandir) and build templates for every l10n folder."""
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--scandir",
        help="Directory to start scanning for l10n folders in. "
        "Type '.' for current working directory",
    )
    args = parser.parse_args()

    scanRoot = args.scandir or projectRootDirectory
    for root, folders, _filenames in os.walk(scanRoot):
        for folder in folders:
            if folder != l10nFolderName:
                continue
            messagesFilePath = os.path.join(root, folder, messagesFilename)
            if os.path.exists(messagesFilePath):
                generateTemplatesForMessagesFile(messagesFilePath)

    warnAboutUntouchedMods()
# Script entry point when executed directly (not when imported as a module).
if __name__ == "__main__":
    main()

View File

@ -0,0 +1,134 @@
#!/usr/bin/env python3
#
# Copyright (C) 2022 Wildfire Games.
# This file is part of 0 A.D.
#
# 0 A.D. is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# 0 A.D. is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with 0 A.D. If not, see <http://www.gnu.org/licenses/>.
import json
import multiprocessing
import os
from importlib import import_module
from i18n_helper import L10N_FOLDER_NAME, PROJECT_ROOT_DIRECTORY
from i18n_helper.catalog import Catalog
# Per-mod configuration file describing how to extract that mod's messages.
messages_filename = "messages.json"


def warn_about_untouched_mods():
    """Warn about mods that are not properly configured to get their messages extracted."""
    mods_root_folder = os.path.join(PROJECT_ROOT_DIRECTORY, "binaries", "data", "mods")
    untouched_mods = {}
    for mod_folder in os.listdir(mods_root_folder):
        # Folders starting with "_" or "." are not user-facing mods
        # (e.g. "_test.*" folders, ".svn") — skip them.
        if mod_folder.startswith(("_", ".")):
            continue
        if not os.path.exists(os.path.join(mods_root_folder, mod_folder, L10N_FOLDER_NAME)):
            untouched_mods[mod_folder] = (
                f"There is no '{L10N_FOLDER_NAME}' folder in the root folder of this mod."
            )
        elif not os.path.exists(
            os.path.join(mods_root_folder, mod_folder, L10N_FOLDER_NAME, messages_filename)
        ):
            untouched_mods[mod_folder] = (
                f"There is no '{messages_filename}' file within the '{L10N_FOLDER_NAME}' "
                f"folder in the root folder of this mod."
            )
    if untouched_mods:
        print("Warning: No messages were extracted from the following mods:")
        for mod, reason in untouched_mods.items():
            print(f"{mod}: {reason}")
        print(
            f"For this script to extract messages from a mod folder, this mod folder must contain "
            f"a '{L10N_FOLDER_NAME}' folder, and this folder must contain a '{messages_filename}' "
            f"file that describes how to extract messages for the mod. See the folder of the main "
            f"mod ('public') for an example, and see the documentation for more information."
        )
def generate_pot(template_settings, root_path):
    """Generate one .pot template file as configured by *template_settings*.

    template_settings is a single entry from a mod's messages.json; root_path
    is the folder containing that messages.json. The resulting catalog is
    written to template_settings["output"] inside root_path.
    """
    if template_settings.get("skip") == "yes":
        return

    input_root_path = root_path
    if "inputRoot" in template_settings:
        input_root_path = os.path.join(root_path, template_settings["inputRoot"])

    template = Catalog(
        project=template_settings["project"],
        copyright_holder=template_settings["copyrightHolder"],
        locale="en",
    )

    for rule in template_settings["rules"]:
        if rule.get("skip") == "yes":
            # Skip only this rule. The previous "return" here aborted the
            # whole template, silently dropping all remaining rules and never
            # writing the output file.
            continue
        options = rule.get("options", {})
        # Extractor classes follow the "<Name>Extractor" convention, e.g.
        # rule extractor "javascript" -> class "JavascriptExtractor".
        extractor_class = getattr(
            import_module("extractors.extractors"), f'{rule["extractor"].title()}Extractor'
        )
        extractor = extractor_class(input_root_path, rule["filemasks"], options)
        format_flag = options.get("format")
        for message, plural, context, location, comments in extractor.run():
            message_id = (message, plural) if plural else message
            saved_message = template.get(message_id, context) or template.add(
                id=message_id,
                context=context,
                auto_comments=comments,
                # Only flag messages that actually contain a placeholder.
                flags=[format_flag] if format_flag and "%" in message else [],
            )
            saved_message.locations.append(location)
            # NOTE(review): presumably removes Babel's auto-detected
            # "python-format" flag because these strings use engine-specific
            # placeholders — confirm against the catalog writer.
            saved_message.flags.discard("python-format")

    template.write_to(os.path.join(root_path, template_settings["output"]))
    print('Generated "{}" with {} messages.'.format(template_settings["output"], len(template)))
def generate_templates_for_messages_file(messages_file_path):
    """Generate all templates described by one messages.json, in parallel.

    Each template is built in its own process. The processes are joined so
    that the .pot files are guaranteed to exist once this function returns.
    """
    with open(messages_file_path, encoding="utf-8") as file_object:
        settings = json.load(file_object)

    processes = []
    for template_settings in settings:
        process = multiprocessing.Process(
            target=generate_pot, args=(template_settings, os.path.dirname(messages_file_path))
        )
        process.start()
        processes.append(process)

    # Previously the workers were fire-and-forget, so callers could observe
    # missing or partially written .pot files.
    for process in processes:
        process.join()
def main():
    """Walk the project (or --scandir) and build templates for every l10n folder."""
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--scandir",
        help="Directory to start scanning for l10n folders in. "
        "Type '.' for current working directory",
    )
    args = parser.parse_args()

    for root, folders, _filenames in os.walk(args.scandir or PROJECT_ROOT_DIRECTORY):
        for folder in folders:
            # Guard clause: only l10n folders with a messages.json are mods
            # configured for message extraction.
            if folder != L10N_FOLDER_NAME:
                continue
            messages_file_path = os.path.join(root, folder, messages_filename)
            if os.path.exists(messages_file_path):
                generate_templates_for_messages_file(messages_file_path)

    warn_about_untouched_mods()


# Script entry point when executed directly (not when imported as a module).
if __name__ == "__main__":
    main()