forked from 0ad/0ad

Lint and format Python files using ruff

To improve the quality and uniformity of the included Python code, this
commit lints and formats the included Python files with ruff.
Dunedan 2024-08-22 09:18:20 +02:00
parent 8519eb9b86
commit c49d4eedd0
Signed by untrusted user: Dunedan
GPG Key ID: 885B16854284E0B2
34 changed files with 2051 additions and 1474 deletions
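
The commit message does not record the exact invocation, so for reference here is a minimal sketch of how the reformatting can be reproduced, assuming ruff is installed and on PATH (the wrapper script itself is illustrative and not part of the commit):

#!/usr/bin/env python3
# Minimal sketch: apply ruff's autofixable lints, then reformat in place.
# Assumptions: "ruff" is on PATH and "." is the repository root.
import subprocess
import sys

def run_ruff(path="."):
    # "ruff check --fix" applies safe lint autofixes; "ruff format" rewrites
    # files in the style seen throughout this diff (double quotes, normalized
    # spacing, call arguments wrapped at the line-length limit).
    for cmd in (["ruff", "check", "--fix", path], ["ruff", "format", path]):
        completed = subprocess.run(cmd)
        if completed.returncode != 0:
            sys.exit(completed.returncode)

if __name__ == "__main__":
    run_ruff()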


@@ -1,138 +1,158 @@
#!/usr/bin/env python3
# ruff: noqa: F403, F405
from ctypes import *
import sys
import os
import xml.etree.ElementTree as ET
binaries = '../../../binaries'
binaries = "../../../binaries"
# Work out the platform-dependent library filename
dll_filename = {
'posix': './libCollada_dbg.so',
'nt': 'Collada_dbg.dll',
"posix": "./libCollada_dbg.so",
"nt": "Collada_dbg.dll",
}[os.name]
# The DLL may need other DLLs which are in its directory, so set the path to that
# (Don't care about clobbering the old PATH - it doesn't have anything important)
os.environ['PATH'] = '%s/system/' % binaries
os.environ["PATH"] = "%s/system/" % binaries
# Load the actual library
library = cdll.LoadLibrary('%s/system/%s' % (binaries, dll_filename))
library = cdll.LoadLibrary("%s/system/%s" % (binaries, dll_filename))
def log(severity, message):
print('[%s] %s' % (('INFO', 'WARNING', 'ERROR')[severity], message))
print("[%s] %s" % (("INFO", "WARNING", "ERROR")[severity], message))
clog = CFUNCTYPE(None, c_int, c_char_p)(log)
# (the CFUNCTYPE must not be GC'd, so try to keep a reference)
# (the CFUNCTYPE must not be GC'd, so try to keep a reference)
library.set_logger(clog)
skeleton_definitions = open('%s/data/tests/collada/skeletons.xml' % binaries).read()
skeleton_definitions = open("%s/data/tests/collada/skeletons.xml" % binaries).read()
library.set_skeleton_definitions(skeleton_definitions, len(skeleton_definitions))
def _convert_dae(func, filename, expected_status=0):
output = []
def cb(cbdata, str, len):
output.append(string_at(str, len))
cbtype = CFUNCTYPE(None, POINTER(None), POINTER(c_char), c_uint)
status = func(filename, cbtype(cb), None)
assert(status == expected_status)
return ''.join(output)
def _convert_dae(func, filename, expected_status=0):
output = []
def cb(cbdata, str, len):
output.append(string_at(str, len))
cbtype = CFUNCTYPE(None, POINTER(None), POINTER(c_char), c_uint)
status = func(filename, cbtype(cb), None)
assert status == expected_status
return "".join(output)
def convert_dae_to_pmd(*args, **kwargs):
return _convert_dae(library.convert_dae_to_pmd, *args, **kwargs)
return _convert_dae(library.convert_dae_to_pmd, *args, **kwargs)
def convert_dae_to_psa(*args, **kwargs):
return _convert_dae(library.convert_dae_to_psa, *args, **kwargs)
return _convert_dae(library.convert_dae_to_psa, *args, **kwargs)
def clean_dir(path):
# Remove all files first
try:
for f in os.listdir(path):
os.remove(path+'/'+f)
os.rmdir(path)
except OSError:
pass # (ignore errors if files are in use)
# Make sure the directory exists
try:
os.makedirs(path)
except OSError:
pass # (ignore errors if it already exists)
# Remove all files first
try:
for f in os.listdir(path):
os.remove(path + "/" + f)
os.rmdir(path)
except OSError:
pass # (ignore errors if files are in use)
# Make sure the directory exists
try:
os.makedirs(path)
except OSError:
pass # (ignore errors if it already exists)
def create_actor(mesh, texture, anims, props_):
actor = ET.Element('actor', version='1')
ET.SubElement(actor, 'castshadow')
group = ET.SubElement(actor, 'group')
variant = ET.SubElement(group, 'variant', frequency='100', name='Base')
ET.SubElement(variant, 'mesh').text = mesh+'.pmd'
ET.SubElement(variant, 'texture').text = texture+'.dds'
actor = ET.Element("actor", version="1")
ET.SubElement(actor, "castshadow")
group = ET.SubElement(actor, "group")
variant = ET.SubElement(group, "variant", frequency="100", name="Base")
ET.SubElement(variant, "mesh").text = mesh + ".pmd"
ET.SubElement(variant, "texture").text = texture + ".dds"
animations = ET.SubElement(variant, "animations")
for name, file in anims:
ET.SubElement(animations, "animation", file=file + ".psa", name=name, speed="100")
props = ET.SubElement(variant, "props")
for name, file in props_:
ET.SubElement(props, "prop", actor=file + ".xml", attachpoint=name)
return ET.tostring(actor)
animations = ET.SubElement(variant, 'animations')
for name, file in anims:
ET.SubElement(animations, 'animation', file=file+'.psa', name=name, speed='100')
props = ET.SubElement(variant, 'props')
for name, file in props_:
ET.SubElement(props, 'prop', actor=file+'.xml', attachpoint=name)
return ET.tostring(actor)
def create_actor_static(mesh, texture):
actor = ET.Element('actor', version='1')
ET.SubElement(actor, 'castshadow')
group = ET.SubElement(actor, 'group')
variant = ET.SubElement(group, 'variant', frequency='100', name='Base')
ET.SubElement(variant, 'mesh').text = mesh+'.pmd'
ET.SubElement(variant, 'texture').text = texture+'.dds'
return ET.tostring(actor)
actor = ET.Element("actor", version="1")
ET.SubElement(actor, "castshadow")
group = ET.SubElement(actor, "group")
variant = ET.SubElement(group, "variant", frequency="100", name="Base")
ET.SubElement(variant, "mesh").text = mesh + ".pmd"
ET.SubElement(variant, "texture").text = texture + ".dds"
return ET.tostring(actor)
################################
# Error handling
if False:
convert_dae_to_pmd('This is not well-formed XML', expected_status=-2)
convert_dae_to_pmd('<html>This is not COLLADA</html>', expected_status=-2)
convert_dae_to_pmd('<COLLADA>This is still not valid COLLADA</COLLADA>', expected_status=-2)
convert_dae_to_pmd("This is not well-formed XML", expected_status=-2)
convert_dae_to_pmd("<html>This is not COLLADA</html>", expected_status=-2)
convert_dae_to_pmd("<COLLADA>This is still not valid COLLADA</COLLADA>", expected_status=-2)
# Do some real conversions, so the output can be tested in the Actor Viewer
test_data = binaries + '/data/tests/collada'
test_mod = binaries + '/data/mods/_test.collada'
test_data = binaries + "/data/tests/collada"
test_mod = binaries + "/data/mods/_test.collada"
clean_dir(test_mod + '/art/meshes')
clean_dir(test_mod + '/art/actors')
clean_dir(test_mod + '/art/animation')
clean_dir(test_mod + "/art/meshes")
clean_dir(test_mod + "/art/actors")
clean_dir(test_mod + "/art/animation")
#for test_file in ['cube', 'jav2', 'jav2b', 'teapot_basic', 'teapot_skin', 'plane_skin', 'dude_skin', 'mergenonbone', 'densemesh']:
#for test_file in ['teapot_basic', 'jav2b', 'jav2d']:
for test_file in ['xsitest3c','xsitest3e','jav2d','jav2d2']:
#for test_file in ['xsitest3']:
#for test_file in []:
print("* Converting PMD %s" % (test_file))
# for test_file in ['cube', 'jav2', 'jav2b', 'teapot_basic', 'teapot_skin', 'plane_skin', 'dude_skin', 'mergenonbone', 'densemesh']:
# for test_file in ['teapot_basic', 'jav2b', 'jav2d']:
for test_file in ["xsitest3c", "xsitest3e", "jav2d", "jav2d2"]:
# for test_file in ['xsitest3']:
# for test_file in []:
print("* Converting PMD %s" % (test_file))
input_filename = '%s/%s.dae' % (test_data, test_file)
output_filename = '%s/art/meshes/%s.pmd' % (test_mod, test_file)
input = open(input_filename).read()
output = convert_dae_to_pmd(input)
open(output_filename, 'wb').write(output)
input_filename = "%s/%s.dae" % (test_data, test_file)
output_filename = "%s/art/meshes/%s.pmd" % (test_mod, test_file)
xml = create_actor(test_file, 'male', [('Idle','dudeidle'),('Corpse','dudecorpse'),('attack1',test_file),('attack2','jav2d')], [('helmet','teapot_basic_static')])
open('%s/art/actors/%s.xml' % (test_mod, test_file), 'w').write(xml)
input = open(input_filename).read()
output = convert_dae_to_pmd(input)
open(output_filename, "wb").write(output)
xml = create_actor_static(test_file, 'male')
open('%s/art/actors/%s_static.xml' % (test_mod, test_file), 'w').write(xml)
xml = create_actor(
test_file,
"male",
[
("Idle", "dudeidle"),
("Corpse", "dudecorpse"),
("attack1", test_file),
("attack2", "jav2d"),
],
[("helmet", "teapot_basic_static")],
)
open("%s/art/actors/%s.xml" % (test_mod, test_file), "w").write(xml)
#for test_file in ['jav2','jav2b', 'jav2d']:
for test_file in ['xsitest3c','xsitest3e','jav2d','jav2d2']:
#for test_file in []:
print("* Converting PSA %s" % (test_file))
xml = create_actor_static(test_file, "male")
open("%s/art/actors/%s_static.xml" % (test_mod, test_file), "w").write(xml)
input_filename = '%s/%s.dae' % (test_data, test_file)
output_filename = '%s/art/animation/%s.psa' % (test_mod, test_file)
# for test_file in ['jav2','jav2b', 'jav2d']:
for test_file in ["xsitest3c", "xsitest3e", "jav2d", "jav2d2"]:
# for test_file in []:
print("* Converting PSA %s" % (test_file))
input = open(input_filename).read()
output = convert_dae_to_psa(input)
open(output_filename, 'wb').write(output)
input_filename = "%s/%s.dae" % (test_data, test_file)
output_filename = "%s/art/animation/%s.psa" % (test_mod, test_file)
input = open(input_filename).read()
output = convert_dae_to_psa(input)
open(output_filename, "wb").write(output)
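
A note on the pragma added at the top of the file above: "# ruff: noqa: F403, F405" is a file-level suppression. F403 flags "from module import *" and F405 flags names that may be undefined or come from a star import; since the script deliberately relies on "from ctypes import *", the commit silences both rules for the whole file rather than rewriting every ctypes reference. A standalone illustration of the same pattern (not taken from the commit):

# ruff: noqa: F403, F405
# Without the pragma above, ruff reports F403 on the wildcard import below
# and F405 on every name (such as create_string_buffer) resolved through it.
from ctypes import *

buf = create_string_buffer(16)  # name provided by the wildcard import
print(len(buf.raw))  # 16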

File diff suppressed because it is too large


@@ -8,41 +8,58 @@ from scriptlib import warn, SimulTemplateEntity, find_files
def find_entities(vfs_root):
base = vfs_root / 'public' / 'simulation' / 'templates'
return [str(fp.relative_to(base).with_suffix('')) for (_, fp) in find_files(vfs_root, ['public'], 'simulation/templates', 'xml')]
base = vfs_root / "public" / "simulation" / "templates"
return [
str(fp.relative_to(base).with_suffix(""))
for (_, fp) in find_files(vfs_root, ["public"], "simulation/templates", "xml")
]
def main():
vfs_root = Path(__file__).resolve().parents[3] / 'binaries' / 'data' / 'mods'
simul_templates_path = Path('simulation/templates')
vfs_root = Path(__file__).resolve().parents[3] / "binaries" / "data" / "mods"
simul_templates_path = Path("simulation/templates")
simul_template_entity = SimulTemplateEntity(vfs_root)
with open('creation.dot', 'w') as dot_f:
dot_f.write('digraph G {\n')
with open("creation.dot", "w") as dot_f:
dot_f.write("digraph G {\n")
files = sorted(find_entities(vfs_root))
for f in files:
if f.startswith('template_'):
if f.startswith("template_"):
continue
print(f"# {f}...")
entity = simul_template_entity.load_inherited(simul_templates_path, f, ['public'])
if entity.find('Builder') is not None and entity.find('Builder').find('Entities') is not None:
entities = entity.find('Builder').find('Entities').text.replace('{civ}', entity.find('Identity').find('Civ').text)
builders = split(r'\s+', entities.strip())
entity = simul_template_entity.load_inherited(simul_templates_path, f, ["public"])
if (
entity.find("Builder") is not None
and entity.find("Builder").find("Entities") is not None
):
entities = (
entity.find("Builder")
.find("Entities")
.text.replace("{civ}", entity.find("Identity").find("Civ").text)
)
builders = split(r"\s+", entities.strip())
for builder in builders:
if Path(builder) in files:
warn(f"Invalid Builder reference: {f} -> {builder}")
dot_f.write(f'"{f}" -> "{builder}" [color=green];\n')
if entity.find('TrainingQueue') is not None and entity.find('TrainingQueue').find('Entities') is not None:
entities = entity.find('TrainingQueue').find('Entities').text.replace('{civ}', entity.find('Identity').find('Civ').text)
training_queues = split(r'\s+', entities.strip())
if (
entity.find("TrainingQueue") is not None
and entity.find("TrainingQueue").find("Entities") is not None
):
entities = (
entity.find("TrainingQueue")
.find("Entities")
.text.replace("{civ}", entity.find("Identity").find("Civ").text)
)
training_queues = split(r"\s+", entities.strip())
for training_queue in training_queues:
if Path(training_queue) in files:
warn(f"Invalid TrainingQueue reference: {f} -> {training_queue}")
dot_f.write(f'"{f}" -> "{training_queue}" [color=blue];\n')
dot_f.write('}\n')
if run(['dot', '-V'], capture_output=True).returncode == 0:
exit(run(['dot', '-Tpng', 'creation.dot', '-o', 'creation.png'], text=True).returncode)
dot_f.write("}\n")
if run(["dot", "-V"], capture_output=True).returncode == 0:
exit(run(["dot", "-Tpng", "creation.dot", "-o", "creation.png"], text=True).returncode)
if __name__ == '__main__':
if __name__ == "__main__":
chdir(Path(__file__).resolve().parent)
main()


@@ -17,8 +17,10 @@ RELAXNG_SCHEMA_ERROR_MSG = """Relax NG schema non existant.
Please create the file: {}
You can do that by running 'pyrogenesis -dumpSchema' in the 'system' directory
"""
XMLLINT_ERROR_MSG = ("xmllint not found in your PATH, please install it "
"(usually in libxml2 package)")
XMLLINT_ERROR_MSG = (
"xmllint not found in your PATH, please install it " "(usually in libxml2 package)"
)
class SingleLevelFilter(logging.Filter):
def __init__(self, passlevel, reject):
@@ -27,37 +29,48 @@ class SingleLevelFilter(logging.Filter):
def filter(self, record):
if self.reject:
return (record.levelno != self.passlevel)
return record.levelno != self.passlevel
else:
return (record.levelno == self.passlevel)
return record.levelno == self.passlevel
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# create a console handler, seems nicer to Windows and for future uses
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
ch.setFormatter(logging.Formatter('%(levelname)s - %(message)s'))
ch.setFormatter(logging.Formatter("%(levelname)s - %(message)s"))
f1 = SingleLevelFilter(logging.INFO, False)
ch.addFilter(f1)
logger.addHandler(ch)
errorch =logging. StreamHandler(sys.stderr)
errorch = logging.StreamHandler(sys.stderr)
errorch.setLevel(logging.WARNING)
errorch.setFormatter(logging.Formatter('%(levelname)s - %(message)s'))
errorch.setFormatter(logging.Formatter("%(levelname)s - %(message)s"))
logger.addHandler(errorch)
def main(argv: Sequence[str] | None = None) -> int:
parser = argparse.ArgumentParser(description="Validate templates")
parser.add_argument("-m", "--mod-name", required=True,
help="The name of the mod to validate.")
parser.add_argument("-r", "--root", dest="vfs_root", default=Path(),
type=Path, help="The path to mod's root location.")
parser.add_argument("-s", "--relaxng-schema",
default=Path() / ENTITY_RELAXNG_FNAME, type=Path,
help="The path to mod's root location.")
parser.add_argument("-t", "--templates", nargs="*",
help="Optionally, a list of templates to validate.")
parser.add_argument("-v", "--verbose",
help="Be verbose about the output.", default=False)
parser.add_argument("-m", "--mod-name", required=True, help="The name of the mod to validate.")
parser.add_argument(
"-r",
"--root",
dest="vfs_root",
default=Path(),
type=Path,
help="The path to mod's root location.",
)
parser.add_argument(
"-s",
"--relaxng-schema",
default=Path() / ENTITY_RELAXNG_FNAME,
type=Path,
help="The path to mod's root location.",
)
parser.add_argument(
"-t", "--templates", nargs="*", help="Optionally, a list of templates to validate."
)
parser.add_argument("-v", "--verbose", help="Be verbose about the output.", default=False)
args = parser.parse_args(argv)
@@ -72,8 +85,9 @@ def main(argv: Sequence[str] | None = None) -> int:
if args.templates:
templates = sorted([(Path(t), None) for t in args.templates])
else:
templates = sorted(find_files(args.vfs_root, [args.mod_name],
SIMUL_TEMPLATES_PATH.as_posix(), "xml"))
templates = sorted(
find_files(args.vfs_root, [args.mod_name], SIMUL_TEMPLATES_PATH.as_posix(), "xml")
)
simul_template_entity = SimulTemplateEntity(args.vfs_root, logger)
count, failed = 0, 0
@@ -82,29 +96,32 @@ def main(argv: Sequence[str] | None = None) -> int:
continue
path = fp.as_posix()
if (path.startswith(f"{SIMUL_TEMPLATES_PATH.as_posix()}/mixins/")
or path.startswith(
f"{SIMUL_TEMPLATES_PATH.as_posix()}/special/")):
if path.startswith(f"{SIMUL_TEMPLATES_PATH.as_posix()}/mixins/") or path.startswith(
f"{SIMUL_TEMPLATES_PATH.as_posix()}/special/"
):
continue
if (args.verbose):
if args.verbose:
logger.info(f"Parsing {fp}...")
count += 1
entity = simul_template_entity.load_inherited(
SIMUL_TEMPLATES_PATH,
str(fp.relative_to(SIMUL_TEMPLATES_PATH)),
[args.mod_name]
SIMUL_TEMPLATES_PATH, str(fp.relative_to(SIMUL_TEMPLATES_PATH)), [args.mod_name]
)
xmlcontent = ElementTree.tostring(entity, encoding="unicode")
try:
run(["xmllint", "--relaxng",
str(args.relaxng_schema.resolve()), "-"],
input=xmlcontent, encoding="utf-8", capture_output=True, text=True, check=True)
run(
["xmllint", "--relaxng", str(args.relaxng_schema.resolve()), "-"],
input=xmlcontent,
encoding="utf-8",
capture_output=True,
text=True,
check=True,
)
except CalledProcessError as e:
failed += 1
if (e.stderr):
if e.stderr:
logger.error(e.stderr)
if (e.stdout):
if e.stdout:
logger.info(e.stdout)
logger.info(f"Total: {count}; failed: {failed}")


@@ -4,6 +4,7 @@ from re import split
from xml.etree import ElementTree
from os.path import exists
class SimulTemplateEntity:
def __init__(self, vfs_root, logger):
self.vfs_root = vfs_root
@@ -11,11 +12,11 @@ class SimulTemplateEntity:
def get_file(self, base_path, vfs_path, mod):
default_path = self.vfs_root / mod / base_path
file = (default_path / "special" / "filter" / vfs_path).with_suffix('.xml')
file = (default_path / "special" / "filter" / vfs_path).with_suffix(".xml")
if not exists(file):
file = (default_path / "mixins" / vfs_path).with_suffix('.xml')
file = (default_path / "mixins" / vfs_path).with_suffix(".xml")
if not exists(file):
file = (default_path / vfs_path).with_suffix('.xml')
file = (default_path / vfs_path).with_suffix(".xml")
return file
def get_main_mod(self, base_path, vfs_path, mods):
@@ -35,52 +36,52 @@
"""
apply tag layer to base_tag
"""
if tag.get('datatype') == 'tokens':
base_tokens = split(r'\s+', base_tag.text or '')
tokens = split(r'\s+', tag.text or '')
if tag.get("datatype") == "tokens":
base_tokens = split(r"\s+", base_tag.text or "")
tokens = split(r"\s+", tag.text or "")
final_tokens = base_tokens.copy()
for token in tokens:
if token.startswith('-'):
if token.startswith("-"):
token_to_remove = token[1:]
if token_to_remove in final_tokens:
final_tokens.remove(token_to_remove)
elif token not in final_tokens:
final_tokens.append(token)
base_tag.text = ' '.join(final_tokens)
base_tag.text = " ".join(final_tokens)
base_tag.set("datatype", "tokens")
elif tag.get('op'):
op = tag.get('op')
op1 = Decimal(base_tag.text or '0')
op2 = Decimal(tag.text or '0')
elif tag.get("op"):
op = tag.get("op")
op1 = Decimal(base_tag.text or "0")
op2 = Decimal(tag.text or "0")
# Try converting to integers if possible, to pass validation.
if op == 'add':
if op == "add":
base_tag.text = str(int(op1 + op2) if int(op1 + op2) == op1 + op2 else op1 + op2)
elif op == 'mul':
elif op == "mul":
base_tag.text = str(int(op1 * op2) if int(op1 * op2) == op1 * op2 else op1 * op2)
elif op == 'mul_round':
elif op == "mul_round":
base_tag.text = str(round(op1 * op2))
else:
raise ValueError(f"Invalid operator '{op}'")
else:
base_tag.text = tag.text
for prop in tag.attrib:
if prop not in ('disable', 'replace', 'parent', 'merge'):
if prop not in ("disable", "replace", "parent", "merge"):
base_tag.set(prop, tag.get(prop))
for child in tag:
base_child = base_tag.find(child.tag)
if 'disable' in child.attrib:
if "disable" in child.attrib:
if base_child is not None:
base_tag.remove(base_child)
elif ('merge' not in child.attrib) or (base_child is not None):
if 'replace' in child.attrib and base_child is not None:
elif ("merge" not in child.attrib) or (base_child is not None):
if "replace" in child.attrib and base_child is not None:
base_tag.remove(base_child)
base_child = None
if base_child is None:
base_child = ElementTree.Element(child.tag)
base_tag.append(base_child)
self.apply_layer(base_child, child)
if 'replace' in base_child.attrib:
del base_child.attrib['replace']
if "replace" in base_child.attrib:
del base_child.attrib["replace"]
def load_inherited(self, base_path, vfs_path, mods):
entity = self._load_inherited(base_path, vfs_path, mods)
@@ -91,7 +92,7 @@ class SimulTemplateEntity:
"""
vfs_path should be relative to base_path in a mod
"""
if '|' in vfs_path:
if "|" in vfs_path:
paths = vfs_path.split("|", 1)
base = self._load_inherited(base_path, paths[1], mods, base)
base = self._load_inherited(base_path, paths[0], mods, base)
@@ -106,8 +107,8 @@
if duplicates:
for dup in duplicates:
self.logger.warning(f"Duplicate child node '{dup}' in tag {el.tag} of {fp}")
if layer.get('parent'):
parent = self._load_inherited(base_path, layer.get('parent'), mods, base)
if layer.get("parent"):
parent = self._load_inherited(base_path, layer.get("parent"), mods, base)
self.apply_layer(parent, layer)
return parent
else:
@@ -124,15 +125,20 @@ def find_files(vfs_root, mods, vfs_path, *ext_list):
- Path relative to the mod base
- full Path
"""
full_exts = ['.' + ext for ext in ext_list]
full_exts = ["." + ext for ext in ext_list]
def find_recursive(dp, base):
"""(relative Path, full Path) generator"""
if dp.is_dir():
if dp.name != '.svn' and dp.name != '.git' and not dp.name.endswith('~'):
if dp.name != ".svn" and dp.name != ".git" and not dp.name.endswith("~"):
for fp in dp.iterdir():
yield from find_recursive(fp, base)
elif dp.suffix in full_exts:
relative_file_path = dp.relative_to(base)
yield (relative_file_path, dp.resolve())
return [(rp, fp) for mod in mods for (rp, fp) in find_recursive(vfs_root / mod / vfs_path, vfs_root / mod)]
return [
(rp, fp)
for mod in mods
for (rp, fp) in find_recursive(vfs_root / mod / vfs_path, vfs_root / mod)
]


@@ -20,47 +20,54 @@ else:
ft_lib = "libfreetype.so.6"
lc_lib = "libcairo.so.2"
_freetype_so = ctypes.CDLL (ft_lib)
_cairo_so = ctypes.CDLL (lc_lib)
_freetype_so = ctypes.CDLL(ft_lib)
_cairo_so = ctypes.CDLL(lc_lib)
_cairo_so.cairo_ft_font_face_create_for_ft_face.restype = ctypes.c_void_p
_cairo_so.cairo_ft_font_face_create_for_ft_face.argtypes = [ ctypes.c_void_p, ctypes.c_int ]
_cairo_so.cairo_set_font_face.argtypes = [ ctypes.c_void_p, ctypes.c_void_p ]
_cairo_so.cairo_font_face_status.argtypes = [ ctypes.c_void_p ]
_cairo_so.cairo_status.argtypes = [ ctypes.c_void_p ]
_cairo_so.cairo_ft_font_face_create_for_ft_face.argtypes = [ctypes.c_void_p, ctypes.c_int]
_cairo_so.cairo_set_font_face.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_cairo_so.cairo_font_face_status.argtypes = [ctypes.c_void_p]
_cairo_so.cairo_status.argtypes = [ctypes.c_void_p]
# initialize freetype
_ft_lib = ctypes.c_void_p ()
if FT_Err_Ok != _freetype_so.FT_Init_FreeType (ctypes.byref (_ft_lib)):
_ft_lib = ctypes.c_void_p()
if FT_Err_Ok != _freetype_so.FT_Init_FreeType(ctypes.byref(_ft_lib)):
raise Exception("Error initialising FreeType library.")
_surface = cairo.ImageSurface (cairo.FORMAT_A8, 0, 0)
_surface = cairo.ImageSurface(cairo.FORMAT_A8, 0, 0)
class PycairoContext(ctypes.Structure):
_fields_ = [("PyObject_HEAD", ctypes.c_byte * object.__basicsize__),
_fields_ = [
("PyObject_HEAD", ctypes.c_byte * object.__basicsize__),
("ctx", ctypes.c_void_p),
("base", ctypes.c_void_p)]
("base", ctypes.c_void_p),
]
def create_cairo_font_face_for_file (filename, faceindex=0, loadoptions=0):
def create_cairo_font_face_for_file(filename, faceindex=0, loadoptions=0):
# create freetype face
ft_face = ctypes.c_void_p()
cairo_ctx = cairo.Context (_surface)
cairo_ctx = cairo.Context(_surface)
cairo_t = PycairoContext.from_address(id(cairo_ctx)).ctx
if FT_Err_Ok != _freetype_so.FT_New_Face (_ft_lib, filename.encode('ascii'), faceindex, ctypes.byref(ft_face)):
if FT_Err_Ok != _freetype_so.FT_New_Face(
_ft_lib, filename.encode("ascii"), faceindex, ctypes.byref(ft_face)
):
raise Exception("Error creating FreeType font face for " + filename)
# create cairo font face for freetype face
cr_face = _cairo_so.cairo_ft_font_face_create_for_ft_face (ft_face, loadoptions)
if CAIRO_STATUS_SUCCESS != _cairo_so.cairo_font_face_status (cr_face):
cr_face = _cairo_so.cairo_ft_font_face_create_for_ft_face(ft_face, loadoptions)
if CAIRO_STATUS_SUCCESS != _cairo_so.cairo_font_face_status(cr_face):
raise Exception("Error creating cairo font face for " + filename)
_cairo_so.cairo_set_font_face (cairo_t, cr_face)
if CAIRO_STATUS_SUCCESS != _cairo_so.cairo_status (cairo_t):
_cairo_so.cairo_set_font_face(cairo_t, cr_face)
if CAIRO_STATUS_SUCCESS != _cairo_so.cairo_status(cairo_t):
raise Exception("Error creating cairo font face for " + filename)
face = cairo_ctx.get_font_face ()
face = cairo_ctx.get_font_face()
indexes = lambda char: _freetype_so.FT_Get_Char_Index(ft_face, ord(char))
def indexes(char):
return _freetype_so.FT_Get_Char_Index(ft_face, ord(char))
return (face, indexes)


@@ -18,59 +18,64 @@
from bisect import bisect_left
class OutOfSpaceError(Exception): pass
class OutOfSpaceError(Exception):
pass
class Point(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __cmp__(self, other):
"""Compares the starting position of height slices"""
return self.x - other.x
class RectanglePacker(object):
"""Base class for rectangle packing algorithms
By uniting all rectangle packers under this common base class, you can
easily switch between different algorithms to find the most efficient or
performant one for a given job.
An almost exhaustive list of packing algorithms can be found here:
http://www.csc.liv.ac.uk/~epa/surveyhtml.html"""
def __init__(self, packingAreaWidth, packingAreaHeight):
"""Initializes a new rectangle packer
packingAreaWidth: Maximum width of the packing area
packingAreaHeight: Maximum height of the packing area"""
self.packingAreaWidth = packingAreaWidth
self.packingAreaHeight = packingAreaHeight
def Pack(self, rectangleWidth, rectangleHeight):
"""Allocates space for a rectangle in the packing area
rectangleWidth: Width of the rectangle to allocate
rectangleHeight: Height of the rectangle to allocate
Returns the location at which the rectangle has been placed"""
point = self.TryPack(rectangleWidth, rectangleHeight)
if not point:
raise OutOfSpaceError("Rectangle does not fit in packing area")
return point
def TryPack(self, rectangleWidth, rectangleHeight):
"""Tries to allocate space for a rectangle in the packing area
rectangleWidth: Width of the rectangle to allocate
rectangleHeight: Height of the rectangle to allocate
Returns a Point instance if space for the rectangle could be allocated,
otherwise returns None"""
raise NotImplementedError
class DumbRectanglePacker(RectanglePacker):
def __init__(self, packingAreaWidth, packingAreaHeight):
RectanglePacker.__init__(self, packingAreaWidth, packingAreaHeight)
@@ -91,86 +96,85 @@ class DumbRectanglePacker(RectanglePacker):
self.rowh = max(self.rowh, rectangleHeight)
return r
class CygonRectanglePacker(RectanglePacker):
"""
Packer using a custom algorithm by Markus 'Cygon' Ewald
Algorithm conceived by Markus Ewald (cygon at nuclex dot org), though
I'm quite sure I'm not the first one to come up with it :)
The algorithm always places rectangles as low as possible in the packing
area. So, for any new rectangle that is to be added, the packer has to
determine the X coordinate at which the rectangle can have the lowest
overall height without intersecting any other rectangles.
To quickly discover these locations, the packer uses a sophisticated
data structure that stores the upper silhouette of the packing area. When
a new rectangle needs to be added, only the silhouette edges need to be
analyzed to find the position where the rectangle would achieve the lowest"""
def __init__(self, packingAreaWidth, packingAreaHeight):
"""Initializes a new rectangle packer
packingAreaWidth: Maximum width of the packing area
packingAreaHeight: Maximum height of the packing area"""
RectanglePacker.__init__(self, packingAreaWidth, packingAreaHeight)
# Stores the height silhouette of the rectangles
self.heightSlices = []
# At the beginning, the packing area is a single slice of height 0
self.heightSlices.append(Point(0,0))
self.heightSlices.append(Point(0, 0))
def TryPack(self, rectangleWidth, rectangleHeight):
"""Tries to allocate space for a rectangle in the packing area
rectangleWidth: Width of the rectangle to allocate
rectangleHeight: Height of the rectangle to allocate
Returns a Point instance if space for the rectangle could be allocated,
otherwise returns None"""
placement = None
# If the rectangle is larger than the packing area in any dimension,
# it will never fit!
if rectangleWidth > self.packingAreaWidth or rectangleHeight > \
self.packingAreaHeight:
if rectangleWidth > self.packingAreaWidth or rectangleHeight > self.packingAreaHeight:
return None
# Determine the placement for the new rectangle
placement = self.tryFindBestPlacement(rectangleWidth, rectangleHeight)
# If a place for the rectangle could be found, update the height slice
# table to mark the region of the rectangle as being taken.
if placement:
self.integrateRectangle(placement.x, rectangleWidth, placement.y \
+ rectangleHeight)
self.integrateRectangle(placement.x, rectangleWidth, placement.y + rectangleHeight)
return placement
def tryFindBestPlacement(self, rectangleWidth, rectangleHeight):
"""Finds the best position for a rectangle of the given dimensions
rectangleWidth: Width of the rectangle to find a position for
rectangleHeight: Height of the rectangle to find a position for
Returns a Point instance if a valid placement for the rectangle could
be found, otherwise returns None"""
# Slice index, vertical position and score of the best placement we
# could find
bestSliceIndex = -1 # Slice index where the best placement was found
bestSliceY = 0 # Y position of the best placement found
bestSliceIndex = -1 # Slice index where the best placement was found
bestSliceY = 0 # Y position of the best placement found
# lower == better!
bestScore = self.packingAreaHeight
bestScore = self.packingAreaHeight
# This is the counter for the currently checked position. The search
# works by skipping from slice to slice, determining the suitability
# of the location for the placement of the rectangle.
leftSliceIndex = 0
# Determine the slice in which the right end of the rectangle is located
rightSliceIndex = bisect_left(self.heightSlices, Point(rectangleWidth, 0))
while rightSliceIndex <= len(self.heightSlices):
# Determine the highest slice within the slices covered by the
# rectangle at its current placement. We cannot put the rectangle
@@ -179,21 +183,21 @@ class CygonRectanglePacker(RectanglePacker):
for index in range(leftSliceIndex + 1, rightSliceIndex):
if self.heightSlices[index].y > highest:
highest = self.heightSlices[index].y
# Only process this position if it doesn't leave the packing area
if highest + rectangleHeight < self.packingAreaHeight:
score = highest
if score < bestScore:
bestSliceIndex = leftSliceIndex
bestSliceY = highest
bestScore = score
# Advance the starting slice to the next slice start
leftSliceIndex += 1
if leftSliceIndex >= len(self.heightSlices):
break
# Advance the ending slice until we're on the proper slice again,
# given the new starting position of the rectangle.
rightRectangleEnd = self.heightSlices[leftSliceIndex].x + rectangleWidth
@@ -202,18 +206,18 @@ class CygonRectanglePacker(RectanglePacker):
rightSliceStart = self.packingAreaWidth
else:
rightSliceStart = self.heightSlices[rightSliceIndex].x
# Is this the slice we're looking for?
if rightSliceStart > rightRectangleEnd:
break
rightSliceIndex += 1
# If we crossed the end of the slice array, the rectangle's right
# end has left the packing area, and thus, our search ends.
if rightSliceIndex > len(self.heightSlices):
break
# Return the best placement we found for this rectangle. If the
# rectangle didn't fit anywhere, the slice index will still have its
# initialization value of -1 and we can report that no placement
@@ -222,23 +226,23 @@ class CygonRectanglePacker(RectanglePacker):
return None
else:
return Point(self.heightSlices[bestSliceIndex].x, bestSliceY)
def integrateRectangle(self, left, width, bottom):
"""Integrates a new rectangle into the height slice table
left: Position of the rectangle's left side
width: Width of the rectangle
bottom: Position of the rectangle's lower side"""
# Find the first slice that is touched by the rectangle
startSlice = bisect_left(self.heightSlices, Point(left, 0))
# We scored a direct hit, so we can replace the slice we have hit
firstSliceOriginalHeight = self.heightSlices[startSlice].y
self.heightSlices[startSlice] = Point(left, bottom)
right = left + width
startSlice += 1
# Special case, the rectangle started on the last slice, so we cannot
# use the start slice + 1 for the binary search and the possibly
# already modified start slice height now only remains in our temporary
@@ -249,21 +253,24 @@ class CygonRectanglePacker(RectanglePacker):
# to return to the original height at the end of the rectangle.
if right < self.packingAreaWidth:
self.heightSlices.append(Point(right, firstSliceOriginalHeight))
else: # The rectangle doesn't start on the last slice
endSlice = bisect_left(self.heightSlices, Point(right,0), \
startSlice, len(self.heightSlices))
else: # The rectangle doesn't start on the last slice
endSlice = bisect_left(
self.heightSlices, Point(right, 0), startSlice, len(self.heightSlices)
)
# Another direct hit on the final slice's end?
if endSlice < len(self.heightSlices) and not (Point(right, 0) < self.heightSlices[endSlice]):
if endSlice < len(self.heightSlices) and not (
Point(right, 0) < self.heightSlices[endSlice]
):
del self.heightSlices[startSlice:endSlice]
else: # No direct hit, rectangle ends inside another slice
else: # No direct hit, rectangle ends inside another slice
# Find out to which height we need to return at the right end of
# the rectangle
if endSlice == startSlice:
returnHeight = firstSliceOriginalHeight
else:
returnHeight = self.heightSlices[endSlice - 1].y
# Remove all slices covered by the rectangle and begin a new
# slice at its end to return back to the height of the slice on
# which the rectangle ends.


@@ -4,13 +4,16 @@
import FontLoader
def dump_font(ttf):
(face, indexes) = FontLoader.create_cairo_font_face_for_file(
"../../../binaries/data/tools/fontbuilder/fonts/%s" % ttf, 0, FontLoader.FT_LOAD_DEFAULT
)
(face, indexes) = FontLoader.create_cairo_font_face_for_file("../../../binaries/data/tools/fontbuilder/fonts/%s" % ttf, 0, FontLoader.FT_LOAD_DEFAULT)
mappings = [(c, indexes(chr(c))) for c in range(1, 65535)]
print(ttf, end=" ")
print(" ".join(str(c) for (c, g) in mappings if g != 0))
mappings = [ (c, indexes(chr(c))) for c in range(1, 65535) ]
print(ttf, end=' ')
print(' '.join(str(c) for (c, g) in mappings if g != 0))
dump_font("DejaVuSansMono.ttf")
dump_font("FreeSans.ttf")


@@ -7,6 +7,7 @@ import math
import FontLoader
import Packer
# Representation of a rendered glyph
class Glyph(object):
def __init__(self, ctx, renderstyle, char, idx, face, size):
@@ -30,7 +31,7 @@ class Glyph(object):
bb = [inf, inf, -inf, -inf]
if "stroke" in self.renderstyle:
for (c, w) in self.renderstyle["stroke"]:
for c, w in self.renderstyle["stroke"]:
ctx.set_line_width(w)
ctx.glyph_path([self.glyph])
e = ctx.stroke_extents()
@@ -52,8 +53,8 @@ class Glyph(object):
# Force multiple of 4, to avoid leakage across S3TC blocks
# (TODO: is this useful?)
#self.w += (4 - (self.w % 4)) % 4
#self.h += (4 - (self.h % 4)) % 4
# self.w += (4 - (self.w % 4)) % 4
# self.h += (4 - (self.h % 4)) % 4
def pack(self, packer):
self.pos = packer.Pack(self.w, self.h)
@@ -69,20 +70,21 @@ class Glyph(object):
# Render each stroke, and then each fill on top of it
if "stroke" in self.renderstyle:
for ((r, g, b, a), w) in self.renderstyle["stroke"]:
for (r, g, b, a), w in self.renderstyle["stroke"]:
ctx.set_line_width(w)
ctx.set_source_rgba(r, g, b, a)
ctx.glyph_path([self.glyph])
ctx.stroke()
if "fill" in self.renderstyle:
for (r, g, b, a) in self.renderstyle["fill"]:
for r, g, b, a in self.renderstyle["fill"]:
ctx.set_source_rgba(r, g, b, a)
ctx.glyph_path([self.glyph])
ctx.fill()
ctx.restore()
# Load the set of characters contained in the given text file
def load_char_list(filename):
f = codecs.open(filename, "r", "utf-8")
@@ -90,22 +92,25 @@ def load_char_list(filename):
f.close()
return set(chars)
# Construct a Cairo context and surface for rendering text with the given parameters
def setup_context(width, height, renderstyle):
format = (cairo.FORMAT_ARGB32 if "colour" in renderstyle else cairo.FORMAT_A8)
format = cairo.FORMAT_ARGB32 if "colour" in renderstyle else cairo.FORMAT_A8
surface = cairo.ImageSurface(format, width, height)
ctx = cairo.Context(surface)
ctx.set_line_join(cairo.LINE_JOIN_ROUND)
return ctx, surface
def generate_font(outname, ttfNames, loadopts, size, renderstyle, dsizes):
def generate_font(outname, ttfNames, loadopts, size, renderstyle, dsizes):
faceList = []
indexList = []
for i in range(len(ttfNames)):
(face, indices) = FontLoader.create_cairo_font_face_for_file("../../../binaries/data/tools/fontbuilder/fonts/%s" % ttfNames[i], 0, loadopts)
(face, indices) = FontLoader.create_cairo_font_face_for_file(
"../../../binaries/data/tools/fontbuilder/fonts/%s" % ttfNames[i], 0, loadopts
)
faceList.append(face)
if not ttfNames[i] in dsizes:
if ttfNames[i] not in dsizes:
dsizes[ttfNames[i]] = 0
indexList.append(indices)
@@ -123,32 +128,36 @@ def generate_font(outname, ttfNames, loadopts, size, renderstyle, dsizes):
# Translate all the characters into glyphs
# (This is inefficient if multiple characters have the same glyph)
glyphs = []
#for c in chars:
# for c in chars:
for c in range(0x20, 0xFFFE):
for i in range(len(indexList)):
idx = indexList[i](chr(c))
if c == 0xFFFD and idx == 0: # use "?" if the missing-glyph glyph is missing
if c == 0xFFFD and idx == 0: # use "?" if the missing-glyph glyph is missing
idx = indexList[i]("?")
if idx:
glyphs.append(Glyph(ctx, renderstyle, chr(c), idx, faceList[i], size + dsizes[ttfNames[i]]))
glyphs.append(
Glyph(ctx, renderstyle, chr(c), idx, faceList[i], size + dsizes[ttfNames[i]])
)
break
# Sort by decreasing height (tie-break on decreasing width)
glyphs.sort(key = lambda g: (-g.h, -g.w))
glyphs.sort(key=lambda g: (-g.h, -g.w))
# Try various sizes to pack the glyphs into
sizes = []
for h in [32, 64, 128, 256, 512, 1024, 2048, 4096]:
sizes.append((h, h))
sizes.append((h*2, h))
sizes.sort(key = lambda w_h: (w_h[0]*w_h[1], max(w_h[0], w_h[1]))) # prefer smaller and squarer
sizes.append((h * 2, h))
sizes.sort(
key=lambda w_h: (w_h[0] * w_h[1], max(w_h[0], w_h[1]))
) # prefer smaller and squarer
for w, h in sizes:
try:
# Using the dumb packer usually creates bigger textures, but runs faster
# In practice the size difference is so small it always ends up in the same size
packer = Packer.DumbRectanglePacker(w, h)
#packer = Packer.CygonRectanglePacker(w, h)
# packer = Packer.CygonRectanglePacker(w, h)
for g in glyphs:
g.pack(packer)
except Packer.OutOfSpaceError:
@@ -168,7 +177,7 @@ def generate_font(outname, ttfNames, loadopts, size, renderstyle, dsizes):
fnt.write("%d\n" % linespacing)
fnt.write("%d\n" % charheight)
# sorting unneeded, as glyphs are added in increasing order
#glyphs.sort(key = lambda g: ord(g.char))
# glyphs.sort(key = lambda g: ord(g.char))
for g in glyphs:
x0 = g.x0
y0 = g.y0
@@ -179,31 +188,39 @@ def generate_font(outname, ttfNames, loadopts, size, renderstyle, dsizes):
# glyph by an arbitrary amount to make it roughly the right
# place when used after an a-macron glyph.
if ord(g.char) == 0x0301:
y0 += charheight/3
y0 += charheight / 3
fnt.write("%d %d %d %d %d %d %d %d\n" % (ord(g.char), g.pos.x, h-g.pos.y, g.w, g.h, -x0, y0, g.xadvance))
fnt.write(
"%d %d %d %d %d %d %d %d\n"
% (ord(g.char), g.pos.x, h - g.pos.y, g.w, g.h, -x0, y0, g.xadvance)
)
fnt.close()
return
print("Failed to fit glyphs in texture")
filled = { "fill": [(1, 1, 1, 1)] }
stroked1 = { "colour": True, "stroke": [((0, 0, 0, 1), 2.0), ((0, 0, 0, 1), 2.0)], "fill": [(1, 1, 1, 1)] }
stroked2 = { "colour": True, "stroke": [((0, 0, 0, 1), 2.0)], "fill": [(1, 1, 1, 1), (1, 1, 1, 1)] }
stroked3 = { "colour": True, "stroke": [((0, 0, 0, 1), 2.5)], "fill": [(1, 1, 1, 1), (1, 1, 1, 1)] }
filled = {"fill": [(1, 1, 1, 1)]}
stroked1 = {
"colour": True,
"stroke": [((0, 0, 0, 1), 2.0), ((0, 0, 0, 1), 2.0)],
"fill": [(1, 1, 1, 1)],
}
stroked2 = {"colour": True, "stroke": [((0, 0, 0, 1), 2.0)], "fill": [(1, 1, 1, 1), (1, 1, 1, 1)]}
stroked3 = {"colour": True, "stroke": [((0, 0, 0, 1), 2.5)], "fill": [(1, 1, 1, 1), (1, 1, 1, 1)]}
# For extra glyph support, add your preferred font to the font array
Sans = (["LinBiolinum_Rah.ttf","FreeSans.ttf"], FontLoader.FT_LOAD_DEFAULT)
Sans_Bold = (["LinBiolinum_RBah.ttf","FreeSansBold.ttf"], FontLoader.FT_LOAD_DEFAULT)
Sans_Italic = (["LinBiolinum_RIah.ttf","FreeSansOblique.ttf"], FontLoader.FT_LOAD_DEFAULT)
SansMono = (["DejaVuSansMono.ttf","FreeMono.ttf"], FontLoader.FT_LOAD_DEFAULT)
Serif = (["texgyrepagella-regular.otf","FreeSerif.ttf"], FontLoader.FT_LOAD_NO_HINTING)
Serif_Bold = (["texgyrepagella-bold.otf","FreeSerifBold.ttf"], FontLoader.FT_LOAD_NO_HINTING)
Sans = (["LinBiolinum_Rah.ttf", "FreeSans.ttf"], FontLoader.FT_LOAD_DEFAULT)
Sans_Bold = (["LinBiolinum_RBah.ttf", "FreeSansBold.ttf"], FontLoader.FT_LOAD_DEFAULT)
Sans_Italic = (["LinBiolinum_RIah.ttf", "FreeSansOblique.ttf"], FontLoader.FT_LOAD_DEFAULT)
SansMono = (["DejaVuSansMono.ttf", "FreeMono.ttf"], FontLoader.FT_LOAD_DEFAULT)
Serif = (["texgyrepagella-regular.otf", "FreeSerif.ttf"], FontLoader.FT_LOAD_NO_HINTING)
Serif_Bold = (["texgyrepagella-bold.otf", "FreeSerifBold.ttf"], FontLoader.FT_LOAD_NO_HINTING)
# Define the size differences used to render different fallback fonts
# I.e. when adding a fallback font has smaller glyphs than the original, you can bump it
dsizes = {'HanaMinA.ttf': 2} # make the glyphs for the (chinese font 2 pts bigger)
dsizes = {"HanaMinA.ttf": 2} # make the glyphs for the (chinese font 2 pts bigger)
fonts = (
("mono-10", SansMono, 10, filled),
@@ -231,6 +248,8 @@ fonts = (
("sans-stroke-16", Sans, 16, stroked2),
)
for (name, (fontnames, loadopts), size, style) in fonts:
for name, (fontnames, loadopts), size, style in fonts:
print("%s..." % name)
generate_font("../../../binaries/data/mods/mod/fonts/%s" % name, fontnames, loadopts, size, style, dsizes)
generate_font(
"../../../binaries/data/mods/mod/fonts/%s" % name, fontnames, loadopts, size, style, dsizes
)

View File

@@ -16,6 +16,8 @@
# You should have received a copy of the GNU General Public License
# along with 0 A.D. If not, see <http://www.gnu.org/licenses/>.
# ruff: noqa: E741
import io
import os
import subprocess
@@ -23,6 +25,7 @@ from typing import List
from i18n_helper import projectRootDirectory
def get_diff():
"""Return a diff using svn diff"""
os.chdir(projectRootDirectory)
@@ -31,9 +34,10 @@ def get_diff():
if diff_process.returncode != 0:
print(f"Error running svn diff: {diff_process.stderr.decode('utf-8')}. Exiting.")
return
return io.StringIO(diff_process.stdout.decode('utf-8'))
return io.StringIO(diff_process.stdout.decode("utf-8"))
def check_diff(diff : io.StringIO, verbose = False) -> List[str]:
def check_diff(diff: io.StringIO, verbose=False) -> List[str]:
"""Run through a diff of .po files and check that some of the changes
are real translations changes and not just noise (line changes....).
The algorithm isn't extremely clever, but it is quite fast."""
@@ -57,13 +61,18 @@ def check_diff(diff : io.StringIO, verbose = False) -> List[str]:
diff.readline()
l = diff.readline()
continue
if l[0] != '-' and l[0] != '+':
if l[0] != "-" and l[0] != "+":
l = diff.readline()
continue
if l[1:].strip() == "" or (l[1] == '#' and l[2] == ':'):
if l[1:].strip() == "" or (l[1] == "#" and l[2] == ":"):
l = diff.readline()
continue
if "# Copyright (C)" in l or "POT-Creation-Date:" in l or "PO-Revision-Date" in l or "Last-Translator" in l:
if (
"# Copyright (C)" in l
or "POT-Creation-Date:" in l
or "PO-Revision-Date" in l
or "Last-Translator" in l
):
l = diff.readline()
continue
# We've hit a real line
@@ -75,23 +84,25 @@ def check_diff(diff : io.StringIO, verbose = False) -> List[str]:
return list(files.difference(keep))
def revert_files(files: List[str], verbose = False):
def revert_files(files: List[str], verbose=False):
revert_process = subprocess.run(["svn", "revert"] + files, capture_output=True)
if revert_process.returncode != 0:
print(f"Warning: Some files could not be reverted. Error: {revert_process.stderr.decode('utf-8')}")
print(
f"Warning: Some files could not be reverted. Error: {revert_process.stderr.decode('utf-8')}"
)
if verbose:
for file in files:
print(f"Reverted {file}")
def add_untracked(verbose = False):
def add_untracked(verbose=False):
"""Add untracked .po files to svn"""
diff_process = subprocess.run(["svn", "st", "binaries"], capture_output=True)
if diff_process.stderr != b'':
if diff_process.stderr != b"":
print(f"Error running svn st: {diff_process.stderr.decode('utf-8')}. Exiting.")
return
for line in diff_process.stdout.decode('utf-8').split('\n'):
for line in diff_process.stdout.decode("utf-8").split("\n"):
if not line.startswith("?"):
continue
# Ignore non PO files. This is important so that the translator credits
@@ -100,16 +111,17 @@ def add_untracked(verbose = False):
if not file.endswith(".po") and not file.endswith(".pot"):
continue
add_process = subprocess.run(["svn", "add", file, "--parents"], capture_output=True)
if add_process.stderr != b'':
if add_process.stderr != b"":
print(f"Warning: file {file} could not be added.")
if verbose:
print(f"Added {file}")
if __name__ == '__main__':
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--verbose", help="Print reverted files.", action='store_true')
parser.add_argument("--verbose", help="Print reverted files.", action="store_true")
args = parser.parse_args()
need_revert = check_diff(get_diff(), args.verbose)
revert_files(need_revert, args.verbose)


@@ -16,7 +16,10 @@
# You should have received a copy of the GNU General Public License
# along with 0 A.D. If not, see <http://www.gnu.org/licenses/>.
import sys, os, re, multiprocessing
import sys
import os
import re
import multiprocessing
from i18n_helper import l10nFolderName, projectRootDirectory
from i18n_helper.catalog import Catalog
@@ -27,14 +30,17 @@ VERBOSE = 0
class MessageChecker:
"""Checks all messages in a catalog against a regex."""
def __init__(self, human_name, regex):
self.regex = re.compile(regex, re.IGNORECASE)
self.human_name = human_name
def check(self, inputFilePath, templateMessage, translatedCatalogs):
patterns = set(self.regex.findall(
templateMessage.id[0] if templateMessage.pluralizable else templateMessage.id
))
patterns = set(
self.regex.findall(
templateMessage.id[0] if templateMessage.pluralizable else templateMessage.id
)
)
# As a sanity check, verify that the template message is coherent.
# Note that these tend to be false positives.
@@ -42,23 +48,32 @@ class MessageChecker:
if templateMessage.pluralizable:
pluralUrls = set(self.regex.findall(templateMessage.id[1]))
if pluralUrls.difference(patterns):
print(f"{inputFilePath} - Different {self.human_name} in singular and plural source strings "
f"for '{templateMessage}' in '{inputFilePath}'")
print(
f"{inputFilePath} - Different {self.human_name} in singular and plural source strings "
f"for '{templateMessage}' in '{inputFilePath}'"
)
for translationCatalog in translatedCatalogs:
translationMessage = translationCatalog.get(
templateMessage.id, templateMessage.context)
templateMessage.id, templateMessage.context
)
if not translationMessage:
continue
translatedPatterns = set(self.regex.findall(
translationMessage.string[0] if translationMessage.pluralizable else translationMessage.string
))
translatedPatterns = set(
self.regex.findall(
translationMessage.string[0]
if translationMessage.pluralizable
else translationMessage.string
)
)
unknown_patterns = translatedPatterns.difference(patterns)
if unknown_patterns:
print(f'{inputFilePath} - {translationCatalog.locale}: '
f'Found unknown {self.human_name} {", ".join(["`" + x + "`" for x in unknown_patterns])} in the translation '
f'which do not match any of the URLs in the template: {", ".join(["`" + x + "`" for x in patterns])}')
print(
f'{inputFilePath} - {translationCatalog.locale}: '
f'Found unknown {self.human_name} {", ".join(["`" + x + "`" for x in unknown_patterns])} in the translation '
f'which do not match any of the URLs in the template: {", ".join(["`" + x + "`" for x in patterns])}'
)
if templateMessage.pluralizable and translationMessage.pluralizable:
for indx, val in enumerate(translationMessage.string):
@@ -67,9 +82,12 @@ class MessageChecker:
translatedPatternsMulti = set(self.regex.findall(val))
unknown_patterns_multi = translatedPatternsMulti.difference(pluralUrls)
if unknown_patterns_multi:
print(f'{inputFilePath} - {translationCatalog.locale}: '
f'Found unknown {self.human_name} {", ".join(["`" + x + "`" for x in unknown_patterns_multi])} in the pluralised translation '
f'which do not match any of the URLs in the template: {", ".join(["`" + x + "`" for x in pluralUrls])}')
print(
f'{inputFilePath} - {translationCatalog.locale}: '
f'Found unknown {self.human_name} {", ".join(["`" + x + "`" for x in unknown_patterns_multi])} in the pluralised translation '
f'which do not match any of the URLs in the template: {", ".join(["`" + x + "`" for x in pluralUrls])}'
)
def check_translations(inputFilePath):
if VERBOSE:
@@ -100,23 +118,29 @@ def check_translations(inputFilePath):
def main():
print("\n\tWARNING: Remember to regenerate the POT files with “updateTemplates.py” "
"before you run this script.\n\tPOT files are not in the repository.\n")
print(
"\n\tWARNING: Remember to regenerate the POT files with “updateTemplates.py” "
"before you run this script.\n\tPOT files are not in the repository.\n"
)
foundPots = 0
for root, folders, filenames in os.walk(projectRootDirectory):
for filename in filenames:
if len(filename) > 4 and filename[-4:] == ".pot" and os.path.basename(root) == l10nFolderName:
if (
len(filename) > 4
and filename[-4:] == ".pot"
and os.path.basename(root) == l10nFolderName
):
foundPots += 1
multiprocessing.Process(
target=check_translations,
args=(os.path.join(root, filename), )
target=check_translations, args=(os.path.join(root, filename),)
).start()
if foundPots == 0:
print(
"This script did not work because no '.pot' files were found. "
"Please run 'updateTemplates.py' to generate the '.pot' files, "
"and run 'pullTranslations.py' to pull the latest translations from Transifex. "
"Then you can run this script to check for spam in translations.")
"Then you can run this script to check for spam in translations."
)
if __name__ == "__main__":


@@ -26,10 +26,15 @@ However that needs to be fixed on the transifex side, see rP25896. For now
strip the e-mails using this script.
"""
import sys, os, glob, re, fileinput
import sys
import os
import glob
import re
import fileinput
from i18n_helper import l10nFolderName, transifexClientFolder, projectRootDirectory
def main():
translatorMatch = re.compile(r"^(#\s+[^,<]*)\s+<.*>(.*)")
lastTranslatorMatch = re.compile(r"^(\"Last-Translator:[^,<]*)\s+<.*>(.*)")
@@ -43,7 +48,9 @@ def main():
for file in files:
usernames = []
reached = False
for line in fileinput.input(file.replace("\\", "/"), inplace=True, encoding="utf-8"):
for line in fileinput.input(
file.replace("\\", "/"), inplace=True, encoding="utf-8"
):
if reached:
if line == "# \n":
line = ""
@@ -61,5 +68,6 @@ def main():
reached = True
sys.stdout.write(line)
if __name__ == "__main__":
main()


@@ -29,7 +29,9 @@ Translatable strings will be extracted from the generated file, so this should b
once before updateTemplates.py.
"""
import json, os, re
import json
import os
import re
from collections import defaultdict
from pathlib import Path
@@ -44,13 +46,23 @@ for root, folders, filenames in os.walk(projectRootDirectory):
if os.path.exists(os.path.join(root, folder, transifexClientFolder)):
poLocations.append(os.path.join(root, folder))
creditsLocation = os.path.join(projectRootDirectory, 'binaries', 'data', 'mods', 'public', 'gui', 'credits', 'texts', 'translators.json')
creditsLocation = os.path.join(
projectRootDirectory,
"binaries",
"data",
"mods",
"public",
"gui",
"credits",
"texts",
"translators.json",
)
# This dictionary will hold creditors lists for each language, indexed by code
langsLists = defaultdict(list)
# Create the new JSON data
newJSONData = {'Title': 'Translators', 'Content': []}
newJSONData = {"Title": "Translators", "Content": []}
# Now go through the list of languages and search the .po files for people
@@ -60,7 +72,7 @@ deletedUsernameMatch = re.compile(r"[0-9a-f]{32}(_[0-9a-f]{7})?")
# Search
for location in poLocations:
files = Path(location).glob('*.po')
files = Path(location).glob("*.po")
for file in files:
lang = file.stem.split(".")[0]
@@ -69,7 +81,7 @@ for location in poLocations:
if lang == "debug" or lang == "long":
continue
with file.open(encoding='utf-8') as poFile:
with file.open(encoding="utf-8") as poFile:
reached = False
for line in poFile:
if reached:
@@ -80,7 +92,7 @@ for location in poLocations:
username = m.group(1)
if not deletedUsernameMatch.fullmatch(username):
langsLists[lang].append(username)
if line.strip() == '# Translators:':
if line.strip() == "# Translators:":
reached = True
# Sort translator names and remove duplicates
@@ -100,18 +112,18 @@ for langCode, langList in sorted(langsLists.items()):
try:
lang_name = Locale.parse(langCode).english_name
except UnknownLocaleError:
lang_name = Locale.parse('en').languages.get(langCode)
lang_name = Locale.parse("en").languages.get(langCode)
if not lang_name:
raise
translators = [{'name': name} for name in langList]
newJSONData['Content'].append({'LangName': lang_name, 'List': translators})
translators = [{"name": name} for name in langList]
newJSONData["Content"].append({"LangName": lang_name, "List": translators})
# Sort languages by their English names
newJSONData['Content'] = sorted(newJSONData['Content'], key=lambda x: x['LangName'])
newJSONData["Content"] = sorted(newJSONData["Content"], key=lambda x: x["LangName"])
# Save the JSON data to the credits file
creditsFile = open(creditsLocation, 'w', encoding='utf-8')
creditsFile = open(creditsLocation, "w", encoding="utf-8")
json.dump(newJSONData, creditsFile, indent=4)
creditsFile.close()


@@ -20,14 +20,17 @@
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import codecs, re, os, sys
import codecs
import re
import os
import sys
import json as jsonParser
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
from textwrap import dedent
def pathmatch(mask, path):
""" Matches paths to a mask, where the mask supports * and **.
"""Matches paths to a mask, where the mask supports * and **.
Paths use / as the separator
* matches a sequence of characters without /.
@@ -45,13 +48,11 @@ def pathmatch(mask, path):
else:
p = p + re.escape(s[i])
p = p + "$"
return re.match(p, path) != None
return re.match(p, path) is not None
class Extractor(object):
def __init__(self, directoryPath, filemasks, options):
self.directoryPath = directoryPath
self.options = options
@@ -62,9 +63,8 @@ class Extractor(object):
self.includeMasks = filemasks
self.excludeMasks = []
def run(self):
""" Extracts messages.
"""Extracts messages.
:return: An iterator over ``(message, plural, context, (location, pos), comment)`` tuples.
:rtype: ``iterator``
@@ -73,12 +73,14 @@
directoryAbsolutePath = os.path.abspath(self.directoryPath)
for root, folders, filenames in os.walk(directoryAbsolutePath):
for subdir in folders:
if subdir.startswith('.') or subdir.startswith('_'):
if subdir.startswith(".") or subdir.startswith("_"):
folders.remove(subdir)
folders.sort()
filenames.sort()
for filename in filenames:
filename = os.path.relpath(os.path.join(root, filename), self.directoryPath).replace(os.sep, '/')
filename = os.path.relpath(
os.path.join(root, filename), self.directoryPath
).replace(os.sep, "/")
for filemask in self.excludeMasks:
if pathmatch(filemask, filename):
break
@@ -86,7 +88,13 @@
for filemask in self.includeMasks:
if pathmatch(filemask, filename):
filepath = os.path.join(directoryAbsolutePath, filename)
for message, plural, context, position, comments in self.extractFromFile(filepath):
for (
message,
plural,
context,
position,
comments,
) in self.extractFromFile(filepath):
if empty_string_pattern.match(message):
continue
@@ -94,9 +102,8 @@
filename = "\u2068" + filename + "\u2069"
yield message, plural, context, (filename, position), comments
def extractFromFile(self, filepath):
""" Extracts messages from a specific file.
"""Extracts messages from a specific file.
:return: An iterator over ``(message, plural, context, position, comments)`` tuples.
:rtype: ``iterator``
@@ -104,17 +111,17 @@
pass
class javascript(Extractor):
""" Extract messages from JavaScript source code.
"""
"""Extract messages from JavaScript source code."""
empty_msgid_warning = ( '%s: warning: Empty msgid. It is reserved by GNU gettext: gettext("") '
'returns the header entry with meta information, not the empty string.' )
empty_msgid_warning = (
'%s: warning: Empty msgid. It is reserved by GNU gettext: gettext("") '
"returns the header entry with meta information, not the empty string."
)
def extractJavascriptFromFile(self, fileObject):
from babel.messages.jslexer import tokenize, unquote_string
funcname = message_lineno = None
messages = []
last_argument = None
@ -122,21 +129,21 @@ class javascript(Extractor):
concatenate_next = False
last_token = None
call_stack = -1
comment_tags = self.options.get('commentTags', [])
keywords = self.options.get('keywords', {}).keys()
comment_tags = self.options.get("commentTags", [])
keywords = self.options.get("keywords", {}).keys()
for token in tokenize(fileObject.read(), dotted=False):
if token.type == 'operator' and \
(token.value == '(' or (call_stack != -1 and \
(token.value == '[' or token.value == '{'))):
if token.type == "operator" and (
token.value == "("
or (call_stack != -1 and (token.value == "[" or token.value == "{"))
):
if funcname:
message_lineno = token.lineno
call_stack += 1
elif call_stack == -1 and token.type == 'linecomment':
elif call_stack == -1 and token.type == "linecomment":
value = token.value[2:].strip()
if translator_comments and \
translator_comments[-1][0] == token.lineno - 1:
if translator_comments and translator_comments[-1][0] == token.lineno - 1:
translator_comments.append((token.lineno, value))
continue
@ -145,7 +152,7 @@ class javascript(Extractor):
translator_comments.append((token.lineno, value.strip()))
break
elif token.type == 'multilinecomment':
elif token.type == "multilinecomment":
# only one multi-line comment may precede a translation
translator_comments = []
value = token.value[2:-2].strip()
@ -154,14 +161,13 @@ class javascript(Extractor):
lines = value.splitlines()
if lines:
lines[0] = lines[0].strip()
lines[1:] = dedent('\n'.join(lines[1:])).splitlines()
lines[1:] = dedent("\n".join(lines[1:])).splitlines()
for offset, line in enumerate(lines):
translator_comments.append((token.lineno + offset,
line))
translator_comments.append((token.lineno + offset, line))
break
elif funcname and call_stack == 0:
if token.type == 'operator' and token.value == ')':
if token.type == "operator" and token.value == ")":
if last_argument is not None:
messages.append(last_argument)
if len(messages) > 1:
@ -173,13 +179,16 @@ class javascript(Extractor):
# Comments don't apply unless they immediately precede the
# message
if translator_comments and \
translator_comments[-1][0] < message_lineno - 1:
if translator_comments and translator_comments[-1][0] < message_lineno - 1:
translator_comments = []
if messages is not None:
yield (message_lineno, funcname, messages,
[comment[1] for comment in translator_comments])
yield (
message_lineno,
funcname,
messages,
[comment[1] for comment in translator_comments],
)
funcname = message_lineno = last_argument = None
concatenate_next = False
@ -187,47 +196,54 @@ class javascript(Extractor):
messages = []
call_stack = -1
elif token.type == 'string':
elif token.type == "string":
new_value = unquote_string(token.value)
if concatenate_next:
last_argument = (last_argument or '') + new_value
last_argument = (last_argument or "") + new_value
concatenate_next = False
else:
last_argument = new_value
elif token.type == 'operator':
if token.value == ',':
elif token.type == "operator":
if token.value == ",":
if last_argument is not None:
messages.append(last_argument)
last_argument = None
else:
messages.append(None)
concatenate_next = False
elif token.value == '+':
elif token.value == "+":
concatenate_next = True
elif call_stack > 0 and token.type == 'operator' and \
(token.value == ')' or token.value == ']' or token.value == '}'):
elif (
call_stack > 0
and token.type == "operator"
and (token.value == ")" or token.value == "]" or token.value == "}")
):
call_stack -= 1
elif funcname and call_stack == -1:
funcname = None
elif call_stack == -1 and token.type == 'name' and \
token.value in keywords and \
(last_token is None or last_token.type != 'name' or
last_token.value != 'function'):
elif (
call_stack == -1
and token.type == "name"
and token.value in keywords
and (
last_token is None
or last_token.type != "name"
or last_token.value != "function"
)
):
funcname = token.value
last_token = token
def extractFromFile(self, filepath):
with codecs.open(filepath, 'r', encoding='utf-8-sig') as fileObject:
with codecs.open(filepath, "r", encoding="utf-8-sig") as fileObject:
for lineno, funcname, messages, comments in self.extractJavascriptFromFile(fileObject):
if funcname:
spec = self.options.get('keywords', {})[funcname] or (1,)
spec = self.options.get("keywords", {})[funcname] or (1,)
else:
spec = (1,)
if not isinstance(messages, (list, tuple)):
@ -265,8 +281,10 @@ class javascript(Extractor):
first_msg_index = spec[0] - 1
if not messages[first_msg_index]:
# An empty string msgid isn't valid, emit a warning
where = '%s:%i' % (hasattr(fileObject, 'name') and \
fileObject.name or '(unknown)', lineno)
where = "%s:%i" % (
hasattr(fileObject, "name") and fileObject.name or "(unknown)",
lineno,
)
print(self.empty_msgid_warning % where, file=sys.stderr)
continue
@ -279,20 +297,17 @@ class javascript(Extractor):
yield message, plural, context, lineno, comments
class cpp(javascript):
""" Extract messages from C++ source code.
"""
"""Extract messages from C++ source code."""
pass
class txt(Extractor):
""" Extract messages from plain text files.
"""
"""Extract messages from plain text files."""
def extractFromFile(self, filepath):
with codecs.open(filepath, "r", encoding='utf-8-sig') as fileObject:
with codecs.open(filepath, "r", encoding="utf-8-sig") as fileObject:
lineno = 0
for line in [line.strip("\n\r") for line in fileObject.readlines()]:
lineno += 1
@ -300,10 +315,8 @@ class txt(Extractor):
yield line, None, None, lineno, []
class json(Extractor):
""" Extract messages from JSON files.
"""
"""Extract messages from JSON files."""
def __init__(self, directoryPath=None, filemasks=[], options={}):
super(json, self).__init__(directoryPath, filemasks, options)
@ -318,7 +331,7 @@ class json(Extractor):
self.comments = self.options.get("comments", [])
def extractFromFile(self, filepath):
with codecs.open(filepath, "r", 'utf-8') as fileObject:
with codecs.open(filepath, "r", "utf-8") as fileObject:
for message, context in self.extractFromString(fileObject.read()):
yield message, None, context, None, self.comments
@ -326,14 +339,16 @@ class json(Extractor):
jsonDocument = jsonParser.loads(string)
if isinstance(jsonDocument, list):
for message, context in self.parseList(jsonDocument):
if message: # Skip empty strings.
if message: # Skip empty strings.
yield message, context
elif isinstance(jsonDocument, dict):
for message, context in self.parseDictionary(jsonDocument):
if message: # Skip empty strings.
if message: # Skip empty strings.
yield message, context
else:
raise Exception("Unexpected JSON document parent structure (not a list or a dictionary). You must extend the JSON extractor to support it.")
raise Exception(
"Unexpected JSON document parent structure (not a list or a dictionary). You must extend the JSON extractor to support it."
)
def parseList(self, itemsList):
index = 0
@ -356,8 +371,13 @@ class json(Extractor):
yield message, context
elif isinstance(dictionary[keyword], dict):
extract = None
if "extractFromInnerKeys" in self.keywords[keyword] and self.keywords[keyword]["extractFromInnerKeys"]:
for message, context in self.extractDictionaryInnerKeys(dictionary[keyword], keyword):
if (
"extractFromInnerKeys" in self.keywords[keyword]
and self.keywords[keyword]["extractFromInnerKeys"]
):
for message, context in self.extractDictionaryInnerKeys(
dictionary[keyword], keyword
):
yield message, context
else:
extract = self.extractDictionary(dictionary[keyword], keyword)
@ -386,7 +406,7 @@ class json(Extractor):
if isinstance(listItem, str):
yield self.extractString(listItem, keyword)
elif isinstance(listItem, dict):
extract = self.extractDictionary(dictionary[keyword], keyword)
extract = self.extractDictionary(listItem[keyword], keyword)
if extract:
yield extract
index += 1
@ -420,8 +440,7 @@ class json(Extractor):
class xml(Extractor):
""" Extract messages from XML files.
"""
"""Extract messages from XML files."""
def __init__(self, directoryPath, filemasks, options):
super(xml, self).__init__(directoryPath, filemasks, options)
@ -435,7 +454,8 @@ class xml(Extractor):
def extractFromFile(self, filepath):
from lxml import etree
with codecs.open(filepath, "r", encoding='utf-8-sig') as fileObject:
with codecs.open(filepath, "r", encoding="utf-8-sig") as fileObject:
xmlDocument = etree.parse(fileObject)
for keyword in self.keywords:
for element in xmlDocument.iter(keyword):
@ -457,7 +477,9 @@ class xml(Extractor):
context = self.keywords[keyword]["customContext"]
if "comment" in element.attrib:
comment = element.get("comment")
comment = u" ".join(comment.split()) # Remove tabs, line breaks and unecessary spaces.
comment = " ".join(
comment.split()
) # Remove tabs, line breaks and unnecessary spaces.
comments.append(comment)
if "splitOnWhitespace" in self.keywords[keyword]:
for splitText in element.text.split():
@ -470,21 +492,22 @@ class xml(Extractor):
# Hack from http://stackoverflow.com/a/2819788
class FakeSectionHeader(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[root]\n'
self.sechead = "[root]\n"
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else: return self.fp.readline()
try:
return self.sechead
finally:
self.sechead = None
else:
return self.fp.readline()
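The same trick as a self-contained sketch against the modern configparser API (key names hypothetical; the code above targets the Python 2 ConfigParser):

import configparser

config = configparser.ConfigParser()
# Prepend the fake section header so a section-less INI body parses.
config.read_string("[root]\nwindowed = true\nshowdetailedtooltips = false\n")
print(config.get("root", "windowed"))  # -> true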
class ini(Extractor):
""" Extract messages from INI files.
"""
"""Extract messages from INI files."""
def __init__(self, directoryPath, filemasks, options):
super(ini, self).__init__(directoryPath, filemasks, options)
@ -492,6 +515,7 @@ class ini(Extractor):
def extractFromFile(self, filepath):
import ConfigParser
config = ConfigParser.RawConfigParser()
config.readfp(FakeSectionHeader(open(filepath)))
for keyword in self.keywords:

View File

@ -26,16 +26,16 @@ from i18n_helper.catalog import Catalog
from i18n_helper.globber import getCatalogs
DEBUG_PREFIX = 'X_X '
DEBUG_PREFIX = "X_X "
def generate_long_strings(root_path, input_file_name, output_file_name, languages=None):
"""
Generate the 'long strings' debug catalog.
This catalog contains the longest singular and plural string,
found amongst all translated languages or a filtered subset.
It can be used to check if GUI elements are large enough.
The catalog is long.*.po
Generate the 'long strings' debug catalog.
This catalog contains the longest singular and plural string,
found amongst all translated languages or a filtered subset.
It can be used to check if GUI elements are large enough.
The catalog is long.*.po
"""
print("Generating", output_file_name)
input_file_path = os.path.join(root_path, input_file_name)
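The selection rule reduces to this sketch over hypothetical translations (the real code walks babel catalogs and also handles plural forms):

longest = {}
for catalog in (
    {"Attack": "Attaquer"},   # hypothetical French strings
    {"Attack": "Angreifen"},  # hypothetical German strings
):
    for msg_id, string in catalog.items():
        # Start from the English id and keep whichever translation is longest.
        if len(string) > len(longest.get(msg_id, msg_id)):
            longest[msg_id] = string
print(longest)  # {'Attack': 'Angreifen'}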
@ -48,8 +48,11 @@ def generate_long_strings(root_path, input_file_name, output_file_name, language
# Fill catalog with English strings.
for message in template_catalog:
long_string_catalog.add(
id=message.id, string=message.id, context=message.context,
auto_comments=message.auto_comments)
id=message.id,
string=message.id,
context=message.context,
auto_comments=message.auto_comments,
)
# Load existing translation catalogs.
existing_translation_catalogs = getCatalogs(input_file_path, languages)
@ -58,18 +61,23 @@ def generate_long_strings(root_path, input_file_name, output_file_name, language
for translation_catalog in existing_translation_catalogs:
for long_string_catalog_message in long_string_catalog:
translation_message = translation_catalog.get(
long_string_catalog_message.id, long_string_catalog_message.context)
long_string_catalog_message.id, long_string_catalog_message.context
)
if not translation_message or not translation_message.string:
continue
if not long_string_catalog_message.pluralizable or not translation_message.pluralizable:
if (
not long_string_catalog_message.pluralizable
or not translation_message.pluralizable
):
if len(translation_message.string) > len(long_string_catalog_message.string):
long_string_catalog_message.string = translation_message.string
continue
longest_singular_string = translation_message.string[0]
longest_plural_string = translation_message.string[1 if len(
translation_message.string) > 1 else 0]
longest_plural_string = translation_message.string[
1 if len(translation_message.string) > 1 else 0
]
candidate_singular_string = long_string_catalog_message.string[0]
# There might be between 0 and infinite plural forms.
@ -88,17 +96,19 @@ def generate_long_strings(root_path, input_file_name, output_file_name, language
if changed:
long_string_catalog_message.string = [
longest_singular_string, longest_plural_string]
longest_singular_string,
longest_plural_string,
]
translation_message = long_string_catalog_message
long_string_catalog.writeTo(output_file_path)
def generate_debug(root_path, input_file_name, output_file_name):
"""
Generate a debug catalog to identify untranslated strings.
This prefixes all strings with DEBUG_PREFIX, to easily identify
untranslated strings while still making the game navigable.
The catalog is debug.*.po
Generate a debug catalog to identify untranslated strings.
This prefixes all strings with DEBUG_PREFIX, to easily identify
untranslated strings while still making the game navigable.
The catalog is debug.*.po
"""
print("Generating", output_file_name)
input_file_path = os.path.join(root_path, input_file_name)
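The prefixing rule itself is tiny; a sketch with hypothetical message ids (pluralizable entries are tuples, as in the loop below):

DEBUG_PREFIX = "X_X "

def debug_string(message_id):
    # Pluralizable ids are tuples; only the singular form gets the prefix.
    if isinstance(message_id, tuple):
        return (DEBUG_PREFIX + message_id[0],)
    return DEBUG_PREFIX + message_id

print(debug_string("Pause"))            # 'X_X Pause'
print(debug_string(("unit", "units")))  # ('X_X unit',)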
@ -114,28 +124,34 @@ def generate_debug(root_path, input_file_name, output_file_name):
id=message.id,
string=(DEBUG_PREFIX + message.id[0],),
context=message.context,
auto_comments=message.auto_comments)
auto_comments=message.auto_comments,
)
else:
out_catalog.add(
id=message.id,
string=DEBUG_PREFIX + message.id,
context=message.context,
auto_comments=message.auto_comments)
auto_comments=message.auto_comments,
)
out_catalog.writeTo(output_file_path)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--debug",
help="Generate debug localisation to identify non-translated strings.",
action="store_true")
parser.add_argument("--long",
help="Generate 'long strings' localisation to identify GUI elements too small.",
action="store_true")
parser.add_argument("--languages",
nargs="+",
help="For long strings, restrict to these languages")
parser.add_argument(
"--debug",
help="Generate debug localisation to identify non-translated strings.",
action="store_true",
)
parser.add_argument(
"--long",
help="Generate 'long strings' localisation to identify GUI elements too small.",
action="store_true",
)
parser.add_argument(
"--languages", nargs="+", help="For long strings, restrict to these languages"
)
args = parser.parse_args()
if not args.debug and not args.long:
@ -145,24 +161,28 @@ def main():
found_pot_files = 0
for root, _, filenames in os.walk(projectRootDirectory):
for filename in filenames:
if len(filename) > 4 and filename[-4:] == ".pot" and os.path.basename(root) == l10nFolderName:
if (
len(filename) > 4
and filename[-4:] == ".pot"
and os.path.basename(root) == l10nFolderName
):
found_pot_files += 1
if args.debug:
multiprocessing.Process(
target=generate_debug,
args=(root, filename, "debug." + filename[:-1])
target=generate_debug, args=(root, filename, "debug." + filename[:-1])
).start()
if args.long:
multiprocessing.Process(
target=generate_long_strings,
args=(root, filename, "long." +
filename[:-1], args.languages)
args=(root, filename, "long." + filename[:-1], args.languages),
).start()
if found_pot_files == 0:
print("This script did not work because no ‘.pot’ files were found. "
"Please, run ‘updateTemplates.py’ to generate the ‘.pot’ files, and run ‘pullTranslations.py’ to pull the latest translations from Transifex. "
"Then you can run this script to generate ‘.po’ files with obvious debug strings.")
print(
"This script did not work because no ‘.pot’ files were found. "
"Please, run ‘updateTemplates.py’ to generate the ‘.pot’ files, and run ‘pullTranslations.py’ to pull the latest translations from Transifex. "
"Then you can run this script to generate ‘.po’ files with obvious debug strings."
)
if __name__ == "__main__":

View File

@ -3,4 +3,6 @@ import os
l10nFolderName = "l10n"
transifexClientFolder = ".tx"
l10nToolsDirectory = os.path.dirname(os.path.realpath(__file__))
projectRootDirectory = os.path.abspath(os.path.join(l10nToolsDirectory, os.pardir, os.pardir, os.pardir, os.pardir))
projectRootDirectory = os.path.abspath(
os.path.join(l10nToolsDirectory, os.pardir, os.pardir, os.pardir, os.pardir)
)

View File

@ -1,14 +1,19 @@
"""Wrapper around babel Catalog / .po handling"""
from datetime import datetime
from babel.messages.catalog import Catalog as BabelCatalog
from babel.messages.pofile import read_po, write_po
class Catalog(BabelCatalog):
"""Wraps a BabelCatalog for convenience."""
def __init__(self, *args, project=None, copyright_holder=None, **other_kwargs):
date = datetime.now()
super().__init__(*args, header_comment=(
super().__init__(
*args,
header_comment=(
f"# Translation template for {project}.\n"
f"# Copyright (C) {date.year} {copyright_holder}\n"
f"# This file is distributed under the same license as the {project} project."
@ -18,7 +23,8 @@ class Catalog(BabelCatalog):
charset="utf-8",
creation_date=date,
revision_date=date,
**other_kwargs)
**other_kwargs,
)
self._project = project
@BabelCatalog.mime_headers.getter
@ -31,14 +37,15 @@ class Catalog(BabelCatalog):
"MIME-Version",
"Content-Type",
"Content-Transfer-Encoding",
"Plural-Forms"}:
"Plural-Forms",
}:
headers.append((name, value))
return [('Project-Id-Version', self._project)] + headers
return [("Project-Id-Version", self._project)] + headers
@staticmethod
def readFrom(file_path, locale = None):
return read_po(open(file_path, "r+",encoding="utf-8"), locale=locale)
def readFrom(file_path, locale=None):
return read_po(open(file_path, "r+", encoding="utf-8"), locale=locale)
def writeTo(self, file_path):
return write_po(
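For reference, a minimal round trip through the underlying babel API this wrapper builds on (file name and strings are hypothetical):

from babel.messages.catalog import Catalog as BabelCatalog
from babel.messages.pofile import write_po

catalog = BabelCatalog(locale="en", project="0 A.D.")
catalog.add("Attack", context="hotkey")
with open("demo.po", "wb") as fh:  # write_po expects a binary file object
    write_po(fh, catalog)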

View File

@ -1,10 +1,12 @@
"""Utils to list .po"""
import os
from typing import List
from i18n_helper.catalog import Catalog
def getCatalogs(inputFilePath, filters : List[str] = None) -> List[Catalog]:
def getCatalogs(inputFilePath, filters: List[str] = None) -> List[Catalog]:
"""Returns a list of "real" catalogs (.po) in the given folder."""
existingTranslationCatalogs = []
l10nFolderPath = os.path.dirname(inputFilePath)
@ -17,6 +19,9 @@ def getCatalogs(inputFilePath, filters : List[str] = None) -> List[Catalog]:
continue
if not filters or filename.split(".")[0] in filters:
existingTranslationCatalogs.append(
Catalog.readFrom(os.path.join(l10nFolderPath, filename), locale=filename.split('.')[0]))
Catalog.readFrom(
os.path.join(l10nFolderPath, filename), locale=filename.split(".")[0]
)
)
return existingTranslationCatalogs

View File

@ -21,6 +21,7 @@ import subprocess
from i18n_helper import l10nFolderName, transifexClientFolder, projectRootDirectory
def main():
for root, folders, _ in os.walk(projectRootDirectory):
for folder in folders:

View File

@ -1,11 +1,9 @@
import io
import pytest
from checkDiff import check_diff
from unittest import mock
from types import SimpleNamespace
PATCHES = [
"""
"""
Index: binaries/data/l10n/en_GB.engine.po
===================================================================
--- binaries/data/l10n/en_GB.engine.po
@ -21,7 +19,7 @@ Index: binaries/data/l10n/en_GB.engine.po
msgid "The incoming stream version is unsupported"
""",
"""
"""
Index: binaries/data/l10n/en_GB.engine.po
===================================================================
--- binaries/data/l10n/en_GB.engine.po
@ -33,7 +31,7 @@ Index: binaries/data/l10n/en_GB.engine.po
msgid "Stream error"
msgstr "Stream error"
""",
"""
"""
Index: binaries/data/l10n/en_GB.engine.po
===================================================================
--- binaries/data/l10n/en_GB.engine.po
@ -65,7 +63,7 @@ Index: binaries/data/l10n/en_GB_3.engine.po
msgid "Stream error"
msgstr "Stream error"
""",
"""
"""
Index: binaries/data/l10n/bar.engine.po
===================================================================
--- binaries/data/l10n/bar.engine.po
@ -86,16 +84,17 @@ Index: binaries/data/l10n/bar.engine.po
"Language-Team: Bavarian (http://www.transifex.com/wildfire-games/0ad/language/bar/)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"""
""",
]
PATCHES_EXPECT_REVERT = [
set(),
{"binaries/data/l10n/en_GB.engine.po"},
{"binaries/data/l10n/en_GB.engine.po", "binaries/data/l10n/en_GB_3.engine.po"},
{"binaries/data/l10n/bar.engine.po"}
{"binaries/data/l10n/bar.engine.po"},
]
@pytest.fixture(params=zip(PATCHES, PATCHES_EXPECT_REVERT))
def patch(request):
return [io.StringIO(request.param[0]), request.param[1]]

View File

@ -16,42 +16,56 @@
# You should have received a copy of the GNU General Public License
# along with 0 A.D. If not, see <http://www.gnu.org/licenses/>.
import json, os
import json
import os
import multiprocessing
from importlib import import_module
from lxml import etree
from i18n_helper import l10nFolderName, projectRootDirectory
from i18n_helper.catalog import Catalog
from extractors import extractors
messagesFilename = "messages.json"
def warnAboutUntouchedMods():
"""
Warn about mods that are not properly configured to get their messages extracted.
Warn about mods that are not properly configured to get their messages extracted.
"""
modsRootFolder = os.path.join(projectRootDirectory, "binaries", "data", "mods")
untouchedMods = {}
for modFolder in os.listdir(modsRootFolder):
if modFolder[0] != "_" and modFolder[0] != '.':
if modFolder[0] != "_" and modFolder[0] != ".":
if not os.path.exists(os.path.join(modsRootFolder, modFolder, l10nFolderName)):
untouchedMods[modFolder] = "There is no '{folderName}' folder in the root folder of this mod.".format(folderName=l10nFolderName)
elif not os.path.exists(os.path.join(modsRootFolder, modFolder, l10nFolderName, messagesFilename)):
untouchedMods[modFolder] = "There is no '{filename}' file within the '{folderName}' folder in the root folder of this mod.".format(folderName=l10nFolderName, filename=messagesFilename)
untouchedMods[modFolder] = (
"There is no '{folderName}' folder in the root folder of this mod.".format(
folderName=l10nFolderName
)
)
elif not os.path.exists(
os.path.join(modsRootFolder, modFolder, l10nFolderName, messagesFilename)
):
untouchedMods[modFolder] = (
"There is no '{filename}' file within the '{folderName}' folder in the root folder of this mod.".format(
folderName=l10nFolderName, filename=messagesFilename
)
)
if untouchedMods:
print(""
"Warning: No messages were extracted from the following mods:"
"")
print("" "Warning: No messages were extracted from the following mods:" "")
for mod in untouchedMods:
print("{modName}: {warningMessage}".format(modName=mod, warningMessage=untouchedMods[mod]))
print(""
print(
"{modName}: {warningMessage}".format(
modName=mod, warningMessage=untouchedMods[mod]
)
)
print(
""
f"For this script to extract messages from a mod folder, this mod folder must contain a '{l10nFolderName}' "
f"folder, and this folder must contain a '{messagesFilename}' file that describes how to extract messages for the "
f"mod. See the folder of the main mod ('public') for an example, and see the documentation for more "
f"information."
)
)
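A hypothetical minimal messages.json, written here as the equivalent Python literal and limited to keys generatePOT actually reads below ('output', 'project', 'copyrightHolder', 'rules' and, per rule, 'extractor', 'filemasks', 'options'); all values are made up:

messages_settings = [
    {
        "output": "mymod.pot",
        "project": "My Mod",
        "copyrightHolder": "Wildfire Games",
        "rules": [
            {
                "extractor": "json",
                "filemasks": ["simulation/data/**.json"],
                "options": {"keywords": {"GenericName": {}}},
            }
        ],
    }
]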
def generatePOT(templateSettings, rootPath):
if "skip" in templateSettings and templateSettings["skip"] == "yes":
@ -64,7 +78,7 @@ def generatePOT(templateSettings, rootPath):
template = Catalog(
project=templateSettings["project"],
copyright_holder=templateSettings["copyrightHolder"],
locale='en',
locale="en",
)
for rule in templateSettings["rules"]:
@ -72,7 +86,7 @@ def generatePOT(templateSettings, rootPath):
return
options = rule.get("options", {})
extractorClass = getattr(import_module("extractors.extractors"), rule['extractor'])
extractorClass = getattr(import_module("extractors.extractors"), rule["extractor"])
extractor = extractorClass(inputRootPath, rule["filemasks"], options)
formatFlag = None
if "format" in options:
@ -84,31 +98,34 @@ def generatePOT(templateSettings, rootPath):
id=message_id,
context=context,
auto_comments=comments,
flags=[formatFlag] if formatFlag and message.find("%") != -1 else []
flags=[formatFlag] if formatFlag and message.find("%") != -1 else [],
)
saved_message.locations.append(location)
saved_message.flags.discard('python-format')
saved_message.flags.discard("python-format")
template.writeTo(os.path.join(rootPath, templateSettings["output"]))
print(u"Generated \"{}\" with {} messages.".format(templateSettings["output"], len(template)))
print('Generated "{}" with {} messages.'.format(templateSettings["output"], len(template)))
def generateTemplatesForMessagesFile(messagesFilePath):
with open(messagesFilePath, 'r') as fileObject:
with open(messagesFilePath, "r") as fileObject:
settings = json.load(fileObject)
for templateSettings in settings:
multiprocessing.Process(
target=generatePOT,
args=(templateSettings, os.path.dirname(messagesFilePath))
target=generatePOT, args=(templateSettings, os.path.dirname(messagesFilePath))
).start()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--scandir", help="Directory to start scanning for l10n folders in. "
"Type '.' for current working directory")
parser.add_argument(
"--scandir",
help="Directory to start scanning for l10n folders in. "
"Type '.' for current working directory",
)
args = parser.parse_args()
for root, folders, filenames in os.walk(args.scandir or projectRootDirectory):
for folder in folders:

View File

@ -23,118 +23,148 @@
# THE SOFTWARE.
import argparse
import io
import os
import struct
import sys
parser = argparse.ArgumentParser(description="Convert maps compatible with 0 A.D. version Alpha XVIII (A18) to maps compatible with version Alpha XIX (A19), or the other way around.")
parser = argparse.ArgumentParser(
description="Convert maps compatible with 0 A.D. version Alpha XVIII (A18) to maps compatible with version Alpha XIX (A19), or the other way around."
)
parser.add_argument("--reverse", action="store_true", help="Make an A19 map compatible with A18 (note that conversion will fail if mountains are too high)")
parser.add_argument("--no-version-bump", action="store_true", help="Don't change the version number of the map")
parser.add_argument("--no-color-spelling", action="store_true", help="Don't change the spelling of color and colour")
parser.add_argument(
"--reverse",
action="store_true",
help="Make an A19 map compatible with A18 (note that conversion will fail if mountains are too high)",
)
parser.add_argument(
"--no-version-bump", action="store_true", help="Don't change the version number of the map"
)
parser.add_argument(
"--no-color-spelling",
action="store_true",
help="Don't change the spelling of color and colour",
)
parser.add_argument("--no-height-change", action="store_true", help="Don't change the heightmap")
parser.add_argument("files", nargs="+", help="XML file to process (use wildcards '*' to select multiple files)")
parser.add_argument(
"files", nargs="+", help="XML file to process (use wildcards '*' to select multiple files)"
)
args = parser.parse_args()
HEIGHTMAP_BIT_SHIFT = 3
for xmlFile in args.files:
pmpFile = xmlFile[:-3] + "pmp"
pmpFile = xmlFile[:-3] + "pmp"
print("Processing " + xmlFile + " ...")
print("Processing " + xmlFile + " ...")
if os.path.isfile(pmpFile):
with open(pmpFile, "rb") as f1, open(pmpFile + "~", "wb") as f2:
# 4 bytes PSMP to start the file
f2.write(f1.read(4))
if os.path.isfile(pmpFile):
with open(pmpFile, "rb") as f1, open(pmpFile + "~", "wb") as f2:
# 4 bytes PSMP to start the file
f2.write(f1.read(4))
# 4 bytes to encode the version of the file format
version = struct.unpack("<I", f1.read(4))[0]
if args.no_version_bump:
f2.write(struct.pack("<I", version))
else:
if args.reverse:
if version != 6:
print("Warning: File " + pmpFile + " was not at version 6, while a negative version bump was requested.\nABORTING ...")
continue
f2.write(struct.pack("<I", version-1))
else:
if version != 5:
print("Warning: File " + pmpFile + " was not at version 5, while a version bump was requested.\nABORTING ...")
continue
f2.write(struct.pack("<I", version+1))
# 4 bytes to encode the version of the file format
version = struct.unpack("<I", f1.read(4))[0]
if args.no_version_bump:
f2.write(struct.pack("<I", version))
else:
if args.reverse:
if version != 6:
print(
"Warning: File "
+ pmpFile
+ " was not at version 6, while a negative version bump was requested.\nABORTING ..."
)
continue
f2.write(struct.pack("<I", version - 1))
else:
if version != 5:
print(
"Warning: File "
+ pmpFile
+ " was not at version 5, while a version bump was requested.\nABORTING ..."
)
continue
f2.write(struct.pack("<I", version + 1))
# 4 bytes for the file size (which shouldn't change)
f2.write(f1.read(4))
# 4 bytes for the file size (which shouldn't change)
f2.write(f1.read(4))
# 4 bytes to encode the map size
map_size = struct.unpack("<I", f1.read(4))[0]
f2.write(struct.pack("<I", map_size))
# 4 bytes to encode the map size
map_size = struct.unpack("<I", f1.read(4))[0]
f2.write(struct.pack("<I", map_size))
# scale all heights down using the shift '>>' operator
if args.no_height_change:
def height_transform(h):
return h
else:
if args.reverse:
def height_transform(h):
return h << HEIGHTMAP_BIT_SHIFT
else:
def height_transform(h):
return h >> HEIGHTMAP_BIT_SHIFT
for i in range(0, (map_size*16+1)*(map_size*16+1)):
height = struct.unpack("<H", f1.read(2))[0]
f2.write(struct.pack("<H", height_transform(height)))
# copy the rest of the file
byte = f1.read(1)
while byte != b"":
f2.write(byte)
byte = f1.read(1)
# scale all heights down using the shift '>>' operator
if args.no_height_change:
f2.close()
f1.close()
def height_transform(h):
return h
else:
if args.reverse:
# replace the old file, comment to see both files
os.remove(pmpFile)
os.rename(pmpFile + "~", pmpFile)
def height_transform(h):
return h << HEIGHTMAP_BIT_SHIFT
else:
def height_transform(h):
return h >> HEIGHTMAP_BIT_SHIFT
if os.path.isfile(xmlFile):
with open(xmlFile, "r") as f1, open(xmlFile + "~", "w") as f2:
data = f1.read()
for i in range(0, (map_size * 16 + 1) * (map_size * 16 + 1)):
height = struct.unpack("<H", f1.read(2))[0]
f2.write(struct.pack("<H", height_transform(height)))
# bump version number (rely on how Atlas formats the XML)
if not args.no_version_bump:
if args.reverse:
if data.find('<Scenario version="6">') == -1:
print("Warning: File " + xmlFile + " was not at version 6, while a negative version bump was requested.\nABORTING ...")
sys.exit()
else:
data = data.replace('<Scenario version="6">', '<Scenario version="5">')
else:
if data.find('<Scenario version="5">') == -1:
print("Warning: File " + xmlFile + " was not at version 5, while a version bump was requested.\nABORTING ...")
sys.exit()
else:
data = data.replace('<Scenario version="5">', '<Scenario version="6">')
# copy the rest of the file
byte = f1.read(1)
while byte != b"":
f2.write(byte)
byte = f1.read(1)
# transform the color keys
if not args.no_color_spelling:
if args.reverse:
data = data.replace("color", "colour").replace("Color", "Colour")
else:
data = data.replace("colour", "color").replace("Colour", "Color")
f2.write(data)
f1.close()
f2.close()
f2.close()
f1.close()
# replace the old file, comment to see both files
os.remove(xmlFile)
os.rename(xmlFile + "~", xmlFile)
# replace the old file, comment to see both files
os.remove(pmpFile)
os.rename(pmpFile + "~", pmpFile)
if os.path.isfile(xmlFile):
with open(xmlFile, "r") as f1, open(xmlFile + "~", "w") as f2:
data = f1.read()
# bump version number (rely on how Atlas formats the XML)
if not args.no_version_bump:
if args.reverse:
if data.find('<Scenario version="6">') == -1:
print(
"Warning: File "
+ xmlFile
+ " was not at version 6, while a negative version bump was requested.\nABORTING ..."
)
sys.exit()
else:
data = data.replace('<Scenario version="6">', '<Scenario version="5">')
else:
if data.find('<Scenario version="5">') == -1:
print(
"Warning: File "
+ xmlFile
+ " was not at version 5, while a version bump was requested.\nABORTING ..."
)
sys.exit()
else:
data = data.replace('<Scenario version="5">', '<Scenario version="6">')
# transform the color keys
if not args.no_color_spelling:
if args.reverse:
data = data.replace("color", "colour").replace("Color", "Colour")
else:
data = data.replace("colour", "color").replace("Colour", "Color")
f2.write(data)
f1.close()
f2.close()
# replace the old file, comment to see both files
os.remove(xmlFile)
os.rename(xmlFile + "~", xmlFile)
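A worked example of the height conversion (the value is chosen to be divisible by 8, since the right shift discards the low HEIGHTMAP_BIT_SHIFT bits):

HEIGHTMAP_BIT_SHIFT = 3

a18_height = 4096                                # hypothetical 16-bit height
a19_height = a18_height >> HEIGHTMAP_BIT_SHIFT   # 512
restored = a19_height << HEIGHTMAP_BIT_SHIFT     # 4096; --reverse can overflow
print(a19_height, restored)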

View File

@ -4,31 +4,36 @@ import zero_ad
# First, we will define some helper functions we will use later.
import math
def dist (p1, p2):
def dist(p1, p2):
return math.sqrt(sum((math.pow(x2 - x1, 2) for (x1, x2) in zip(p1, p2))))
def center(units):
sum_position = map(sum, zip(*map(lambda u: u.position(), units)))
return [x/len(units) for x in sum_position]
return [x / len(units) for x in sum_position]
def closest(units, position):
dists = (dist(unit.position(), position) for unit in units)
index = 0
min_dist = next(dists)
for (i, d) in enumerate(dists):
for i, d in enumerate(dists):
if d < min_dist:
index = i
min_dist = d
return units[index]
# Connect to a 0 AD game server listening at localhost:6000
game = zero_ad.ZeroAD('http://localhost:6000')
game = zero_ad.ZeroAD("http://localhost:6000")
# Load the Arcadia map
samples_dir = path.dirname(path.realpath(__file__))
scenario_config_path = path.join(samples_dir, 'arcadia.json')
with open(scenario_config_path, 'r') as f:
scenario_config_path = path.join(samples_dir, "arcadia.json")
with open(scenario_config_path, "r") as f:
arcadia_config = f.read()
state = game.reset(arcadia_config)
@ -37,15 +42,15 @@ state = game.reset(arcadia_config)
state = game.step()
# Units can be queried from the game state
citizen_soldiers = state.units(owner=1, type='infantry')
citizen_soldiers = state.units(owner=1, type="infantry")
# (including gaia units like trees or other resources)
nearby_tree = closest(state.units(owner=0, type='tree'), center(citizen_soldiers))
nearby_tree = closest(state.units(owner=0, type="tree"), center(citizen_soldiers))
# Action commands can be created using zero_ad.actions
collect_wood = zero_ad.actions.gather(citizen_soldiers, nearby_tree)
female_citizens = state.units(owner=1, type='female_citizen')
house_tpl = 'structures/spart/house'
female_citizens = state.units(owner=1, type="female_citizen")
house_tpl = "structures/spart/house"
x = 680
z = 640
build_house = zero_ad.actions.construct(female_citizens, house_tpl, x, z, autocontinue=True)
@ -58,20 +63,24 @@ female_id = female_citizens[0].id()
female_citizen = state.unit(female_id)
# A variety of unit information can be queried from the unit:
print('female citizen\'s max health is', female_citizen.max_health())
print("female citizen's max health is", female_citizen.max_health())
# Raw data for units and game states are available via the data attribute
print(female_citizen.data)
# Units can be built using the "train action"
civic_center = state.units(owner=1, type="civil_centre")[0]
spearman_type = 'units/spart/infantry_spearman_b'
spearman_type = "units/spart/infantry_spearman_b"
train_spearmen = zero_ad.actions.train([civic_center], spearman_type)
state = game.step([train_spearmen])
# Let's step the engine until the house has been built
is_unit_busy = lambda state, unit_id: len(state.unit(unit_id).data['unitAIOrderData']) > 0
def is_unit_busy(state, unit_id):
return len(state.unit(unit_id).data["unitAIOrderData"]) > 0
while is_unit_busy(state, female_id):
state = game.step()
@ -85,14 +94,16 @@ for _ in range(150):
state = game.step()
# Let's attack with our entire military
state = game.step([zero_ad.actions.chat('An attack is coming!')])
state = game.step([zero_ad.actions.chat("An attack is coming!")])
while len(state.units(owner=2, type='unit')) > 0:
attack_units = [ unit for unit in state.units(owner=1, type='unit') if 'female' not in unit.type() ]
target = closest(state.units(owner=2, type='unit'), center(attack_units))
while len(state.units(owner=2, type="unit")) > 0:
attack_units = [
unit for unit in state.units(owner=1, type="unit") if "female" not in unit.type()
]
target = closest(state.units(owner=2, type="unit"), center(attack_units))
state = game.step([zero_ad.actions.attack(attack_units, target)])
while state.unit(target.id()):
state = game.step()
game.step([zero_ad.actions.chat('The enemies have been vanquished. Our home is safe again.')])
game.step([zero_ad.actions.chat("The enemies have been vanquished. Our home is safe again.")])

View File

@ -1,13 +1,14 @@
import os
from setuptools import setup
setup(name='zero_ad',
version='0.0.1',
description='Python client for 0 AD',
url='https://code.wildfiregames.com',
author='Brian Broll',
author_email='brian.broll@gmail.com',
install_requires=[],
license='MIT',
packages=['zero_ad'],
zip_safe=False)
setup(
name="zero_ad",
version="0.0.1",
description="Python client for 0 AD",
url="https://code.wildfiregames.com",
author="Brian Broll",
author_email="brian.broll@gmail.com",
install_requires=[],
license="MIT",
packages=["zero_ad"],
zip_safe=False,
)

View File

@ -1,35 +1,38 @@
import zero_ad
import json
import math
from os import path
game = zero_ad.ZeroAD('http://localhost:6000')
game = zero_ad.ZeroAD("http://localhost:6000")
scriptdir = path.dirname(path.realpath(__file__))
with open(path.join(scriptdir, '..', 'samples', 'arcadia.json'), 'r') as f:
with open(path.join(scriptdir, "..", "samples", "arcadia.json"), "r") as f:
config = f.read()
def dist (p1, p2):
def dist(p1, p2):
return math.sqrt(sum((math.pow(x2 - x1, 2) for (x1, x2) in zip(p1, p2))))
def center(units):
sum_position = map(sum, zip(*map(lambda u: u.position(), units)))
return [x/len(units) for x in sum_position]
return [x / len(units) for x in sum_position]
def closest(units, position):
dists = (dist(unit.position(), position) for unit in units)
index = 0
min_dist = next(dists)
for (i, d) in enumerate(dists):
for i, d in enumerate(dists):
if d < min_dist:
index = i
min_dist = d
return units[index]
def test_construct():
state = game.reset(config)
female_citizens = state.units(owner=1, type='female_citizen')
house_tpl = 'structures/spart/house'
female_citizens = state.units(owner=1, type="female_citizen")
house_tpl = "structures/spart/house"
house_count = len(state.units(owner=1, type=house_tpl))
x = 680
z = 640
@ -39,21 +42,23 @@ def test_construct():
while len(state.units(owner=1, type=house_tpl)) == house_count:
state = game.step()
def test_gather():
state = game.reset(config)
female_citizen = state.units(owner=1, type='female_citizen')[0]
trees = state.units(owner=0, type='tree')
nearby_tree = closest(state.units(owner=0, type='tree'), female_citizen.position())
female_citizen = state.units(owner=1, type="female_citizen")[0]
state.units(owner=0, type="tree")
nearby_tree = closest(state.units(owner=0, type="tree"), female_citizen.position())
collect_wood = zero_ad.actions.gather([female_citizen], nearby_tree)
state = game.step([collect_wood])
while len(state.unit(female_citizen.id()).data['resourceCarrying']) == 0:
while len(state.unit(female_citizen.id()).data["resourceCarrying"]) == 0:
state = game.step()
def test_train():
state = game.reset(config)
civic_centers = state.units(owner=1, type="civil_centre")
spearman_type = 'units/spart/infantry_spearman_b'
spearman_type = "units/spart/infantry_spearman_b"
spearman_count = len(state.units(owner=1, type=spearman_type))
train_spearmen = zero_ad.actions.train(civic_centers, spearman_type)
@ -61,9 +66,10 @@ def test_train():
while len(state.units(owner=1, type=spearman_type)) == spearman_count:
state = game.step()
def test_walk():
state = game.reset(config)
female_citizens = state.units(owner=1, type='female_citizen')
female_citizens = state.units(owner=1, type="female_citizen")
x = 680
z = 640
initial_distance = dist(center(female_citizens), [x, z])
@ -73,13 +79,14 @@ def test_walk():
distance = initial_distance
while distance >= initial_distance:
state = game.step()
female_citizens = state.units(owner=1, type='female_citizen')
female_citizens = state.units(owner=1, type="female_citizen")
distance = dist(center(female_citizens), [x, z])
def test_attack():
state = game.reset(config)
unit = state.units(owner=1, type='cavalry')[0]
target = state.units(owner=2, type='female_citizen')[0]
unit = state.units(owner=1, type="cavalry")[0]
target = state.units(owner=2, type="female_citizen")[0]
initial_health_target = target.health()
initial_health_unit = unit.health()
@ -87,11 +94,13 @@ def test_attack():
attack = zero_ad.actions.attack([unit], target)
state = game.step([attack])
while (state.unit(target.id()).health() >= initial_health_target
) and (state.unit(unit.id()).health() >= initial_health_unit):
while (state.unit(target.id()).health() >= initial_health_target) and (
state.unit(unit.id()).health() >= initial_health_unit
):
state = game.step()
def test_chat():
state = game.reset(config)
chat = zero_ad.actions.chat('hello world!!')
state = game.step([chat])
game.reset(config)
chat = zero_ad.actions.chat("hello world!!")
game.step([chat])

View File

@ -1,44 +1,48 @@
import zero_ad
import json
import math
from os import path
game = zero_ad.ZeroAD('http://localhost:6000')
game = zero_ad.ZeroAD("http://localhost:6000")
scriptdir = path.dirname(path.realpath(__file__))
with open(path.join(scriptdir, '..', 'samples', 'arcadia.json'), 'r') as f:
with open(path.join(scriptdir, "..", "samples", "arcadia.json"), "r") as f:
config = f.read()
with open(path.join(scriptdir, 'fastactions.js'), 'r') as f:
with open(path.join(scriptdir, "fastactions.js"), "r") as f:
fastactions = f.read()
def test_return_object():
state = game.reset(config)
game.reset(config)
result = game.evaluate('({"hello": "world"})')
assert type(result) is dict
assert result['hello'] == 'world'
assert result["hello"] == "world"
def test_return_null():
result = game.evaluate('null')
assert result == None
result = game.evaluate("null")
assert result is None
def test_return_string():
state = game.reset(config)
game.reset(config)
result = game.evaluate('"cat"')
assert result == 'cat'
assert result == "cat"
def test_fastactions():
state = game.reset(config)
game.evaluate(fastactions)
female_citizens = state.units(owner=1, type='female_citizen')
house_tpl = 'structures/spart/house'
house_count = len(state.units(owner=1, type=house_tpl))
female_citizens = state.units(owner=1, type="female_citizen")
house_tpl = "structures/spart/house"
len(state.units(owner=1, type=house_tpl))
x = 680
z = 640
build_house = zero_ad.actions.construct(female_citizens, house_tpl, x, z, autocontinue=True)
# Check that they start building the house
state = game.step([build_house])
step_count = 0
new_house = lambda _=None: state.units(owner=1, type=house_tpl)[0]
def new_house(_=None):
return state.units(owner=1, type=house_tpl)[0]
initial_health = new_house().health(ratio=True)
while new_house().health(ratio=True) == initial_health:
state = game.step()

View File

@ -1,4 +1,5 @@
from . import actions
from . import actions # noqa: F401
from . import environment
ZeroAD = environment.ZeroAD
GameState = environment.GameState

View File

@ -1,63 +1,57 @@
def construct(units, template, x, z, angle=0, autorepair=True, autocontinue=True, queued=False):
unit_ids = [ unit.id() for unit in units ]
unit_ids = [unit.id() for unit in units]
return {
'type': 'construct',
'entities': unit_ids,
'template': template,
'x': x,
'z': z,
'angle': angle,
'autorepair': autorepair,
'autocontinue': autocontinue,
'queued': queued,
"type": "construct",
"entities": unit_ids,
"template": template,
"x": x,
"z": z,
"angle": angle,
"autorepair": autorepair,
"autocontinue": autocontinue,
"queued": queued,
}
def gather(units, target, queued=False):
unit_ids = [ unit.id() for unit in units ]
unit_ids = [unit.id() for unit in units]
return {
'type': 'gather',
'entities': unit_ids,
'target': target.id(),
'queued': queued,
"type": "gather",
"entities": unit_ids,
"target": target.id(),
"queued": queued,
}
def train(entities, unit_type, count=1):
entity_ids = [ unit.id() for unit in entities ]
entity_ids = [unit.id() for unit in entities]
return {
'type': 'train',
'entities': entity_ids,
'template': unit_type,
'count': count,
"type": "train",
"entities": entity_ids,
"template": unit_type,
"count": count,
}
def chat(message):
return {
'type': 'aichat',
'message': message
}
return {"type": "aichat", "message": message}
def reveal_map():
return {
'type': 'reveal-map',
'enable': True
}
return {"type": "reveal-map", "enable": True}
def walk(units, x, z, queued=False):
ids = [ unit.id() for unit in units ]
return {
'type': 'walk',
'entities': ids,
'x': x,
'z': z,
'queued': queued
}
ids = [unit.id() for unit in units]
return {"type": "walk", "entities": ids, "x": x, "z": z, "queued": queued}
def attack(units, target, queued=False, allow_capture=True):
unit_ids = [ unit.id() for unit in units ]
unit_ids = [unit.id() for unit in units]
return {
'type': 'attack',
'entities': unit_ids,
'target': target.id(),
'allowCapture': allow_capture,
'queued': queued
"type": "attack",
"entities": unit_ids,
"target": target.id(),
"allowCapture": allow_capture,
"queued": queued,
}
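Every helper above returns a plain command dictionary; walk() on two units, for instance, would produce (unit ids hypothetical):

example_walk = {
    "type": "walk",
    "entities": [42, 43],  # hypothetical unit ids
    "x": 680,
    "z": 640,
    "queued": False,
}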

View File

@ -1,33 +1,33 @@
import urllib
from urllib import request
import json
class RLAPI():
class RLAPI:
def __init__(self, url):
self.url = url
def post(self, route, data):
response = request.urlopen(url=f'{self.url}/{route}', data=bytes(data, 'utf8'))
response = request.urlopen(url=f"{self.url}/{route}", data=bytes(data, "utf8"))
return response.read()
def step(self, commands):
post_data = '\n'.join((f'{player};{json.dumps(action)}' for (player, action) in commands))
return self.post('step', post_data)
post_data = "\n".join((f"{player};{json.dumps(action)}" for (player, action) in commands))
return self.post("step", post_data)
def reset(self, scenario_config, player_id, save_replay):
path = 'reset?'
path = "reset?"
if save_replay:
path += 'saveReplay=1&'
path += "saveReplay=1&"
if player_id:
path += f'playerID={player_id}&'
path += f"playerID={player_id}&"
return self.post(path, scenario_config)
def get_templates(self, names):
post_data = '\n'.join(names)
response = self.post('templates', post_data)
return zip(names, response.decode().split('\n'))
post_data = "\n".join(names)
response = self.post("templates", post_data)
return zip(names, response.decode().split("\n"))
def evaluate(self, code):
response = self.post('evaluate', code)
response = self.post("evaluate", code)
return json.loads(response.decode())
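A sketch of the payload step() posts to '<url>/step', one 'player;json' line per command (the action is taken from actions.chat above):

import json

commands = [(1, {"type": "aichat", "message": "hi"})]  # (player, action) pairs
post_data = "\n".join(f"{player};{json.dumps(action)}" for player, action in commands)
print(post_data)  # 1;{"type": "aichat", "message": "hi"}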

View File

@ -1,11 +1,11 @@
from .api import RLAPI
import json
import math
from xml.etree import ElementTree
from itertools import cycle
class ZeroAD():
def __init__(self, uri='http://localhost:6000'):
class ZeroAD:
def __init__(self, uri="http://localhost:6000"):
self.api = RLAPI(uri)
self.current_state = None
self.cache = {}
@ -20,7 +20,7 @@ class ZeroAD():
self.current_state = GameState(json.loads(state_json), self)
return self.current_state
def reset(self, config='', save_replay=False, player_id=1):
def reset(self, config="", save_replay=False, player_id=1):
state_json = self.api.reset(config, player_id, save_replay)
self.current_state = GameState(json.loads(state_json), self)
return self.current_state
@ -33,7 +33,7 @@ class ZeroAD():
def get_templates(self, names):
templates = self.api.get_templates(names)
return [ (name, EntityTemplate(content)) for (name, content) in templates ]
return [(name, EntityTemplate(content)) for (name, content) in templates]
def update_templates(self, types=[]):
all_types = list(set([unit.type() for unit in self.current_state.units()]))
@ -41,54 +41,60 @@ class ZeroAD():
template_pairs = self.get_templates(all_types)
self.cache = {}
for (name, tpl) in template_pairs:
for name, tpl in template_pairs:
self.cache[name] = tpl
return template_pairs
class GameState():
class GameState:
def __init__(self, data, game):
self.data = data
self.game = game
self.mapSize = self.data['mapSize']
self.mapSize = self.data["mapSize"]
def units(self, owner=None, type=None):
filter_fn = lambda e: (owner is None or e['owner'] == owner) and \
(type is None or type in e['template'])
return [ Entity(e, self.game) for e in self.data['entities'].values() if filter_fn(e) ]
def filter_fn(e):
return (owner is None or e["owner"] == owner) and (
type is None or type in e["template"]
)
return [Entity(e, self.game) for e in self.data["entities"].values() if filter_fn(e)]
def unit(self, id):
id = str(id)
return Entity(self.data['entities'][id], self.game) if id in self.data['entities'] else None
return (
Entity(self.data["entities"][id], self.game) if id in self.data["entities"] else None
)
class Entity():
class Entity:
def __init__(self, data, game):
self.data = data
self.game = game
self.template = self.game.cache.get(self.type(), None)
def type(self):
return self.data['template']
return self.data["template"]
def id(self):
return self.data['id']
return self.data["id"]
def owner(self):
return self.data['owner']
return self.data["owner"]
def max_health(self):
template = self.get_template()
return float(template.get('Health/Max'))
return float(template.get("Health/Max"))
def health(self, ratio=False):
if ratio:
return self.data['hitpoints']/self.max_health()
return self.data["hitpoints"] / self.max_health()
return self.data['hitpoints']
return self.data["hitpoints"]
def position(self):
return self.data['position']
return self.data["position"]
def get_template(self):
if self.template is None:
@ -97,9 +103,10 @@ class Entity():
return self.template
class EntityTemplate():
class EntityTemplate:
def __init__(self, xml):
self.data = ElementTree.fromstring(f'<Entity>{xml}</Entity>')
self.data = ElementTree.fromstring(f"<Entity>{xml}</Entity>")
def get(self, path):
node = self.data.find(path)
@ -113,4 +120,4 @@ class EntityTemplate():
return node is not None
def __str__(self):
return ElementTree.tostring(self.data).decode('utf-8')
return ElementTree.tostring(self.data).decode("utf-8")
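A self-contained sketch of the ElementTree lookup that max_health() relies on (the template XML is hypothetical):

from xml.etree import ElementTree

xml = "<Health><Max>100</Max></Health>"  # hypothetical template body
data = ElementTree.fromstring(f"<Entity>{xml}</Entity>")
node = data.find("Health/Max")
print(float(node.text))  # 100.0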

View File

@ -22,14 +22,12 @@
# THE SOFTWARE.
import argparse
import datetime
import hashlib
import itertools
import json
import os
import subprocess
import sys
import time
import yaml
import xml.etree.ElementTree as ET
@ -40,29 +38,32 @@ def execute(command):
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
except:
sys.stderr.write('Failed to run command: {}\n'.format(' '.join(command)))
sys.stderr.write("Failed to run command: {}\n".format(" ".join(command)))
raise
return process.returncode, out, err
def calculate_hash(path):
assert os.path.isfile(path)
with open(path, 'rb') as handle:
with open(path, "rb") as handle:
return hashlib.sha1(handle.read()).hexdigest()
def compare_spirv(path1, path2):
with open(path1, 'rb') as handle:
with open(path1, "rb") as handle:
spirv1 = handle.read()
with open(path2, 'rb') as handle:
with open(path2, "rb") as handle:
spirv2 = handle.read()
return spirv1 == spirv2
def resolve_if(defines, expression):
for item in expression.strip().split('||'):
for item in expression.strip().split("||"):
item = item.strip()
assert len(item) > 1
name = item
invert = False
if name[0] == '!':
if name[0] == "!":
invert = True
name = item[1:]
assert item[1].isalpha()
@ -70,210 +71,267 @@ def resolve_if(defines, expression):
assert item[0].isalpha()
found_define = False
for define in defines:
if define['name'] == name:
assert define['value'] == 'UNDEFINED' or define['value'] == '0' or define['value'] == '1'
if define["name"] == name:
assert (
define["value"] == "UNDEFINED"
or define["value"] == "0"
or define["value"] == "1"
)
if invert:
if define['value'] != '1':
if define["value"] != "1":
return True
found_define = True
else:
if define['value'] == '1':
if define["value"] == "1":
return True
if invert and not found_define:
return True
return False
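A worked example of the resolve_if semantics just shown, over hypothetical defines:

defines = [{"name": "USE_FOG", "value": "0"}]
expression = "USE_FOG || !USE_GPU_SKINNING"
# 'USE_FOG' is defined as '0', so that term does not match; the inverted
# '!USE_GPU_SKINNING' matches because USE_GPU_SKINNING is not defined at all.
print(resolve_if(defines, expression))  # True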
def compile_and_reflect(input_mod_path, output_mod_path, dependencies, stage, path, out_path, defines):
def compile_and_reflect(
input_mod_path, output_mod_path, dependencies, stage, path, out_path, defines
):
keep_debug = False
input_path = os.path.normpath(path)
output_path = os.path.normpath(out_path)
command = [
'glslc', '-x', 'glsl', '--target-env=vulkan1.1', '-std=450core',
'-I', os.path.join(input_mod_path, 'shaders', 'glsl'),
"glslc",
"-x",
"glsl",
"--target-env=vulkan1.1",
"-std=450core",
"-I",
os.path.join(input_mod_path, "shaders", "glsl"),
]
for dependency in dependencies:
if dependency != input_mod_path:
command += ['-I', os.path.join(dependency, 'shaders', 'glsl')]
command += ["-I", os.path.join(dependency, "shaders", "glsl")]
command += [
'-fshader-stage=' + stage, '-O', input_path,
"-fshader-stage=" + stage,
"-O",
input_path,
]
use_descriptor_indexing = False
for define in defines:
if define['value'] == 'UNDEFINED':
if define["value"] == "UNDEFINED":
continue
assert ' ' not in define['value']
command.append('-D{}={}'.format(define['name'], define['value']))
if define['name'] == 'USE_DESCRIPTOR_INDEXING':
assert " " not in define["value"]
command.append("-D{}={}".format(define["name"], define["value"]))
if define["name"] == "USE_DESCRIPTOR_INDEXING":
use_descriptor_indexing = True
command.append('-D{}={}'.format('USE_SPIRV', '1'))
command.append('-DSTAGE_{}={}'.format(stage.upper(), '1'))
command += ['-o', output_path]
command.append("-D{}={}".format("USE_SPIRV", "1"))
command.append("-DSTAGE_{}={}".format(stage.upper(), "1"))
command += ["-o", output_path]
# Compile the shader with debug information to see names in reflection.
ret, out, err = execute(command + ['-g'])
ret, out, err = execute(command + ["-g"])
if ret:
sys.stderr.write('Command returned {}:\nCommand: {}\nInput path: {}\nOutput path: {}\nError: {}\n'.format(
ret, ' '.join(command), input_path, output_path, err))
preprocessor_output_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'preprocessed_file.glsl'))
execute(command[:-2] + ['-g', '-E', '-o', preprocessor_output_path])
sys.stderr.write(
"Command returned {}:\nCommand: {}\nInput path: {}\nOutput path: {}\nError: {}\n".format(
ret, " ".join(command), input_path, output_path, err
)
)
preprocessor_output_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), "preprocessed_file.glsl")
)
execute(command[:-2] + ["-g", "-E", "-o", preprocessor_output_path])
raise ValueError(err)
ret, out, err = execute(['spirv-reflect', '-y','-v', '1', output_path])
ret, out, err = execute(["spirv-reflect", "-y", "-v", "1", output_path])
if ret:
sys.stderr.write('Command returned {}:\nCommand: {}\nInput path: {}\nOutput path: {}\nError: {}\n'.format(
ret, ' '.join(command), input_path, output_path, err))
sys.stderr.write(
"Command returned {}:\nCommand: {}\nInput path: {}\nOutput path: {}\nError: {}\n".format(
ret, " ".join(command), input_path, output_path, err
)
)
raise ValueError(err)
# Reflect the result SPIRV.
data = yaml.safe_load(out)
module = data['module']
module = data["module"]
interface_variables = []
if 'all_interface_variables' in data and data['all_interface_variables']:
interface_variables = data['all_interface_variables']
if "all_interface_variables" in data and data["all_interface_variables"]:
interface_variables = data["all_interface_variables"]
push_constants = []
vertex_attributes = []
if 'push_constants' in module and module['push_constants']:
assert len(module['push_constants']) == 1
if "push_constants" in module and module["push_constants"]:
assert len(module["push_constants"]) == 1
def add_push_constants(node, push_constants):
if ('members' in node) and node['members']:
for member in node['members']:
if ("members" in node) and node["members"]:
for member in node["members"]:
add_push_constants(member, push_constants)
else:
assert node['absolute_offset'] + node['size'] <= 128
push_constants.append({
'name': node['name'],
'offset': node['absolute_offset'],
'size': node['size'],
})
assert module['push_constants'][0]['type_description']['type_name'] == 'DrawUniforms'
assert module['push_constants'][0]['size'] <= 128
add_push_constants(module['push_constants'][0], push_constants)
assert node["absolute_offset"] + node["size"] <= 128
push_constants.append(
{
"name": node["name"],
"offset": node["absolute_offset"],
"size": node["size"],
}
)
assert module["push_constants"][0]["type_description"]["type_name"] == "DrawUniforms"
assert module["push_constants"][0]["size"] <= 128
add_push_constants(module["push_constants"][0], push_constants)
descriptor_sets = []
if 'descriptor_sets' in module and module['descriptor_sets']:
if "descriptor_sets" in module and module["descriptor_sets"]:
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER = 1
VK_DESCRIPTOR_TYPE_STORAGE_IMAGE = 3
VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER = 6
VK_DESCRIPTOR_TYPE_STORAGE_BUFFER = 7
for descriptor_set in module["descriptor_sets"]:
UNIFORM_SET = 1 if use_descriptor_indexing else 0
STORAGE_SET = 2
bindings = []
if descriptor_set["set"] == UNIFORM_SET:
assert descriptor_set["binding_count"] > 0
for binding in descriptor_set["bindings"]:
assert binding["set"] == UNIFORM_SET
block = binding["block"]
members = []
for member in block["members"]:
members.append(
{
"name": member["name"],
"offset": member["absolute_offset"],
"size": member["size"],
}
)
bindings.append(
{
"binding": binding["binding"],
"type": "uniform",
"size": block["size"],
"members": members,
}
)
binding = descriptor_set["bindings"][0]
assert binding["descriptor_type"] == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
elif descriptor_set["set"] == STORAGE_SET:
assert descriptor_set["binding_count"] > 0
for binding in descriptor_set["bindings"]:
is_storage_image = (
binding["descriptor_type"] == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
)
is_storage_buffer = (
binding["descriptor_type"] == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
)
assert is_storage_image or is_storage_buffer
assert (
binding["descriptor_type"]
== descriptor_set["bindings"][0]["descriptor_type"]
)
assert binding["image"]["arrayed"] == 0
assert binding["image"]["ms"] == 0
bindingType = "storageImage"
if is_storage_buffer:
bindingType = "storageBuffer"
bindings.append(
{
"binding": binding["binding"],
"type": bindingType,
"name": binding["name"],
}
)
else:
if use_descriptor_indexing:
if descriptor_set["set"] == 0:
assert descriptor_set["binding_count"] >= 1
for binding in descriptor_set["bindings"]:
assert (
binding["descriptor_type"]
== VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
)
assert binding["array"]["dims"][0] == 16384
if binding["binding"] == 0:
assert binding["name"] == "textures2D"
elif binding["binding"] == 1:
assert binding["name"] == "texturesCube"
elif binding["binding"] == 2:
assert binding["name"] == "texturesShadow"
else:
assert False
else:
assert descriptor_set["binding_count"] > 0
for binding in descriptor_set["bindings"]:
assert (
binding["descriptor_type"] == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
)
assert binding["image"]["sampled"] == 1
assert binding["image"]["arrayed"] == 0
assert binding["image"]["ms"] == 0
sampler_type = "sampler{}D".format(binding["image"]["dim"] + 1)
if binding["image"]["dim"] == 3:
sampler_type = "samplerCube"
bindings.append(
{
"binding": binding["binding"],
"type": sampler_type,
"name": binding["name"],
}
)
descriptor_sets.append(
{
"set": descriptor_set["set"],
"bindings": bindings,
}
)
if stage == "vertex":
for variable in interface_variables:
if variable["storage_class"] == 1:
# Input.
vertex_attributes.append(
{
"name": variable["name"],
"location": variable["location"],
}
)
# Compile the final version without debug information.
if not keep_debug:
ret, out, err = execute(command)
if ret:
sys.stderr.write(
"Command returned {}:\nCommand: {}\nInput path: {}\nOutput path: {}\nError: {}\n".format(
ret, " ".join(command), input_path, output_path, err
)
)
raise ValueError(err)
return {
"push_constants": push_constants,
"vertex_attributes": vertex_attributes,
"descriptor_sets": descriptor_sets,
}
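# Hedged sketch of the reflection dictionary returned above, with made-up values:
#   {
#       "push_constants": [{"name": "transform", "offset": 0, "size": 64}],
#       "vertex_attributes": [{"name": "a_vertex", "location": 0}],
#       "descriptor_sets": [{"set": 0, "bindings": [...]}],
#   }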
def output_xml_tree(tree, path):
"""We use a simple custom printer to have the same output for all platforms."""
with open(path, "wt") as handle:
handle.write('<?xml version="1.0" encoding="utf-8"?>\n')
handle.write(
"<!-- DO NOT EDIT: GENERATED BY SCRIPT {} -->\n".format(os.path.basename(__file__))
)
def output_xml_node(node, handle, depth):
indent = "\t" * depth
attributes = ""
for attribute_name in sorted(node.attrib.keys()):
attributes += ' {}="{}"'.format(attribute_name, node.attrib[attribute_name])
if len(node) > 0:
handle.write("{}<{}{}>\n".format(indent, node.tag, attributes))
for child in node:
output_xml_node(child, handle, depth + 1)
handle.write("{}</{}>\n".format(indent, node.tag))
else:
handle.write("{}<{}{}/>\n".format(indent, node.tag, attributes))
output_xml_node(tree.getroot(), handle, 0)
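# Minimal usage sketch for the custom printer (hypothetical tree, not part of
# the build): the output is deterministic, tab-indented XML with sorted
# attributes, identical on every platform.
#
#   root = ET.Element("programs")
#   ET.SubElement(root, "program", type="spirv", file="spirv/dummy_0000.xml")
#   output_xml_tree(ET.ElementTree(root), "programs.xml")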
def build(rules, input_mod_path, output_mod_path, dependencies, program_name):
sys.stdout.write('Program "{}"\n'.format(program_name))
if rules and program_name not in rules:
sys.stdout.write(" Skip.\n")
return
sys.stdout.write(" Building.\n")
rebuild = False
@ -281,64 +339,76 @@ def build(rules, input_mod_path, output_mod_path, dependencies, program_name):
program_defines = []
shaders = []
tree = ET.parse(os.path.join(input_mod_path, "shaders", "glsl", program_name + ".xml"))
root = tree.getroot()
for element in root:
element_tag = element.tag
if element_tag == "defines":
for child in element:
values = []
for value in child:
values.append(
{
"name": child.attrib["name"],
"value": value.text,
}
)
defines.append(values)
elif element_tag == "define":
program_defines.append(
{"name": element.attrib["name"], "value": element.attrib["value"]}
)
elif element_tag == "vertex":
streams = []
for shader_child in element:
assert shader_child.tag == "stream"
streams.append(
{
"name": shader_child.attrib["name"],
"attribute": shader_child.attrib["attribute"],
}
)
if "if" in shader_child.attrib:
streams[-1]["if"] = shader_child.attrib["if"]
shaders.append(
{
"type": "vertex",
"file": element.attrib["file"],
"streams": streams,
}
)
elif element_tag == "fragment":
shaders.append(
{
"type": "fragment",
"file": element.attrib["file"],
}
)
elif element_tag == "compute":
shaders.append(
{
"type": "compute",
"file": element.attrib["file"],
}
)
else:
raise ValueError('Unsupported element tag: "{}"'.format(element_tag))
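# For orientation, a minimal program XML consistent with the parser above might
# look like this (hypothetical example; the tags mirror the branches handled
# above):
#
#   <program type="glsl">
#       <define name="USE_FOG" value="1"/>
#       <vertex file="glsl/dummy.vs">
#           <stream name="pos" attribute="a_vertex"/>
#       </vertex>
#       <fragment file="glsl/dummy.fs"/>
#   </program>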
stage_extension = {
"vertex": ".vs",
"fragment": ".fs",
"geometry": ".gs",
"compute": ".cs",
}
output_spirv_mod_path = os.path.join(output_mod_path, "shaders", "spirv")
if not os.path.isdir(output_spirv_mod_path):
os.mkdir(output_spirv_mod_path)
root = ET.Element("programs")
if "combinations" in rules[program_name]:
combinations = rules[program_name]["combinations"]
else:
combinations = list(itertools.product(*defines))
@ -346,36 +416,36 @@ def build(rules, input_mod_path, output_mod_path, dependencies, program_name):
for index, combination in enumerate(combinations):
assert index < 10000
program_path = "spirv/" + program_name + ("_%04d" % index) + ".xml"
programs_element = ET.SubElement(root, "program")
programs_element.set("type", "spirv")
programs_element.set("file", program_path)
defines_element = ET.SubElement(programs_element, "defines")
for define in combination:
if define["value"] == "UNDEFINED":
continue
define_element = ET.SubElement(defines_element, "define")
define_element.set("name", define["name"])
define_element.set("value", define["value"])
if not rebuild and os.path.isfile(os.path.join(output_mod_path, "shaders", program_path)):
continue
program_root = ET.Element("program")
program_root.set("type", "spirv")
for shader in shaders:
extension = stage_extension[shader["type"]]
file_name = program_name + ("_%04d" % index) + extension + ".spv"
output_spirv_path = os.path.join(output_spirv_mod_path, file_name)
input_glsl_path = os.path.join(input_mod_path, "shaders", shader["file"])
# Some shader programs might use vs and fs shaders from different mods.
if not os.path.isfile(input_glsl_path):
input_glsl_path = None
for dependency in dependencies:
fallback_input_path = os.path.join(dependency, "shaders", shader["file"])
if os.path.isfile(fallback_input_path):
input_glsl_path = fallback_input_path
break
@ -385,10 +455,11 @@ def build(rules, input_mod_path, output_mod_path, dependencies, program_name):
input_mod_path,
output_mod_path,
dependencies,
shader["type"],
input_glsl_path,
output_spirv_path,
combination + program_defines,
)
spirv_hash = calculate_hash(output_spirv_path)
if spirv_hash not in hashed_cache:
@ -406,77 +477,95 @@ def build(rules, input_mod_path, output_mod_path, dependencies, program_name):
else:
hashed_cache[spirv_hash].append(file_name)
shader_element = ET.SubElement(program_root, shader["type"])
shader_element.set("file", "spirv/" + file_name)
if shader["type"] == "vertex":
for stream in shader["streams"]:
if "if" in stream and not resolve_if(combination, stream["if"]):
continue
found_vertex_attribute = False
for vertex_attribute in reflection["vertex_attributes"]:
if vertex_attribute["name"] == stream["attribute"]:
found_vertex_attribute = True
break
if not found_vertex_attribute and stream["attribute"] == "a_tangent":
continue
if not found_vertex_attribute:
sys.stderr.write(
"Vertex attribute not found: {}\n".format(stream["attribute"])
)
assert found_vertex_attribute
stream_element = ET.SubElement(shader_element, "stream")
stream_element.set("name", stream["name"])
stream_element.set("attribute", stream["attribute"])
for vertex_attribute in reflection["vertex_attributes"]:
if vertex_attribute["name"] == stream["attribute"]:
stream_element.set("location", vertex_attribute["location"])
break
for push_constant in reflection["push_constants"]:
push_constant_element = ET.SubElement(shader_element, "push_constant")
push_constant_element.set("name", push_constant["name"])
push_constant_element.set("size", push_constant["size"])
push_constant_element.set("offset", push_constant["offset"])
descriptor_sets_element = ET.SubElement(shader_element, "descriptor_sets")
for descriptor_set in reflection["descriptor_sets"]:
descriptor_set_element = ET.SubElement(descriptor_sets_element, "descriptor_set")
descriptor_set_element.set("set", descriptor_set["set"])
for binding in descriptor_set["bindings"]:
binding_element = ET.SubElement(descriptor_set_element, "binding")
binding_element.set("type", binding["type"])
binding_element.set("binding", binding["binding"])
if binding["type"] == "uniform":
binding_element.set("size", binding["size"])
for member in binding["members"]:
member_element = ET.SubElement(binding_element, "member")
member_element.set("name", member["name"])
member_element.set("size", member["size"])
member_element.set("offset", member["offset"])
elif binding["type"].startswith("sampler"):
binding_element.set("name", binding["name"])
elif binding["type"].startswith("storage"):
binding_element.set("name", binding["name"])
program_tree = ET.ElementTree(program_root)
output_xml_tree(program_tree, os.path.join(output_mod_path, "shaders", program_path))
tree = ET.ElementTree(root)
output_xml_tree(tree, os.path.join(output_mod_path, "shaders", "spirv", program_name + ".xml"))
def run():
parser = argparse.ArgumentParser()
parser.add_argument(
"input_mod_path",
help="a path to a directory with input mod with GLSL shaders like binaries/data/mods/public",
)
parser.add_argument("rules_path", help="a path to JSON with rules")
parser.add_argument(
"output_mod_path",
help="a path to a directory with mod to store SPIR-V shaders like binaries/data/mods/spirv",
)
parser.add_argument(
"-d",
"--dependency",
action="append",
help="a path to a directory with a dependency mod (at least modmod should present as dependency)",
required=True,
)
parser.add_argument(
"-p",
"--program_name",
help="a shader program name (in case of presence the only program will be compiled)",
default=None,
)
args = parser.parse_args()
if not os.path.isfile(args.rules_path):
sys.stderr.write('Rules "{}" are not found\n'.format(args.rules_path))
return
with open(args.rules_path, "rt") as handle:
rules = json.load(handle)
if not os.path.isdir(args.input_mod_path):
@ -487,7 +576,7 @@ def run():
sys.stderr.write('Output mod path "{}" is not a directory\n'.format(args.output_mod_path))
return
mod_shaders_path = os.path.join(args.input_mod_path, "shaders", "glsl")
if not os.path.isdir(mod_shaders_path):
sys.stderr.write('Directory "{}" was not found\n'.format(mod_shaders_path))
return
@ -497,11 +586,11 @@ def run():
if not args.program_name:
for file_name in os.listdir(mod_shaders_path):
name, ext = os.path.splitext(file_name)
if ext.lower() == ".xml":
build(rules, args.input_mod_path, args.output_mod_path, args.dependency, name)
else:
build(rules, args.input_mod_path, args.output_mod_path, args.dependency, args.program_name)
if __name__ == "__main__":
run()
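# Example invocation (script name and paths are illustrative, not from the source):
#   python3 compile.py binaries/data/mods/public rules.json \
#       binaries/data/mods/spirv -d binaries/data/mods/mod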

View File

@ -22,15 +22,16 @@
# THE SOFTWARE.
import sys
import xml.etree.ElementTree as ET
from pathlib import Path
import os
import glob
sys.path.append("../entity")
from scriptlib import SimulTemplateEntity # noqa: E402
AttackTypes = ["Hack", "Pierce", "Crush", "Poison", "Fire"]
Resources = ["food", "wood", "stone", "metal"]
@ -93,13 +94,14 @@ AddSortingOverlay = True
# This is the path to the /templates/ folder to consider. Change this for mod
# support.
modsFolder = Path(__file__).resolve().parents[3] / "binaries" / "data" / "mods"
basePath = modsFolder / "public" / "simulation" / "templates"
# For performance purposes, cache opened templates files.
globalTemplatesList = {}
sim_entity = SimulTemplateEntity(modsFolder, None)
def htbout(file, balise, value):
file.write("<" + balise + ">" + value + "</" + balise + ">\n")
@ -113,7 +115,9 @@ def fastParse(template_name):
if template_name in globalTemplatesList:
return globalTemplatesList[template_name]
parent_string = ET.parse(template_name).getroot().get("parent")
globalTemplatesList[template_name] = sim_entity.load_inherited(
"simulation/templates/", str(template_name), ["public"]
)
globalTemplatesList[template_name].set("parent", parent_string)
return globalTemplatesList[template_name]
@ -126,7 +130,9 @@ def getParents(template_name):
parents = set()
for parent in parents_string.split("|"):
parents.add(parent)
for element in getParents(
sim_entity.get_file("simulation/templates/", parent + ".xml", "public")
):
parents.add(element)
return parents
@ -135,13 +141,14 @@ def getParents(template_name):
def ExtractValue(value):
return float(value.text) if value is not None else 0.0
# This function checks that a template has the given parent.
def hasParentTemplate(template_name, parentName):
return any(parentName == parent + ".xml" for parent in getParents(template_name))
def CalcUnit(UnitName, existingUnit=None):
if existingUnit is not None:
unit = existingUnit
else:
unit = {
@ -188,23 +195,23 @@ def CalcUnit(UnitName, existingUnit=None):
for type in list(resource_cost):
unit["Cost"][type.tag] = ExtractValue(type)
if Template.find("./Attack/Melee") != None:
if Template.find("./Attack/Melee") is not None:
unit["RepeatRate"]["Melee"] = ExtractValue(Template.find("./Attack/Melee/RepeatTime"))
unit["PrepRate"]["Melee"] = ExtractValue(Template.find("./Attack/Melee/PrepareTime"))
for atttype in AttackTypes:
unit["Attack"]["Melee"][atttype] = ExtractValue( Template.find("./Attack/Melee/Damage/" + atttype))
unit["Attack"]["Melee"][atttype] = ExtractValue(
Template.find("./Attack/Melee/Damage/" + atttype)
)
attack_melee_bonus = Template.find("./Attack/Melee/Bonuses")
if attack_melee_bonus is not None:
for Bonus in attack_melee_bonus:
Against = []
CivAg = []
if Bonus.find("Classes") != None \
and Bonus.find("Classes").text != None:
if Bonus.find("Classes") is not None and Bonus.find("Classes").text is not None:
Against = Bonus.find("Classes").text.split(" ")
if Bonus.find("Civ") != None and Bonus.find("Civ").text != None:
if Bonus.find("Civ") is not None and Bonus.find("Civ").text is not None:
CivAg = Bonus.find("Civ").text.split(" ")
Val = float(Bonus.find("Multiplier").text)
unit["AttackBonuses"][Bonus.tag] = {
@ -223,7 +230,7 @@ def CalcUnit(UnitName, existingUnit=None):
unit["Restricted"].pop(newClasses.index(elem))
unit["Restricted"] += newClasses
elif Template.find("./Attack/Ranged") != None:
elif Template.find("./Attack/Ranged") is not None:
unit["Ranged"] = True
unit["Range"] = ExtractValue(Template.find("./Attack/Ranged/MaxRange"))
unit["Spread"] = ExtractValue(Template.find("./Attack/Ranged/Projectile/Spread"))
@ -231,16 +238,17 @@ def CalcUnit(UnitName, existingUnit=None):
unit["PrepRate"]["Ranged"] = ExtractValue(Template.find("./Attack/Ranged/PrepareTime"))
for atttype in AttackTypes:
unit["Attack"]["Ranged"][atttype] = ExtractValue(Template.find("./Attack/Ranged/Damage/" + atttype) )
unit["Attack"]["Ranged"][atttype] = ExtractValue(
Template.find("./Attack/Ranged/Damage/" + atttype)
)
if Template.find("./Attack/Ranged/Bonuses") != None:
if Template.find("./Attack/Ranged/Bonuses") is not None:
for Bonus in Template.find("./Attack/Ranged/Bonuses"):
Against = []
CivAg = []
if Bonus.find("Classes") != None \
and Bonus.find("Classes").text != None:
if Bonus.find("Classes") is not None and Bonus.find("Classes").text is not None:
Against = Bonus.find("Classes").text.split(" ")
if Bonus.find("Civ") != None and Bonus.find("Civ").text != None:
if Bonus.find("Civ") is not None and Bonus.find("Civ").text is not None:
CivAg = Bonus.find("Civ").text.split(" ")
Val = float(Bonus.find("Multiplier").text)
unit["AttackBonuses"][Bonus.tag] = {
@ -248,9 +256,8 @@ def CalcUnit(UnitName, existingUnit=None):
"Civs": CivAg,
"Multiplier": Val,
}
if Template.find("./Attack/Melee/RestrictedClasses") != None:
newClasses = Template.find("./Attack/Melee/RestrictedClasses")\
.text.split(" ")
if Template.find("./Attack/Melee/RestrictedClasses") is not None:
newClasses = Template.find("./Attack/Melee/RestrictedClasses").text.split(" ")
for elem in newClasses:
if elem.find("-") != -1:
newClasses.pop(newClasses.index(elem))
@ -258,19 +265,17 @@ def CalcUnit(UnitName, existingUnit=None):
unit["Restricted"].pop(newClasses.index(elem))
unit["Restricted"] += newClasses
if Template.find("Resistance") != None:
if Template.find("Resistance") is not None:
for atttype in AttackTypes:
unit["Resistance"][atttype] = ExtractValue(Template.find(
"./Resistance/Entity/Damage/" + atttype
))
unit["Resistance"][atttype] = ExtractValue(
Template.find("./Resistance/Entity/Damage/" + atttype)
)
if Template.find("./UnitMotion") != None:
if Template.find("./UnitMotion/WalkSpeed") != None:
if Template.find("./UnitMotion") is not None:
if Template.find("./UnitMotion/WalkSpeed") is not None:
unit["WalkSpeed"] = ExtractValue(Template.find("./UnitMotion/WalkSpeed"))
if Template.find("./Identity/VisibleClasses") != None:
if Template.find("./Identity/VisibleClasses") is not None:
newClasses = Template.find("./Identity/VisibleClasses").text.split(" ")
for elem in newClasses:
if elem.find("-") != -1:
@ -279,7 +284,7 @@ def CalcUnit(UnitName, existingUnit=None):
unit["Classes"].pop(newClasses.index(elem))
unit["Classes"] += newClasses
if Template.find("./Identity/Classes") != None:
if Template.find("./Identity/Classes") is not None:
newClasses = Template.find("./Identity/Classes").text.split(" ")
for elem in newClasses:
if elem.find("-") != -1:
@ -308,28 +313,23 @@ def WriteUnit(Name, UnitDict):
+ "%</td>"
)
attType = "Ranged" if UnitDict["Ranged"] == True else "Melee"
attType = "Ranged" if UnitDict["Ranged"] is True else "Melee"
if UnitDict["RepeatRate"][attType] != "0":
for atype in AttackTypes:
repeatTime = float(UnitDict["RepeatRate"][attType]) / 1000.0
ret += (
"<td>"
+ str("%.1f" % (
float(UnitDict["Attack"][attType][atype]) / repeatTime
)) + "</td>"
+ str("%.1f" % (float(UnitDict["Attack"][attType][atype]) / repeatTime))
+ "</td>"
)
ret += "<td>" + str("%.1f" % (float(UnitDict["RepeatRate"][attType]) / 1000.0)) + "</td>"
else:
for atype in AttackTypes:
ret += "<td> - </td>"
ret += "<td> - </td>"
if UnitDict["Ranged"] == True and UnitDict["Range"] > 0:
if UnitDict["Ranged"] is True and UnitDict["Range"] > 0:
ret += "<td>" + str("%.1f" % float(UnitDict["Range"])) + "</td>"
spread = float(UnitDict["Spread"])
ret += "<td>" + str("%.1f" % spread) + "</td>"
@ -337,11 +337,9 @@ def WriteUnit(Name, UnitDict):
ret += "<td> - </td><td> - </td>"
for rtype in Resources:
ret += "<td>" + str("%.0f" %
float(UnitDict["Cost"][rtype])) + "</td>"
ret += "<td>" + str("%.0f" % float(UnitDict["Cost"][rtype])) + "</td>"
ret += "<td>" + str("%.0f" %
float(UnitDict["Cost"]["population"])) + "</td>"
ret += "<td>" + str("%.0f" % float(UnitDict["Cost"]["population"])) + "</td>"
ret += '<td style="text-align:left;">'
for Bonus in UnitDict["AttackBonuses"]:
@ -362,11 +360,11 @@ def SortFn(A):
sortVal += 1
if classe in A[1]["Classes"]:
break
if ComparativeSortByChamp is True and A[0].find("champion") == -1:
sortVal -= 20
if ComparativeSortByCav is True and A[0].find("cavalry") == -1:
sortVal -= 10
if A[1]["Civ"] != None and A[1]["Civ"] in Civs:
if A[1]["Civ"] is not None and A[1]["Civ"] in Civs:
sortVal += 100 * Civs.index(A[1]["Civ"])
return sortVal
@ -403,9 +401,7 @@ def WriteColouredDiff(file, diff, isChanged):
file.write(
"""<td><span style="color:rgb({});">{}</span></td>
""".format(
rgb_str, cleverParse(diff)
)
""".format(rgb_str, cleverParse(diff))
)
return isChanged
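# Note on the diff convention used throughout (inferred from usage, not stated
# in the source): each diff is a complex number whose real part is the numeric
# change and whose imaginary part tells WriteColouredDiff which direction is an
# improvement, e.g.
#   diff = -1j + (new - old)  # higher is better (walk speed, DPS, range)
#   diff = +1j + (new - old)  # lower is better (cost, build time, spread)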
@ -413,10 +409,14 @@ def WriteColouredDiff(file, diff, isChanged):
def computeUnitEfficiencyDiff(TemplatesByParent, Civs):
efficiency_table = {}
for parent in TemplatesByParent:
for template in [
template for template in TemplatesByParent[parent] if template[1]["Civ"] not in Civs
]:
print(template)
TemplatesByParent[parent] = [
template for template in TemplatesByParent[parent] if template[1]["Civ"] in Civs
]
TemplatesByParent[parent].sort(key=lambda x: Civs.index(x[1]["Civ"]))
for tp in TemplatesByParent[parent]:
@ -426,15 +426,11 @@ def computeUnitEfficiencyDiff(TemplatesByParent, Civs):
efficiency_table[(parent, tp[0], "HP")] = diff
# Build Time
diff = +1j + (int(tp[1]["BuildTime"]) -
int(templates[parent]["BuildTime"]))
diff = +1j + (int(tp[1]["BuildTime"]) - int(templates[parent]["BuildTime"]))
efficiency_table[(parent, tp[0], "BuildTime")] = diff
# walk speed
diff = -1j + (float(tp[1]["WalkSpeed"]) - float(templates[parent]["WalkSpeed"]))
efficiency_table[(parent, tp[0], "WalkSpeed")] = diff
# Resistance
@ -446,54 +442,42 @@ def computeUnitEfficiencyDiff(TemplatesByParent, Civs):
efficiency_table[(parent, tp[0], "Resistance/" + atype)] = diff
# Attack types (DPS) and rate.
attType = "Ranged" if tp[1]["Ranged"] == True else "Melee"
attType = "Ranged" if tp[1]["Ranged"] is True else "Melee"
if tp[1]["RepeatRate"][attType] != "0":
for atype in AttackTypes:
myDPS = float(tp[1]["Attack"][attType][atype]) / (
float(tp[1]["RepeatRate"][attType]) / 1000.0
)
parentDPS = float(templates[parent]["Attack"][attType][atype]) / (
float(templates[parent]["RepeatRate"][attType]) / 1000.0
)
diff = -1j + (myDPS - parentDPS)
efficiency_table[(parent, tp[0], "Attack/" + attType + "/" + atype)] = diff
diff = -1j + (
float(tp[1]["RepeatRate"][attType]) / 1000.0
- float(templates[parent]["RepeatRate"][attType]) / 1000.0
)
efficiency_table[
(parent, tp[0], "Attack/" + attType + "/" + atype +
"/RepeatRate")
(parent, tp[0], "Attack/" + attType + "/" + atype + "/RepeatRate")
] = diff
# range and spread
if tp[1]["Ranged"] == True:
diff = -1j + (
float(tp[1]["Range"]) -
float(templates[parent]["Range"])
)
efficiency_table[
(parent, tp[0], "Attack/" + attType + "/Ranged/Range")
] = diff
if tp[1]["Ranged"] is True:
diff = -1j + (float(tp[1]["Range"]) - float(templates[parent]["Range"]))
efficiency_table[(parent, tp[0], "Attack/" + attType + "/Ranged/Range")] = diff
diff = (float(tp[1]["Spread"]) -
float(templates[parent]["Spread"]))
efficiency_table[
(parent, tp[0], "Attack/" + attType + "/Ranged/Spread")
] = diff
diff = float(tp[1]["Spread"]) - float(templates[parent]["Spread"])
efficiency_table[(parent, tp[0], "Attack/" + attType + "/Ranged/Spread")] = (
diff
)
for rtype in Resources:
diff = +1j + (
float(tp[1]["Cost"][rtype])
- float(templates[parent]["Cost"][rtype])
float(tp[1]["Cost"][rtype]) - float(templates[parent]["Cost"][rtype])
)
efficiency_table[(parent, tp[0], "Resources/" + rtype)] = diff
diff = +1j + (
float(tp[1]["Cost"]["population"])
- float(templates[parent]["Cost"]["population"])
float(tp[1]["Cost"]["population"]) - float(templates[parent]["Cost"]["population"])
)
efficiency_table[(parent, tp[0], "Population")] = diff
@ -512,7 +496,7 @@ def computeTemplates(LoadTemplatesIfParent):
if hasParentTemplate(template, possParent):
found = True
break
if found is True:
templates[template] = CalcUnit(template)
os.chdir(pwd)
return templates
@ -541,7 +525,6 @@ def computeCivTemplates(template: dict, Civs: list):
civ_list = list(glob.glob("units/" + Civ + "/*.xml"))
for template in civ_list:
if os.path.isfile(template):
# filter based on FilterOut
breakIt = False
for filter in FilterOut:
@ -601,17 +584,14 @@ CivTemplates = computeCivTemplates(templates, Civs)
TemplatesByParent = computeTemplatesByParent(templates, Civs, CivTemplates)
# Not used; use it for your own custom analysis
efficiencyTable = computeUnitEfficiencyDiff(TemplatesByParent, Civs)
############################################################
def writeHTML():
"""Create the HTML file"""
f = open(
os.path.realpath(__file__).replace("unitTables.py", "")
+ "unit_summary_table.html",
os.path.realpath(__file__).replace("unitTables.py", "") + "unit_summary_table.html",
"w",
)
@ -699,10 +679,7 @@ differences between the two.
TemplatesByParent[parent].sort(key=lambda x: Civs.index(x[1]["Civ"]))
for tp in TemplatesByParent[parent]:
isChanged = False
ff = open(os.path.realpath(__file__).replace("unitTables.py", "") + ".cache", "w")
ff.write("<tr>")
ff.write(
@ -711,9 +688,7 @@ differences between the two.
+ "</th>"
)
ff.write(
'<td class="Sub">'
+ tp[0].replace(".xml", "").replace("units/", "")
+ "</td>"
'<td class="Sub">' + tp[0].replace(".xml", "").replace("units/", "") + "</td>"
)
# HP
@ -721,15 +696,11 @@ differences between the two.
isChanged = WriteColouredDiff(ff, diff, isChanged)
# Build Time
diff = +1j + (int(tp[1]["BuildTime"]) -
int(templates[parent]["BuildTime"]))
diff = +1j + (int(tp[1]["BuildTime"]) - int(templates[parent]["BuildTime"]))
isChanged = WriteColouredDiff(ff, diff, isChanged)
# walk speed
diff = -1j + (float(tp[1]["WalkSpeed"]) - float(templates[parent]["WalkSpeed"]))
isChanged = WriteColouredDiff(ff, diff, isChanged)
# Resistance
@ -741,19 +712,16 @@ differences between the two.
isChanged = WriteColouredDiff(ff, diff, isChanged)
# Attack types (DPS) and rate.
attType = "Ranged" if tp[1]["Ranged"] == True else "Melee"
attType = "Ranged" if tp[1]["Ranged"] is True else "Melee"
if tp[1]["RepeatRate"][attType] != "0":
for atype in AttackTypes:
myDPS = float(tp[1]["Attack"][attType][atype]) / (
float(tp[1]["RepeatRate"][attType]) / 1000.0
)
parentDPS = float(templates[parent]["Attack"][attType][atype]) / (
float(templates[parent]["RepeatRate"][attType]) / 1000.0
)
isChanged = WriteColouredDiff(ff, -1j + (myDPS - parentDPS), isChanged)
isChanged = WriteColouredDiff(
ff,
-1j
@ -764,32 +732,26 @@ differences between the two.
isChanged,
)
# range and spread
if tp[1]["Ranged"] == True:
if tp[1]["Ranged"] is True:
isChanged = WriteColouredDiff(
ff,
-1j + (float(tp[1]["Range"]) - float(templates[parent]["Range"])),
isChanged,
)
mySpread = float(tp[1]["Spread"])
parentSpread = float(templates[parent]["Spread"])
isChanged = WriteColouredDiff(ff, +1j + (mySpread - parentSpread), isChanged)
else:
ff.write("<td><span style='color:rgb(200,200,200);'>-</span></td><td><span style='color:rgb(200,200,200);'>-</span></td>")
ff.write(
"<td><span style='color:rgb(200,200,200);'>-</span></td><td><span style='color:rgb(200,200,200);'>-</span></td>"
)
else:
ff.write("<td></td><td></td><td></td><td></td><td></td><td></td>")
for rtype in Resources:
isChanged = WriteColouredDiff(
ff,
+1j
+ (
float(tp[1]["Cost"][rtype])
- float(templates[parent]["Cost"][rtype])
),
+1j + (float(tp[1]["Cost"][rtype]) - float(templates[parent]["Cost"][rtype])),
isChanged,
)
@ -808,8 +770,7 @@ differences between the two.
ff.close() # to actually write into the file
with open(
os.path.realpath(__file__).replace("unitTables.py", "") +
".cache", "r"
os.path.realpath(__file__).replace("unitTables.py", "") + ".cache", "r"
) as ff:
unitStr = ff.read()

View File

@ -1,13 +1,14 @@
#!/usr/bin/env python3
from argparse import ArgumentParser
from pathlib import Path
from os.path import join, realpath, exists, dirname
from json import load
from re import match
from logging import getLogger, StreamHandler, INFO, WARNING, Filter, Formatter
import lxml.etree
import sys
class SingleLevelFilter(Filter):
def __init__(self, passlevel, reject):
self.passlevel = passlevel
@ -15,15 +16,17 @@ class SingleLevelFilter(Filter):
def filter(self, record):
if self.reject:
return record.levelno != self.passlevel
else:
return record.levelno == self.passlevel
class VFS_File:
def __init__(self, mod_name, vfs_path):
self.mod_name = mod_name
self.vfs_path = vfs_path
class RelaxNGValidator:
def __init__(self, vfs_root, mods=None, verbose=False):
self.mods = mods if mods is not None else []
@ -38,18 +41,18 @@ class RelaxNGValidator:
# create a console handler; this plays nicer with Windows and leaves room for future uses
ch = StreamHandler(sys.stdout)
ch.setLevel(INFO)
ch.setFormatter(Formatter("%(levelname)s - %(message)s"))
f1 = SingleLevelFilter(INFO, False)
ch.addFilter(f1)
logger.addHandler(ch)
errorch = StreamHandler(sys.stderr)
errorch.setLevel(WARNING)
errorch.setFormatter(Formatter("%(levelname)s - %(message)s"))
logger.addHandler(errorch)
self.logger = logger
self.inError = False
def run(self):
self.validate_actors()
self.validate_variants()
self.validate_guis()
@ -63,7 +66,7 @@ class RelaxNGValidator:
return self.inError
def main(self):
""" Program entry point, parses command line arguments and launches the validation """
"""Program entry point, parses command line arguments and launches the validation"""
# ordered unique mods (dicts maintain insertion order since Python 3.6)
self.logger.info(f"Checking {'|'.join(self.mods)}'s integrity.")
self.logger.info(f"The following mods will be loaded: {'|'.join(self.mods)}.")
@ -75,88 +78,115 @@ class RelaxNGValidator:
- Path relative to the mod base
- full Path
"""
full_exts = ["." + ext for ext in ext_list]
def find_recursive(dp, base):
"""(relative Path, full Path) generator"""
if dp.is_dir():
if dp.name != ".svn" and dp.name != ".git" and not dp.name.endswith("~"):
for fp in dp.iterdir():
yield from find_recursive(fp, base)
elif dp.suffix in full_exts:
relative_file_path = dp.relative_to(base)
yield (relative_file_path, dp.resolve())
return [
(rp, fp)
for mod in mods
for (rp, fp) in find_recursive(vfs_root / mod / vfs_path, vfs_root / mod)
]
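# Hedged usage sketch (values illustrative): on a validator instance,
#   validator.find_files(validator.vfs_root, ["public"], "art/actors/", "xml")
# yields (relative_path, full_path) pairs for every actor XML in the mod.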
def validate_actors(self):
self.logger.info("Validating actors...")
files = self.find_files(self.vfs_root, self.mods, "art/actors/", "xml")
self.validate_files("actors", files, "art/actors/actor.rng")
def validate_variants(self):
self.logger.info("Validating variants...")
files = self.find_files(self.vfs_root, self.mods, "art/variants/", "xml")
self.validate_files("variant", files, "art/variants/variant.rng")
def validate_guis(self):
self.logger.info("Validating gui files...")
pages = [
file
for file in self.find_files(self.vfs_root, self.mods, "gui/", "xml")
if match(r".*[\\\/]page(_[^.\/\\]+)?\.xml$", str(file[0]))
]
self.validate_files("gui page", pages, "gui/gui_page.rng")
xmls = [
file
for file in self.find_files(self.vfs_root, self.mods, "gui/", "xml")
if not match(r".*[\\\/]page(_[^.\/\\]+)?\.xml$", str(file[0]))
]
self.validate_files("gui xml", xmls, "gui/gui.rng")
def validate_maps(self):
self.logger.info("Validating maps...")
files = self.find_files(self.vfs_root, self.mods, "maps/scenarios/", "xml")
self.validate_files("map", files, "maps/scenario.rng")
files = self.find_files(self.vfs_root, self.mods, "maps/skirmishes/", "xml")
self.validate_files("map", files, "maps/scenario.rng")
def validate_materials(self):
self.logger.info("Validating materials...")
files = self.find_files(self.vfs_root, self.mods, "art/materials/", "xml")
self.validate_files("material", files, "art/materials/material.rng")
def validate_particles(self):
self.logger.info("Validating particles...")
files = self.find_files(self.vfs_root, self.mods, "art/particles/", "xml")
self.validate_files("particle", files, "art/particles/particle.rng")
def validate_simulation(self):
self.logger.info("Validating simulation...")
file = self.find_files(self.vfs_root, self.mods, "simulation/data/pathfinder", "xml")
self.validate_files("pathfinder", file, "simulation/data/pathfinder.rng")
file = self.find_files(self.vfs_root, self.mods, "simulation/data/territorymanager", "xml")
self.validate_files("territory manager", file, "simulation/data/territorymanager.rng")
def validate_soundgroups(self):
self.logger.info("Validating soundgroups...")
files = self.find_files(self.vfs_root, self.mods, "audio/", "xml")
self.validate_files("sound group", files, "audio/sound_group.rng")
def validate_terrains(self):
self.logger.info("Validating terrains...")
terrains = [
file
for file in self.find_files(self.vfs_root, self.mods, "art/terrains/", "xml")
if "terrains.xml" in str(file[0])
]
self.validate_files("terrain", terrains, "art/terrains/terrain.rng")
terrains_textures = [
file
for file in self.find_files(self.vfs_root, self.mods, "art/terrains/", "xml")
if "terrains.xml" not in str(file[0])
]
self.validate_files(
"terrain texture", terrains_textures, "art/terrains/terrain_texture.rng"
)
def validate_textures(self):
self.logger.info("Validating textures...")
files = [
file
for file in self.find_files(self.vfs_root, self.mods, "art/textures/", "xml")
if "textures.xml" in str(file[0])
]
self.validate_files("texture", files, "art/textures/texture.rng")
def get_physical_path(self, mod_name, vfs_path):
return realpath(join(self.vfs_root, mod_name, vfs_path))
def get_relaxng_file(self, schemapath):
"""We look for the highest priority mod relax NG file"""
for mod in self.mods:
relax_ng_path = self.get_physical_path(mod, schemapath)
if exists(relax_ng_path):
return relax_ng_path
"""We look for the highest priority mod relax NG file"""
for mod in self.mods:
relax_ng_path = self.get_physical_path(mod, schemapath)
if exists(relax_ng_path):
return relax_ng_path
return ""
return ""
def validate_files(self, name, files, schemapath):
relax_ng_path = self.get_relaxng_file(schemapath)
@ -185,27 +215,40 @@ class RelaxNGValidator:
def get_mod_dependencies(vfs_root, *mods):
modjsondeps = []
for mod in mods:
mod_json_path = Path(vfs_root) / mod / "mod.json"
if not exists(mod_json_path):
continue
with open(mod_json_path, encoding="utf-8") as f:
modjson = load(f)
# 0ad's folder isn't named like the mod.
modjsondeps.extend(
["public" if "0ad" in dep else dep for dep in modjson.get("dependencies", [])]
)
return modjsondeps
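# Illustrative example (hypothetical mod.json): {"dependencies": ["0ad", "mod"]}
# would yield ["public", "mod"], because any dependency naming 0ad is mapped to
# the "public" folder above.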
if __name__ == "__main__":
script_dir = dirname(realpath(__file__))
default_root = join(script_dir, "..", "..", "..", "binaries", "data", "mods")
ap = ArgumentParser(description="Validates XML files againt their Relax NG schemas")
ap.add_argument("-r", "--root", action="store", dest="root", default=default_root)
ap.add_argument(
"-v", "--verbose", action="store_true", default=True, help="Log validation errors."
)
ap.add_argument(
"-m",
"--mods",
metavar="MOD",
dest="mods",
nargs="+",
default=["public"],
help="specify which mods to check. Default to public and mod.",
)
args = ap.parse_args()
mods = list(
dict.fromkeys([*args.mods, *get_mod_dependencies(args.root, *args.mods), "mod"]).keys()
)
relax_ng_validator = RelaxNGValidator(args.root, mods=mods, verbose=args.verbose)
if not relax_ng_validator.main():
sys.exit(1)

View File

@ -6,6 +6,7 @@ import re
import xml.etree.ElementTree
from logging import getLogger, StreamHandler, INFO, WARNING, Formatter, Filter
class SingleLevelFilter(Filter):
def __init__(self, passlevel, reject):
self.passlevel = passlevel
@ -13,9 +14,10 @@ class SingleLevelFilter(Filter):
def filter(self, record):
if self.reject:
return record.levelno != self.passlevel
else:
return record.levelno == self.passlevel
class Actor:
def __init__(self, mod_name, vfs_path):
@ -23,7 +25,7 @@ class Actor:
self.vfs_path = vfs_path
self.name = os.path.basename(vfs_path)
self.textures = []
self.material = ""
self.logger = getLogger(__name__)
def read(self, physical_path):
@ -34,17 +36,17 @@ class Actor:
return False
root = tree.getroot()
# Special case: particles don't need a diffuse texture.
if len(root.findall(".//particles")) > 0:
self.textures.append("baseTex")
for element in root.findall(".//material"):
self.material = element.text
for element in root.findall(".//texture"):
self.textures.append(element.get("name"))
for element in root.findall(".//variant"):
file = element.get("file")
if file:
self.read_variant(physical_path, os.path.join("art", "variants", file))
return True
def read_variant(self, actor_physical_path, relative_path):
@ -56,12 +58,12 @@ class Actor:
return False
root = tree.getroot()
file = root.get("file")
if file:
self.read_variant(actor_physical_path, os.path.join("art", "variants", file))
for element in root.findall(".//texture"):
self.textures.append(element.get("name"))
class Material:
@ -77,8 +79,8 @@ class Material:
except xml.etree.ElementTree.ParseError as err:
self.logger.error('"%s": %s' % (physical_path, err.msg))
return False
for element in root.findall(".//required_texture"):
texture_name = element.get("name")
self.required_textures.append(texture_name)
return True
@ -86,7 +88,7 @@ class Material:
class Validator:
def __init__(self, vfs_root, mods=None):
if mods is None:
mods = ["mod", "public"]
self.vfs_root = vfs_root
self.mods = mods
@ -102,13 +104,13 @@ class Validator:
# create a console handler; this plays nicer with Windows and leaves room for future uses
ch = StreamHandler(sys.stdout)
ch.setLevel(INFO)
ch.setFormatter(Formatter("%(levelname)s - %(message)s"))
f1 = SingleLevelFilter(INFO, False)
ch.addFilter(f1)
logger.addHandler(ch)
errorch = StreamHandler(sys.stderr)
errorch.setLevel(WARNING)
errorch.setFormatter(Formatter("%(levelname)s - %(message)s"))
logger.addHandler(errorch)
self.logger = logger
self.inError = False
@ -125,17 +127,14 @@ class Validator:
if not os.path.isdir(physical_path):
return result
for file_name in os.listdir(physical_path):
if file_name == ".git" or file_name == ".svn":
continue
vfs_file_path = os.path.join(vfs_path, file_name)
physical_file_path = os.path.join(physical_path, file_name)
if os.path.isdir(physical_file_path):
result += self.find_mod_files(mod_name, vfs_file_path, pattern)
elif os.path.isfile(physical_file_path) and pattern.match(file_name):
result.append({"mod_name": mod_name, "vfs_path": vfs_file_path})
return result
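# Each entry collected above has the shape (values illustrative):
#   {"mod_name": "public", "vfs_path": "art/materials/default.xml"}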
def find_all_mods_files(self, vfs_path, pattern):
@ -145,72 +144,100 @@ class Validator:
return result
def find_materials(self, vfs_path):
self.logger.info("Collecting materials...")
material_files = self.find_all_mods_files(vfs_path, re.compile(r".*\.xml"))
for material_file in material_files:
material_name = os.path.basename(material_file["vfs_path"])
if material_name in self.materials:
continue
material = Material(material_file["mod_name"], material_file["vfs_path"])
if material.read(
self.get_physical_path(material_file["mod_name"], material_file["vfs_path"])
):
self.materials[material_name] = material
else:
self.invalid_materials[material_name] = material
def find_actors(self, vfs_path):
self.logger.info("Collecting actors...")
actor_files = self.find_all_mods_files(vfs_path, re.compile(r".*\.xml"))
for actor_file in actor_files:
actor = Actor(actor_file["mod_name"], actor_file["vfs_path"])
if actor.read(self.get_physical_path(actor_file["mod_name"], actor_file["vfs_path"])):
self.actors.append(actor)
def run(self):
self.find_materials(os.path.join("art", "materials"))
self.find_actors(os.path.join("art", "actors"))
self.logger.info("Validating textures...")
for actor in self.actors:
if not actor.material:
continue
if (
actor.material not in self.materials
and actor.material not in self.invalid_materials
):
self.logger.error(
'"%s": unknown material "%s"'
% (self.get_mod_path(actor.mod_name, actor.vfs_path), actor.material)
)
self.inError = True
if actor.material not in self.materials:
continue
material = self.materials[actor.material]
missing_textures = ", ".join(
set(
[
required_texture
for required_texture in material.required_textures
if required_texture not in actor.textures
]
)
)
if len(missing_textures) > 0:
self.logger.error('"%s": actor does not contain required texture(s) "%s" from "%s"' % (
self.get_mod_path(actor.mod_name, actor.vfs_path),
missing_textures,
material.name
))
self.logger.error(
'"%s": actor does not contain required texture(s) "%s" from "%s"'
% (
self.get_mod_path(actor.mod_name, actor.vfs_path),
missing_textures,
material.name,
)
)
self.inError = True
extra_textures = ", ".join(
set(
[
extra_texture
for extra_texture in actor.textures
if extra_texture not in material.required_textures
]
)
)
if len(extra_textures) > 0:
self.logger.warning('"%s": actor contains unnecessary texture(s) "%s" from "%s"' % (
self.get_mod_path(actor.mod_name, actor.vfs_path),
extra_textures,
material.name
))
self.logger.warning(
'"%s": actor contains unnecessary texture(s) "%s" from "%s"'
% (
self.get_mod_path(actor.mod_name, actor.vfs_path),
extra_textures,
material.name,
)
)
self.inError = True
return self.inError
if __name__ == "__main__":
script_dir = os.path.dirname(os.path.realpath(__file__))
default_root = os.path.join(script_dir, "..", "..", "..", "binaries", "data", "mods")
parser = argparse.ArgumentParser(description="Actors/materials validator.")
parser.add_argument("-r", "--root", action="store", dest="root", default=default_root)
parser.add_argument("-m", "--mods", action="store", dest="mods", default="mod,public")
args = parser.parse_args()
validator = Validator(args.root, args.mods.split(","))
if not validator.run():
sys.exit(1)