
Enable additional ruff rules

In the ruff config file added in #6954, explicitly selecting the ruff
rules to check was missed, so ruff only checked a very small subset of
its available rules. That was never the intention, so this is the first
of a series of commits enabling more rules. This PR enables all rules
whose violations can either be fixed automatically by ruff or are
trivial to fix manually. Follow-up PRs will each focus on one area of
rules, gradually improving the Python code quality.
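Most of the changes below are of this mechanical kind. For illustration, a minimal sketch (hypothetical values, not code from this repository) of the printf-style formatting rule (UP031), one of the rules ruff fixes automatically via `ruff check --fix`:

    name = "jav2d"  # hypothetical value
    # Before: flagged by UP031 (printf-style string formatting)
    print("* Converting PMD %s" % name)
    # After: the form the autofix produces
    print(f"* Converting PMD {name}")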
Dunedan 2024-08-25 06:29:39 +02:00
parent 0ba8ea3429
commit e36c6a31fe
Signed by untrusted user: Dunedan
GPG Key ID: 885B16854284E0B2
39 changed files with 509 additions and 435 deletions


@ -1,5 +1,58 @@
line-length = 99
[format]
line-ending = "lf"
[lint]
select = ["ALL"]
ignore = [
"A",
"ARG",
"ANN",
"B018",
"B023",
"C90",
"COM812",
"D",
"DTZ005",
"EM",
"ERA",
"FA",
"FIX",
"FBT",
"ISC001",
"N",
"PERF203",
"PERF401",
"PLR0912",
"PLR0913",
"PLR0915",
"PLR1704",
"PLR2004",
"PLW2901",
"PT",
"PTH",
"RUF012",
"S101",
"S310",
"S314",
"S324",
"S320",
"S603",
"S607",
"SIM102",
"SIM105",
"SIM113",
"SIM115",
"T20",
"TD",
"TRY002",
"TRY003",
"TRY004",
"UP038",
"W505"
]
[lint.isort]
lines-after-imports = 2
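The isort section above asks for two blank lines between the final import and the first statement. A minimal compliant module, for illustration:

    import os
    import sys


    def main():
        # Two blank lines separate the imports from this definition,
        # as required by lines-after-imports = 2.
        print(os.getcwd(), sys.argv)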

source/collada/tests/tests.py Normal file → Executable file

@ -2,9 +2,10 @@
# ruff: noqa: F403, F405
from ctypes import *
import os
import xml.etree.ElementTree as ET
from ctypes import *
binaries = "../../../binaries"
@ -16,20 +17,20 @@ dll_filename = {
# The DLL may need other DLLs which are in its directory, so set the path to that
# (Don't care about clobbering the old PATH - it doesn't have anything important)
os.environ["PATH"] = "%s/system/" % binaries
os.environ["PATH"] = f"{binaries}/system/"
# Load the actual library
library = cdll.LoadLibrary("%s/system/%s" % (binaries, dll_filename))
library = cdll.LoadLibrary(f"{binaries}/system/{dll_filename}")
def log(severity, message):
print("[%s] %s" % (("INFO", "WARNING", "ERROR")[severity], message))
print("[{}] {}".format(("INFO", "WARNING", "ERROR")[severity], message))
clog = CFUNCTYPE(None, c_int, c_char_p)(log)
# (the CFUNCTYPE must not be GC'd, so try to keep a reference)
library.set_logger(clog)
skeleton_definitions = open("%s/data/tests/collada/skeletons.xml" % binaries).read()
skeleton_definitions = open(f"{binaries}/data/tests/collada/skeletons.xml").read()
library.set_skeleton_definitions(skeleton_definitions, len(skeleton_definitions))
@ -115,15 +116,16 @@ clean_dir(test_mod + "/art/meshes")
clean_dir(test_mod + "/art/actors")
clean_dir(test_mod + "/art/animation")
# for test_file in ['cube', 'jav2', 'jav2b', 'teapot_basic', 'teapot_skin', 'plane_skin', 'dude_skin', 'mergenonbone', 'densemesh']:
# for test_file in ['cube', 'jav2', 'jav2b', 'teapot_basic', 'teapot_skin', 'plane_skin',
# 'dude_skin', 'mergenonbone', 'densemesh']:
# for test_file in ['teapot_basic', 'jav2b', 'jav2d']:
for test_file in ["xsitest3c", "xsitest3e", "jav2d", "jav2d2"]:
# for test_file in ['xsitest3']:
# for test_file in []:
print("* Converting PMD %s" % (test_file))
print(f"* Converting PMD {test_file}")
input_filename = "%s/%s.dae" % (test_data, test_file)
output_filename = "%s/art/meshes/%s.pmd" % (test_mod, test_file)
input_filename = f"{test_data}/{test_file}.dae"
output_filename = f"{test_mod}/art/meshes/{test_file}.pmd"
input = open(input_filename).read()
output = convert_dae_to_pmd(input)
@ -140,18 +142,18 @@ for test_file in ["xsitest3c", "xsitest3e", "jav2d", "jav2d2"]:
],
[("helmet", "teapot_basic_static")],
)
open("%s/art/actors/%s.xml" % (test_mod, test_file), "w").write(xml)
open(f"{test_mod}/art/actors/{test_file}.xml", "w").write(xml)
xml = create_actor_static(test_file, "male")
open("%s/art/actors/%s_static.xml" % (test_mod, test_file), "w").write(xml)
open(f"{test_mod}/art/actors/{test_file}_static.xml", "w").write(xml)
# for test_file in ['jav2','jav2b', 'jav2d']:
for test_file in ["xsitest3c", "xsitest3e", "jav2d", "jav2d2"]:
# for test_file in []:
print("* Converting PSA %s" % (test_file))
print(f"* Converting PSA {test_file}")
input_filename = "%s/%s.dae" % (test_data, test_file)
output_filename = "%s/art/animation/%s.psa" % (test_mod, test_file)
input_filename = f"{test_data}/{test_file}.dae"
output_filename = f"{test_mod}/art/animation/{test_file}.psa"
input = open(input_filename).read()
output = convert_dae_to_psa(input)
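Note that lines like `input = open(input_filename).read()` survive because SIM115 (open without a context manager) is on the ignore list above. A sketch of what that rule would otherwise require, with a hypothetical path:

    input_filename = "xsitest3c.dae"  # hypothetical path
    with open(input_filename) as f:
        data = f.read()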

source/tools/entity/checkrefs.py Normal file → Executable file

@ -1,15 +1,16 @@
#!/usr/bin/env python3
import sys
from argparse import ArgumentParser
from io import BytesIO
from json import load, loads
from logging import INFO, WARNING, Filter, Formatter, StreamHandler, getLogger
from os.path import basename, exists, sep
from pathlib import Path
from re import split, match
from struct import unpack, calcsize
from os.path import sep, exists, basename
from xml.etree import ElementTree
import sys
from re import match, split
from struct import calcsize, unpack
from xml.etree import ElementTree as ET
from scriptlib import SimulTemplateEntity, find_files
from logging import WARNING, getLogger, StreamHandler, INFO, Formatter, Filter
class SingleLevelFilter(Filter):
@ -20,8 +21,7 @@ class SingleLevelFilter(Filter):
def filter(self, record):
if self.reject:
return record.levelno != self.passlevel
else:
return record.levelno == self.passlevel
return record.levelno == self.passlevel
class CheckRefs:
@ -80,14 +80,15 @@ class CheckRefs:
"-a",
"--validate-actors",
action="store_true",
help="run the validator.py script to check if the actors files have extra or missing textures."
" This currently only works for the public mod.",
help="run the validator.py script to check if the actors files have extra or missing "
"textures. This currently only works for the public mod.",
)
ap.add_argument(
"-t",
"--validate-templates",
action="store_true",
help="run the validator.py script to check if the xml files match their (.rng) grammar file.",
help="run the validator.py script to check if the xml files match their (.rng) "
"grammar file.",
)
ap.add_argument(
"-m",
@ -105,8 +106,8 @@ class CheckRefs:
self.mods = list(
dict.fromkeys([*args.mods, *self.get_mod_dependencies(*args.mods), "mod"]).keys()
)
self.logger.info(f"Checking {'|'.join(args.mods)}'s integrity.")
self.logger.info(f"The following mods will be loaded: {'|'.join(self.mods)}.")
self.logger.info("Checking %s's integrity.", "|".join(args.mods))
self.logger.info("The following mods will be loaded: %s.", "|".join(self.mods))
if args.check_map_xml:
self.add_maps_xml()
self.add_maps_pmp()
@ -185,7 +186,7 @@ class CheckRefs:
for fp, ffp in sorted(mapfiles):
self.files.append(str(fp))
self.roots.append(str(fp))
et_map = ElementTree.parse(ffp).getroot()
et_map = ET.parse(ffp).getroot()
entities = et_map.find("Entities")
used = (
{entity.find("Template").text.strip() for entity in entities.findall("Entity")}
@ -211,7 +212,7 @@ class CheckRefs:
def add_maps_pmp(self):
self.logger.info("Loading maps PMP...")
# Need to generate terrain texture filename=>relative path lookup first
terrains = dict()
terrains = {}
for fp, ffp in self.find_files("art/terrains", "xml"):
name = fp.stem
# ignore terrains.xml
@ -219,7 +220,10 @@ class CheckRefs:
if name in terrains:
self.inError = True
self.logger.error(
f"Duplicate terrain name '{name}' (from '{terrains[name]}' and '{ffp}')"
"Duplicate terrain name '%s' (from '%s' and '%s')",
name,
terrains[name],
ffp,
)
terrains[name] = str(fp)
mapfiles = self.find_files("maps/scenarios", "pmp")
@ -241,7 +245,7 @@ class CheckRefs:
(mapsize,) = unpack(int_fmt, f.read(int_len))
f.seek(2 * (mapsize * 16 + 1) * (mapsize * 16 + 1), 1) # skip heightmap
(numtexs,) = unpack(int_fmt, f.read(int_len))
for i in range(numtexs):
for _i in range(numtexs):
(length,) = unpack(int_fmt, f.read(int_len))
terrain_name = f.read(length).decode("ascii") # suppose ascii encoding
self.deps.append(
@ -279,7 +283,8 @@ class CheckRefs:
def add_entities(self):
self.logger.info("Loading entities...")
simul_templates_path = Path("simulation/templates")
# TODO: We might want to get computed templates through the RL interface instead of computing the values ourselves.
# TODO: We might want to get computed templates through the RL interface instead of
# computing the values ourselves.
simul_template_entity = SimulTemplateEntity(self.vfs_root, self.logger)
custom_phase_techs = self.get_custom_phase_techs()
for fp, _ in sorted(self.find_files(simul_templates_path, "xml")):
@ -307,7 +312,8 @@ class CheckRefs:
actor = entity.find("VisualActor").find("Actor")
if "{phenotype}" in actor.text:
for phenotype in phenotypes:
# See simulation2/components/CCmpVisualActor.cpp and Identity.js for explanation.
# See simulation2/components/CCmpVisualActor.cpp and Identity.js
# for explanation.
actor_path = actor.text.replace("{phenotype}", phenotype)
self.deps.append((str(fp), f"art/actors/{actor_path}"))
else:
@ -332,7 +338,8 @@ class CheckRefs:
if sound_group.text and sound_group.text.strip():
if "{phenotype}" in sound_group.text:
for phenotype in phenotypes:
# see simulation/components/Sound.js and Identity.js for explanation
# see simulation/components/Sound.js and Identity.js
# for explanation
sound_path = sound_group.text.replace(
"{phenotype}", phenotype
).replace("{lang}", lang)
@ -483,7 +490,7 @@ class CheckRefs:
for fp, ffp in sorted(self.find_files("art/actors", "xml")):
self.files.append(str(fp))
self.roots.append(str(fp))
root = ElementTree.parse(ffp).getroot()
root = ET.parse(ffp).getroot()
if root.tag == "actor":
self.append_actor_dependencies(root, fp)
@ -500,7 +507,7 @@ class CheckRefs:
for fp, ffp in sorted(self.find_files("art/variants", "xml")):
self.files.append(str(fp))
self.roots.append(str(fp))
variant = ElementTree.parse(ffp).getroot()
variant = ET.parse(ffp).getroot()
self.append_variant_dependencies(variant, fp)
def add_art(self):
@ -543,7 +550,7 @@ class CheckRefs:
self.logger.info("Loading materials...")
for fp, ffp in sorted(self.find_files("art/materials", "xml")):
self.files.append(str(fp))
material_elem = ElementTree.parse(ffp).getroot()
material_elem = ET.parse(ffp).getroot()
for alternative in material_elem.findall("alternative"):
material = alternative.get("material")
if material is not None:
@ -554,7 +561,7 @@ class CheckRefs:
for fp, ffp in sorted(self.find_files("art/particles", "xml")):
self.files.append(str(fp))
self.roots.append(str(fp))
particle = ElementTree.parse(ffp).getroot()
particle = ET.parse(ffp).getroot()
texture = particle.find("texture")
if texture is not None:
self.deps.append((str(fp), texture.text))
@ -564,7 +571,7 @@ class CheckRefs:
for fp, ffp in sorted(self.find_files("audio", "xml")):
self.files.append(str(fp))
self.roots.append(str(fp))
sound_group = ElementTree.parse(ffp).getroot()
sound_group = ET.parse(ffp).getroot()
path = sound_group.find("Path").text.rstrip("/")
for sound in sound_group.findall("Sound"):
self.deps.append((str(fp), f"{path}/{sound.text}"))
@ -614,7 +621,7 @@ class CheckRefs:
# GUI page definitions are assumed to be named page_[something].xml and alone in that.
if match(r".*[\\\/]page(_[^.\/\\]+)?\.xml$", str(fp)):
self.roots.append(str(fp))
root_xml = ElementTree.parse(ffp).getroot()
root_xml = ET.parse(ffp).getroot()
for include in root_xml.findall("include"):
# If including an entire directory, find all the *.xml files
if include.text.endswith("/"):
@ -629,7 +636,7 @@ class CheckRefs:
else:
self.deps.append((str(fp), f"gui/{include.text}"))
else:
xml = ElementTree.parse(ffp)
xml = ET.parse(ffp)
root_xml = xml.getroot()
name = root_xml.tag
self.roots.append(str(fp))
@ -662,7 +669,6 @@ class CheckRefs:
if style.get("sound_disabled"):
self.deps.append((str(fp), f"{style.get('sound_disabled')}"))
# TODO: look at sprites, styles, etc
pass
elif name == "sprites":
for sprite in root_xml.findall("sprite"):
for image in sprite.findall("image"):
@ -711,7 +717,7 @@ class CheckRefs:
def add_tips(self):
self.logger.info("Loading tips...")
for fp, ffp in sorted(self.find_files("gui/text/tips", "txt")):
for fp, _ffp in sorted(self.find_files("gui/text/tips", "txt")):
relative_path = str(fp)
self.files.append(relative_path)
self.roots.append(relative_path)
@ -770,7 +776,7 @@ class CheckRefs:
continue
self.files.append(str(fp))
self.roots.append(str(fp))
terrain = ElementTree.parse(ffp).getroot()
terrain = ET.parse(ffp).getroot()
for texture in terrain.find("textures").findall("texture"):
if texture.get("file"):
self.deps.append((str(fp), f"art/textures/terrain/{texture.get('file')}"))
@ -796,7 +802,7 @@ class CheckRefs:
uniq_files = set(self.files)
uniq_files = [r.replace(sep, "/") for r in uniq_files]
lower_case_files = {f.lower(): f for f in uniq_files}
reverse_deps = dict()
reverse_deps = {}
for parent, dep in self.deps:
if sep != "/":
parent = parent.replace(sep, "/")
@ -817,16 +823,18 @@ class CheckRefs:
continue
callers = [str(self.vfs_to_relative_to_mods(ref)) for ref in reverse_deps[dep]]
self.logger.error(f"Missing file '{dep}' referenced by: {', '.join(sorted(callers))}")
self.logger.error(
"Missing file '%s' referenced by: %s", dep, ", ".join(sorted(callers))
)
self.inError = True
if dep.lower() in lower_case_files:
self.logger.warning(
f"### Case-insensitive match (found '{lower_case_files[dep.lower()]}')"
"### Case-insensitive match (found '%s')", lower_case_files[dep.lower()]
)
def check_unused(self):
self.logger.info("Looking for unused files...")
deps = dict()
deps = {}
for parent, dep in self.deps:
if sep != "/":
parent = parent.replace(sep, "/")
@ -859,7 +867,7 @@ class CheckRefs:
)
):
continue
self.logger.warning(f"Unused file '{str(self.vfs_to_relative_to_mods(f))}'")
self.logger.warning("Unused file '%s'", str(self.vfs_to_relative_to_mods(f)))
if __name__ == "__main__":
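Several hunks above swap f-strings inside logging calls for %-style placeholders passed as arguments. That is the fix for G004 (f-string in logging call): the lazy form defers formatting until a handler actually emits the record. A minimal sketch with hypothetical values:

    import logging

    logger = logging.getLogger(__name__)
    dep = "art/actors/example.xml"  # hypothetical value

    # Flagged by G004: the f-string is formatted even if the level is disabled
    logger.error(f"Missing file '{dep}'")
    # Preferred: formatting happens only when the record is handled
    logger.error("Missing file '%s'", dep)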

source/tools/entity/creationgraph.py Normal file → Executable file

@ -4,7 +4,8 @@ from pathlib import Path
from re import split
from subprocess import run
from sys import exit
from scriptlib import warn, SimulTemplateEntity, find_files
from scriptlib import SimulTemplateEntity, find_files, warn
def find_entities(vfs_root):
@ -56,8 +57,12 @@ def main():
warn(f"Invalid TrainingQueue reference: {f} -> {training_queue}")
dot_f.write(f'"{f}" -> "{training_queue}" [color=blue];\n')
dot_f.write("}\n")
if run(["dot", "-V"], capture_output=True).returncode == 0:
exit(run(["dot", "-Tpng", "creation.dot", "-o", "creation.png"], text=True).returncode)
if run(["dot", "-V"], capture_output=True, check=False).returncode == 0:
exit(
run(
["dot", "-Tpng", "creation.dot", "-o", "creation.png"], text=True, check=False
).returncode
)
if __name__ == "__main__":
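The `check=False` additions above come from PLW1510 (subprocess.run without an explicit check argument), which makes the error-handling choice visible at the call site. A sketch of both options, assuming graphviz's `dot` binary and a creation.dot file exist:

    from subprocess import CalledProcessError, run

    # check=False: a non-zero exit does not raise; inspect returncode yourself
    result = run(["dot", "-V"], capture_output=True, check=False)
    print("dot available:", result.returncode == 0)

    # check=True: a non-zero exit raises CalledProcessError instead
    try:
        run(["dot", "-Tpng", "creation.dot", "-o", "creation.png"], check=True)
    except CalledProcessError as err:
        print("dot failed with status", err.returncode)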

source/tools/entity/entvalidate.py Normal file → Executable file

@ -1,16 +1,18 @@
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import logging
from pathlib import Path
import shutil
from subprocess import run, CalledProcessError
import sys
from pathlib import Path
from subprocess import CalledProcessError, run
from typing import Sequence
from xml.etree import ElementTree
from xml.etree import ElementTree as ET
from scriptlib import SimulTemplateEntity, find_files
SIMUL_TEMPLATES_PATH = Path("simulation/templates")
ENTITY_RELAXNG_FNAME = "entity.rng"
RELAXNG_SCHEMA_ERROR_MSG = """Relax NG schema non existant.
@ -30,8 +32,7 @@ class SingleLevelFilter(logging.Filter):
def filter(self, record):
if self.reject:
return record.levelno != self.passlevel
else:
return record.levelno == self.passlevel
return record.levelno == self.passlevel
logger = logging.getLogger(__name__)
@ -96,18 +97,21 @@ def main(argv: Sequence[str] | None = None) -> int:
continue
path = fp.as_posix()
if path.startswith(f"{SIMUL_TEMPLATES_PATH.as_posix()}/mixins/") or path.startswith(
f"{SIMUL_TEMPLATES_PATH.as_posix()}/special/"
if path.startswith(
(
f"{SIMUL_TEMPLATES_PATH.as_posix()}/mixins/",
f"{SIMUL_TEMPLATES_PATH.as_posix()}/special/",
)
):
continue
if args.verbose:
logger.info(f"Parsing {fp}...")
logger.info("Parsing %s...", fp)
count += 1
entity = simul_template_entity.load_inherited(
SIMUL_TEMPLATES_PATH, str(fp.relative_to(SIMUL_TEMPLATES_PATH)), [args.mod_name]
)
xmlcontent = ElementTree.tostring(entity, encoding="unicode")
xmlcontent = ET.tostring(entity, encoding="unicode")
try:
run(
["xmllint", "--relaxng", str(args.relaxng_schema.resolve()), "-"],
@ -120,11 +124,11 @@ def main(argv: Sequence[str] | None = None) -> int:
except CalledProcessError as e:
failed += 1
if e.stderr:
logger.error(e.stderr)
logger.exception(e.stderr)
if e.stdout:
logger.info(e.stdout)
logger.info(f"Total: {count}; failed: {failed}")
logger.info("Total: %s; failed: %s", count, failed)
return 0
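The startswith hunk above uses the tuple form of str.startswith, which folds two or-ed calls into one (ruff's multiple-starts-ends-with rule, PIE810, if I read the rule set right). Sketch with a hypothetical path:

    path = "simulation/templates/special/foo.xml"  # hypothetical
    prefixes = ("simulation/templates/mixins/", "simulation/templates/special/")
    if path.startswith(prefixes):  # replaces startswith(a) or startswith(b)
        print("skipping", path)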


@ -1,8 +1,8 @@
from collections import Counter
from decimal import Decimal
from re import split
from xml.etree import ElementTree
from os.path import exists
from re import split
from xml.etree import ElementTree as ET
class SimulTemplateEntity:
@ -77,7 +77,7 @@ class SimulTemplateEntity:
base_tag.remove(base_child)
base_child = None
if base_child is None:
base_child = ElementTree.Element(child.tag)
base_child = ET.Element(child.tag)
base_tag.append(base_child)
self.apply_layer(base_child, child)
if "replace" in base_child.attrib:
@ -95,28 +95,27 @@ class SimulTemplateEntity:
if "|" in vfs_path:
paths = vfs_path.split("|", 1)
base = self._load_inherited(base_path, paths[1], mods, base)
base = self._load_inherited(base_path, paths[0], mods, base)
return base
return self._load_inherited(base_path, paths[0], mods, base)
main_mod = self.get_main_mod(base_path, vfs_path, mods)
fp = self.get_file(base_path, vfs_path, main_mod)
layer = ElementTree.parse(fp).getroot()
layer = ET.parse(fp).getroot()
for el in layer.iter():
children = [x.tag for x in el]
duplicates = [x for x, c in Counter(children).items() if c > 1]
if duplicates:
for dup in duplicates:
self.logger.warning(f"Duplicate child node '{dup}' in tag {el.tag} of {fp}")
self.logger.warning(
"Duplicate child node '%s' in tag %s of %s", dup, el.tag, fp
)
if layer.get("parent"):
parent = self._load_inherited(base_path, layer.get("parent"), mods, base)
self.apply_layer(parent, layer)
return parent
else:
if not base:
return layer
else:
self.apply_layer(base, layer)
return base
if not base:
return layer
self.apply_layer(base, layer)
return base
def find_files(vfs_root, mods, vfs_path, *ext_list):
@ -130,7 +129,7 @@ def find_files(vfs_root, mods, vfs_path, *ext_list):
def find_recursive(dp, base):
"""(relative Path, full Path) generator"""
if dp.is_dir():
if dp.name != ".svn" and dp.name != ".git" and not dp.name.endswith("~"):
if dp.name not in (".svn", ".git") and not dp.name.endswith("~"):
for fp in dp.iterdir():
yield from find_recursive(fp, base)
elif dp.suffix in full_exts:
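`dp.name != ".svn" and dp.name != ".git"` collapsing into a membership test above is PLR1714 (repeated equality comparison). A minimal sketch:

    name = ".git"  # hypothetical directory name
    if name not in (".svn", ".git") and not name.endswith("~"):
        print("descending into", name)
    else:
        print("skipping", name)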


@ -1,9 +1,11 @@
# Adapted from http://cairographics.org/freetypepython/
import ctypes
import cairo
import sys
import cairo
CAIRO_STATUS_SUCCESS = 0
FT_Err_Ok = 0
@ -58,11 +60,11 @@ def create_cairo_font_face_for_file(filename, faceindex=0, loadoptions=0):
# create cairo font face for freetype face
cr_face = _cairo_so.cairo_ft_font_face_create_for_ft_face(ft_face, loadoptions)
if CAIRO_STATUS_SUCCESS != _cairo_so.cairo_font_face_status(cr_face):
if _cairo_so.cairo_font_face_status(cr_face) != CAIRO_STATUS_SUCCESS:
raise Exception("Error creating cairo font face for " + filename)
_cairo_so.cairo_set_font_face(cairo_t, cr_face)
if CAIRO_STATUS_SUCCESS != _cairo_so.cairo_status(cairo_t):
if _cairo_so.cairo_status(cairo_t) != CAIRO_STATUS_SUCCESS:
raise Exception("Error creating cairo font face for " + filename)
face = cairo_ctx.get_font_face()
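Both comparisons above were flipped so the value under test comes first; that is SIM300 (Yoda condition). Sketch with a hypothetical status value:

    CAIRO_STATUS_SUCCESS = 0
    status = 1  # hypothetical return value of a cairo call

    # Before: CAIRO_STATUS_SUCCESS != status
    if status != CAIRO_STATUS_SUCCESS:
        print("cairo call failed")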


@ -23,7 +23,7 @@ class OutOfSpaceError(Exception):
pass
class Point(object):
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
@ -33,7 +33,7 @@ class Point(object):
return self.x - other.x
class RectanglePacker(object):
class RectanglePacker:
"""Base class for rectangle packing algorithms
By uniting all rectangle packers under this common base class, you can
@ -41,13 +41,15 @@ class RectanglePacker(object):
performant one for a given job.
An almost exhaustive list of packing algorithms can be found here:
http://www.csc.liv.ac.uk/~epa/surveyhtml.html"""
http://www.csc.liv.ac.uk/~epa/surveyhtml.html
"""
def __init__(self, packingAreaWidth, packingAreaHeight):
"""Initializes a new rectangle packer
packingAreaWidth: Maximum width of the packing area
packingAreaHeight: Maximum height of the packing area"""
packingAreaHeight: Maximum height of the packing area
"""
self.packingAreaWidth = packingAreaWidth
self.packingAreaHeight = packingAreaHeight
@ -57,7 +59,8 @@ class RectanglePacker(object):
rectangleWidth: Width of the rectangle to allocate
rectangleHeight: Height of the rectangle to allocate
Returns the location at which the rectangle has been placed"""
Returns the location at which the rectangle has been placed
"""
point = self.TryPack(rectangleWidth, rectangleHeight)
if not point:
@ -72,7 +75,8 @@ class RectanglePacker(object):
rectangleHeight: Height of the rectangle to allocate
Returns a Point instance if space for the rectangle could be allocated
be found, otherwise returns None"""
be found, otherwise returns None
"""
raise NotImplementedError
@ -112,13 +116,15 @@ class CygonRectanglePacker(RectanglePacker):
To quickly discover these locations, the packer uses a sophisticated
data structure that stores the upper silhouette of the packing area. When
a new rectangle needs to be added, only the silouette edges need to be
analyzed to find the position where the rectangle would achieve the lowest"""
analyzed to find the position where the rectangle would achieve the lowest
"""
def __init__(self, packingAreaWidth, packingAreaHeight):
"""Initializes a new rectangle packer
packingAreaWidth: Maximum width of the packing area
packingAreaHeight: Maximum height of the packing area"""
packingAreaHeight: Maximum height of the packing area
"""
RectanglePacker.__init__(self, packingAreaWidth, packingAreaHeight)
# Stores the height silhouette of the rectangles
@ -134,7 +140,8 @@ class CygonRectanglePacker(RectanglePacker):
rectangleHeight: Height of the rectangle to allocate
Returns a Point instance if space for the rectangle could be allocated
be found, otherwise returns None"""
be found, otherwise returns None
"""
placement = None
# If the rectangle is larger than the packing area in any dimension,
@ -159,7 +166,8 @@ class CygonRectanglePacker(RectanglePacker):
rectangleHeight: Height of the rectangle to find a position for
Returns a Point instance if a valid placement for the rectangle could
be found, otherwise returns None"""
be found, otherwise returns None
"""
# Slice index, vertical position and score of the best placement we
# could find
bestSliceIndex = -1 # Slice index where the best placement was found
@ -181,8 +189,7 @@ class CygonRectanglePacker(RectanglePacker):
# any lower than this without overlapping the other rectangles.
highest = self.heightSlices[leftSliceIndex].y
for index in range(leftSliceIndex + 1, rightSliceIndex):
if self.heightSlices[index].y > highest:
highest = self.heightSlices[index].y
highest = max(self.heightSlices[index].y, highest)
# Only process this position if it doesn't leave the packing area
if highest + rectangleHeight < self.packingAreaHeight:
@ -224,15 +231,15 @@ class CygonRectanglePacker(RectanglePacker):
# could be found.
if bestSliceIndex == -1:
return None
else:
return Point(self.heightSlices[bestSliceIndex].x, bestSliceY)
return Point(self.heightSlices[bestSliceIndex].x, bestSliceY)
def integrateRectangle(self, left, width, bottom):
"""Integrates a new rectangle into the height slice table
left: Position of the rectangle's left side
width: Width of the rectangle
bottom: Position of the rectangle's lower side"""
bottom: Position of the rectangle's lower side
"""
# Find the first slice that is touched by the rectangle
startSlice = bisect_left(self.heightSlices, Point(left, 0))
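The `return Point(...)` change above drops an `else` that followed a `return` (RET505, superfluous else), so the control flow reads straight down; the loop body nearby was likewise collapsed into a `max()` call. A minimal sketch of the flattened return:

    def placement(best_slice_index, x, y):
        if best_slice_index == -1:
            return None
        return (x, y)  # no else needed: the branch above already returned

    print(placement(-1, 0, 0), placement(2, 5, 7))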


@ -7,7 +7,7 @@ import FontLoader
def dump_font(ttf):
(face, indexes) = FontLoader.create_cairo_font_face_for_file(
"../../../binaries/data/tools/fontbuilder/fonts/%s" % ttf, 0, FontLoader.FT_LOAD_DEFAULT
f"../../../binaries/data/tools/fontbuilder/fonts/{ttf}", 0, FontLoader.FT_LOAD_DEFAULT
)
mappings = [(c, indexes(chr(c))) for c in range(1, 65535)]

source/tools/fontbuilder2/fontbuilder.py Normal file → Executable file

@ -1,15 +1,15 @@
#!/usr/bin/env python3
import cairo
import codecs
import math
import cairo
import FontLoader
import Packer
# Representation of a rendered glyph
class Glyph(object):
class Glyph:
def __init__(self, ctx, renderstyle, char, idx, face, size):
self.renderstyle = renderstyle
self.char = char
@ -18,7 +18,7 @@ class Glyph(object):
self.size = size
self.glyph = (idx, 0, 0)
if not ctx.get_font_face() == self.face:
if ctx.get_font_face() != self.face:
ctx.set_font_face(self.face)
ctx.set_font_size(self.size)
extents = ctx.glyph_extents([self.glyph])
@ -31,7 +31,7 @@ class Glyph(object):
bb = [inf, inf, -inf, -inf]
if "stroke" in self.renderstyle:
for c, w in self.renderstyle["stroke"]:
for _c, w in self.renderstyle["stroke"]:
ctx.set_line_width(w)
ctx.glyph_path([self.glyph])
e = ctx.stroke_extents()
@ -60,7 +60,7 @@ class Glyph(object):
self.pos = packer.Pack(self.w, self.h)
def render(self, ctx):
if not ctx.get_font_face() == self.face:
if ctx.get_font_face() != self.face:
ctx.set_font_face(self.face)
ctx.set_font_size(self.size)
ctx.save()
@ -107,7 +107,7 @@ def generate_font(outname, ttfNames, loadopts, size, renderstyle, dsizes):
indexList = []
for i in range(len(ttfNames)):
(face, indices) = FontLoader.create_cairo_font_face_for_file(
"../../../binaries/data/tools/fontbuilder/fonts/%s" % ttfNames[i], 0, loadopts
f"../../../binaries/data/tools/fontbuilder/fonts/{ttfNames[i]}", 0, loadopts
)
faceList.append(face)
if ttfNames[i] not in dsizes:
@ -166,10 +166,10 @@ def generate_font(outname, ttfNames, loadopts, size, renderstyle, dsizes):
ctx, surface = setup_context(w, h, renderstyle)
for g in glyphs:
g.render(ctx)
surface.write_to_png("%s.png" % outname)
surface.write_to_png(f"{outname}.png")
# Output the .fnt file with all the glyph positions etc
fnt = open("%s.fnt" % outname, "w")
fnt = open(f"{outname}.fnt", "w")
fnt.write("101\n")
fnt.write("%d %d\n" % (w, h))
fnt.write("%s\n" % ("rgba" if "colour" in renderstyle else "a"))
@ -249,7 +249,7 @@ fonts = (
)
for name, (fontnames, loadopts), size, style in fonts:
print("%s..." % name)
print(f"{name}...")
generate_font(
"../../../binaries/data/mods/mod/fonts/%s" % name, fontnames, loadopts, size, style, dsizes
f"../../../binaries/data/mods/mod/fonts/{name}", fontnames, loadopts, size, style, dsizes
)

source/tools/i18n/checkDiff.py Normal file → Executable file

@ -30,18 +30,18 @@ def get_diff():
"""Return a diff using svn diff"""
os.chdir(projectRootDirectory)
diff_process = subprocess.run(["svn", "diff", "binaries"], capture_output=True)
diff_process = subprocess.run(["svn", "diff", "binaries"], capture_output=True, check=False)
if diff_process.returncode != 0:
print(f"Error running svn diff: {diff_process.stderr.decode('utf-8')}. Exiting.")
return
return None
return io.StringIO(diff_process.stdout.decode("utf-8"))
def check_diff(diff: io.StringIO, verbose=False) -> List[str]:
"""Run through a diff of .po files and check that some of the changes
are real translations changes and not just noise (line changes....).
The algorithm isn't extremely clever, but it is quite fast."""
The algorithm isn't extremely clever, but it is quite fast.
"""
keep = set()
files = set()
@ -85,10 +85,11 @@ def check_diff(diff: io.StringIO, verbose=False) -> List[str]:
def revert_files(files: List[str], verbose=False):
revert_process = subprocess.run(["svn", "revert"] + files, capture_output=True)
revert_process = subprocess.run(["svn", "revert", *files], capture_output=True, check=False)
if revert_process.returncode != 0:
print(
f"Warning: Some files could not be reverted. Error: {revert_process.stderr.decode('utf-8')}"
"Warning: Some files could not be reverted. "
f"Error: {revert_process.stderr.decode('utf-8')}"
)
if verbose:
for file in files:
@ -97,7 +98,7 @@ def revert_files(files: List[str], verbose=False):
def add_untracked(verbose=False):
"""Add untracked .po files to svn"""
diff_process = subprocess.run(["svn", "st", "binaries"], capture_output=True)
diff_process = subprocess.run(["svn", "st", "binaries"], capture_output=True, check=False)
if diff_process.stderr != b"":
print(f"Error running svn st: {diff_process.stderr.decode('utf-8')}. Exiting.")
return
@ -110,7 +111,9 @@ def add_untracked(verbose=False):
file = line[1:].strip()
if not file.endswith(".po") and not file.endswith(".pot"):
continue
add_process = subprocess.run(["svn", "add", file, "--parents"], capture_output=True)
add_process = subprocess.run(
["svn", "add", file, "--parents"], capture_output=True, check=False
)
if add_process.stderr != b"":
print(f"Warning: file {file} could not be added.")
if verbose:
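`["svn", "revert"] + files` becoming `["svn", "revert", *files]` above is RUF005, which prefers unpacking inside the literal over list concatenation. Sketch with a hypothetical file list:

    files = ["en_GB.engine.po", "de.public-gui.po"]  # hypothetical
    cmd = ["svn", "revert", *files]  # was: ["svn", "revert"] + files
    print(cmd)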

source/tools/i18n/checkTranslations.py Normal file → Executable file

@ -16,15 +16,16 @@
# You should have received a copy of the GNU General Public License
# along with 0 A.D. If not, see <http://www.gnu.org/licenses/>.
import sys
import multiprocessing
import os
import re
import multiprocessing
import sys
from i18n_helper import l10nFolderName, projectRootDirectory
from i18n_helper.catalog import Catalog
from i18n_helper.globber import getCatalogs
VERBOSE = 0
@ -49,7 +50,8 @@ class MessageChecker:
pluralUrls = set(self.regex.findall(templateMessage.id[1]))
if pluralUrls.difference(patterns):
print(
f"{inputFilePath} - Different {self.human_name} in singular and plural source strings "
f"{inputFilePath} - Different {self.human_name} in "
f"singular and plural source strings "
f"for '{templateMessage}' in '{inputFilePath}'"
)
@ -71,8 +73,10 @@ class MessageChecker:
if unknown_patterns:
print(
f'{inputFilePath} - {translationCatalog.locale}: '
f'Found unknown {self.human_name} {", ".join(["`" + x + "`" for x in unknown_patterns])} in the translation '
f'which do not match any of the URLs in the template: {", ".join(["`" + x + "`" for x in patterns])}'
f'Found unknown {self.human_name} '
f'{", ".join(["`" + x + "`" for x in unknown_patterns])} '
f'in the translation which do not match any of the URLs '
f'in the template: {", ".join(["`" + x + "`" for x in patterns])}'
)
if templateMessage.pluralizable and translationMessage.pluralizable:
@ -84,8 +88,11 @@ class MessageChecker:
if unknown_patterns_multi:
print(
f'{inputFilePath} - {translationCatalog.locale}: '
f'Found unknown {self.human_name} {", ".join(["`" + x + "`" for x in unknown_patterns_multi])} in the pluralised translation '
f'which do not match any of the URLs in the template: {", ".join(["`" + x + "`" for x in pluralUrls])}'
f'Found unknown {self.human_name} '
f'{", ".join(["`" + x + "`" for x in unknown_patterns_multi])} '
f'in the pluralised translation which do not '
f'match any of the URLs in the template: '
f'{", ".join(["`" + x + "`" for x in pluralUrls])}'
)
@ -123,7 +130,7 @@ def main():
"before you run this script.\n\tPOT files are not in the repository.\n"
)
foundPots = 0
for root, folders, filenames in os.walk(projectRootDirectory):
for root, _folders, filenames in os.walk(projectRootDirectory):
for filename in filenames:
if (
len(filename) > 4

source/tools/i18n/cleanTranslationFiles.py Normal file → Executable file

@ -26,13 +26,13 @@ However that needs to be fixed on the transifex side, see rP25896. For now
strip the e-mails using this script.
"""
import sys
import os
import glob
import re
import fileinput
import glob
import os
import re
import sys
from i18n_helper import l10nFolderName, transifexClientFolder, projectRootDirectory
from i18n_helper import l10nFolderName, projectRootDirectory, transifexClientFolder
def main():


@ -36,11 +36,11 @@ from collections import defaultdict
from pathlib import Path
from babel import Locale, UnknownLocaleError
from i18n_helper import l10nFolderName, projectRootDirectory, transifexClientFolder
from i18n_helper import l10nFolderName, transifexClientFolder, projectRootDirectory
poLocations = []
for root, folders, filenames in os.walk(projectRootDirectory):
for root, folders, _filenames in os.walk(projectRootDirectory):
for folder in folders:
if folder == l10nFolderName:
if os.path.exists(os.path.join(root, folder, transifexClientFolder)):
@ -78,7 +78,7 @@ for location in poLocations:
lang = file.stem.split(".")[0]
# Skip debug translations
if lang == "debug" or lang == "long":
if lang in ("debug", "long"):
continue
with file.open(encoding="utf-8") as poFile:
@ -98,12 +98,10 @@ for location in poLocations:
# Sort translator names and remove duplicates
# Sorting should ignore case, but prefer versions of names starting
# with an upper case letter to have a neat credits list.
for lang in langsLists.keys():
for lang in langsLists:
translators = {}
for name in sorted(langsLists[lang], reverse=True):
if name.lower() not in translators.keys():
translators[name.lower()] = name
elif name.istitle():
if name.lower() not in translators or name.istitle():
translators[name.lower()] = name
langsLists[lang] = sorted(translators.values(), key=lambda s: s.lower())
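Iterating the dict itself instead of `.keys()` (SIM118) shows up twice above. Sketch with hypothetical data:

    langs_lists = {"de": ["Alice"], "fr": ["Bob"]}  # hypothetical
    for lang in langs_lists:  # was: for lang in langsLists.keys()
        print(lang, langs_lists[lang])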

source/tools/i18n/extractors/extractors.py Normal file → Executable file

@ -3,29 +3,33 @@
# Copyright (C) 2024 Wildfire Games.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
# The name of the author may not be used to endorse or promote products derived from this software without specific
# prior written permission.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR “AS IS” AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import codecs
import re
import os
import sys
import json as jsonParser
import os
import re
import sys
from textwrap import dedent
@ -51,7 +55,7 @@ def pathmatch(mask, path):
return re.match(p, path) is not None
class Extractor(object):
class Extractor:
def __init__(self, directoryPath, filemasks, options):
self.directoryPath = directoryPath
self.options = options
@ -66,14 +70,15 @@ class Extractor(object):
def run(self):
"""Extracts messages.
:return: An iterator over ``(message, plural, context, (location, pos), comment)`` tuples.
:return: An iterator over ``(message, plural, context, (location, pos), comment)``
tuples.
:rtype: ``iterator``
"""
empty_string_pattern = re.compile(r"^\s*$")
directoryAbsolutePath = os.path.abspath(self.directoryPath)
for root, folders, filenames in os.walk(directoryAbsolutePath):
for subdir in folders:
if subdir.startswith(".") or subdir.startswith("_"):
if subdir.startswith((".", "_")):
folders.remove(subdir)
folders.sort()
filenames.sort()
@ -108,7 +113,6 @@ class Extractor(object):
:return: An iterator over ``(message, plural, context, position, comments)`` tuples.
:rtype: ``iterator``
"""
pass
class javascript(Extractor):
@ -134,8 +138,7 @@ class javascript(Extractor):
for token in tokenize(fileObject.read(), dotted=False):
if token.type == "operator" and (
token.value == "("
or (call_stack != -1 and (token.value == "[" or token.value == "{"))
token.value == "(" or (call_stack != -1 and (token.value in ("[", "{")))
):
if funcname:
message_lineno = token.lineno
@ -215,11 +218,7 @@ class javascript(Extractor):
elif token.value == "+":
concatenate_next = True
elif (
call_stack > 0
and token.type == "operator"
and (token.value == ")" or token.value == "]" or token.value == "}")
):
elif call_stack > 0 and token.type == "operator" and (token.value in (")", "]", "}")):
call_stack -= 1
elif funcname and call_stack == -1:
@ -242,10 +241,7 @@ class javascript(Extractor):
def extractFromFile(self, filepath):
with codecs.open(filepath, "r", encoding="utf-8-sig") as fileObject:
for lineno, funcname, messages, comments in self.extractJavascriptFromFile(fileObject):
if funcname:
spec = self.options.get("keywords", {})[funcname] or (1,)
else:
spec = (1,)
spec = self.options.get("keywords", {})[funcname] or (1,) if funcname else (1,)
if not isinstance(messages, (list, tuple)):
messages = [messages]
if not messages:
@ -300,8 +296,6 @@ class javascript(Extractor):
class cpp(javascript):
"""Extract messages from C++ source code."""
pass
class txt(Extractor):
"""Extract messages from plain text files."""
@ -318,8 +312,12 @@ class txt(Extractor):
class json(Extractor):
"""Extract messages from JSON files."""
def __init__(self, directoryPath=None, filemasks=[], options={}):
super(json, self).__init__(directoryPath, filemasks, options)
def __init__(self, directoryPath=None, filemasks=None, options=None):
if options is None:
options = {}
if filemasks is None:
filemasks = []
super().__init__(directoryPath, filemasks, options)
self.keywords = self.options.get("keywords", {})
self.context = self.options.get("context", None)
self.comments = self.options.get("comments", [])
@ -347,7 +345,8 @@ class json(Extractor):
yield message, context
else:
raise Exception(
"Unexpected JSON document parent structure (not a list or a dictionary). You must extend the JSON extractor to support it."
"Unexpected JSON document parent structure (not a list or a dictionary). "
"You must extend the JSON extractor to support it."
)
def parseList(self, itemsList):
@ -431,8 +430,7 @@ class json(Extractor):
if isinstance(dictionary[innerKeyword], str):
yield self.extractString(dictionary[innerKeyword], keyword)
elif isinstance(dictionary[innerKeyword], list):
for message, context in self.extractList(dictionary[innerKeyword], keyword):
yield message, context
yield from self.extractList(dictionary[innerKeyword], keyword)
elif isinstance(dictionary[innerKeyword], dict):
extract = self.extractDictionary(dictionary[innerKeyword], keyword)
if extract:
@ -443,7 +441,7 @@ class xml(Extractor):
"""Extract messages from XML files."""
def __init__(self, directoryPath, filemasks, options):
super(xml, self).__init__(directoryPath, filemasks, options)
super().__init__(directoryPath, filemasks, options)
self.keywords = self.options.get("keywords", {})
self.jsonExtractor = None
@ -483,7 +481,9 @@ class xml(Extractor):
comments.append(comment)
if "splitOnWhitespace" in self.keywords[keyword]:
for splitText in element.text.split():
# split on whitespace is used for token lists, there, a leading '-' means the token has to be removed, so it's not to be processed here either
# split on whitespace is used for token lists, there, a
# leading '-' means the token has to be removed, so it's not
# to be processed here either
if splitText[0] != "-":
yield str(splitText), None, context, lineno, comments
else:
@ -491,7 +491,7 @@ class xml(Extractor):
# Hack from http://stackoverflow.com/a/2819788
class FakeSectionHeader(object):
class FakeSectionHeader:
def __init__(self, fp):
self.fp = fp
self.sechead = "[root]\n"
@ -510,7 +510,7 @@ class ini(Extractor):
"""Extract messages from INI files."""
def __init__(self, directoryPath, filemasks, options):
super(ini, self).__init__(directoryPath, filemasks, options)
super().__init__(directoryPath, filemasks, options)
self.keywords = self.options.get("keywords", [])
def extractFromFile(self, filepath):
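The `json` extractor's new signature above is the standard fix for B006 (mutable argument default): a literal `[]` or `{}` default is created once at function definition time and shared by every call that relies on it. A stand-in sketch, not the real class:

    class JsonExtractor:  # hypothetical stand-in for the extractor above
        def __init__(self, directory_path=None, filemasks=None, options=None):
            # Replace the shared mutable defaults with fresh objects per call.
            self.filemasks = filemasks if filemasks is not None else []
            self.options = options if options is not None else {}
            self.directory_path = directory_path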

source/tools/i18n/generateDebugTranslation.py Normal file → Executable file

@ -17,9 +17,9 @@
# along with 0 A.D. If not, see <http://www.gnu.org/licenses/>.
import argparse
import multiprocessing
import os
import sys
import multiprocessing
from i18n_helper import l10nFolderName, projectRootDirectory
from i18n_helper.catalog import Catalog
@ -179,9 +179,10 @@ def main():
if found_pot_files == 0:
print(
"This script did not work because no ‘.pot’ files were found. "
"Please, run ‘updateTemplates.py’ to generate the ‘.pot’ files, and run ‘pullTranslations.py’ to pull the latest translations from Transifex. "
"Then you can run this script to generate ‘.po’ files with obvious debug strings."
"This script did not work because no '.pot' files were found. "
"Please, run 'updateTemplates.py' to generate the '.pot' files, and run "
"'pullTranslations.py' to pull the latest translations from Transifex. "
"Then you can run this script to generate '.po' files with obvious debug strings."
)


@ -1,5 +1,6 @@
import os
l10nFolderName = "l10n"
transifexClientFolder = ".tx"
l10nToolsDirectory = os.path.dirname(os.path.realpath(__file__))


@ -41,7 +41,7 @@ class Catalog(BabelCatalog):
}:
headers.append((name, value))
return [("Project-Id-Version", self._project)] + headers
return [("Project-Id-Version", self._project), *headers]
@staticmethod
def readFrom(file_path, locale=None):


@ -1,12 +1,12 @@
"""Utils to list .po"""
import os
from typing import List
from typing import List, Optional
from i18n_helper.catalog import Catalog
def getCatalogs(inputFilePath, filters: List[str] = None) -> List[Catalog]:
def getCatalogs(inputFilePath, filters: Optional[List[str]] = None) -> List[Catalog]:
"""Returns a list of "real" catalogs (.po) in the given folder."""
existingTranslationCatalogs = []
l10nFolderPath = os.path.dirname(inputFilePath)

source/tools/i18n/pullTranslations.py Normal file → Executable file

@ -19,7 +19,7 @@
import os
import subprocess
from i18n_helper import l10nFolderName, transifexClientFolder, projectRootDirectory
from i18n_helper import l10nFolderName, projectRootDirectory, transifexClientFolder
def main():
@ -30,7 +30,7 @@ def main():
path = os.path.join(root, folder)
os.chdir(path)
print(f"INFO: Starting to pull translations in {path}...")
subprocess.run(["tx", "pull", "-a", "-f"])
subprocess.run(["tx", "pull", "-a", "-f"], check=False)
if __name__ == "__main__":


@ -1,7 +1,9 @@
import io
import pytest
from checkDiff import check_diff
PATCHES = [
"""
Index: binaries/data/l10n/en_GB.engine.po

source/tools/i18n/updateTemplates.py Normal file → Executable file

@ -17,14 +17,14 @@
# along with 0 A.D. If not, see <http://www.gnu.org/licenses/>.
import json
import os
import multiprocessing
import os
from importlib import import_module
from i18n_helper import l10nFolderName, projectRootDirectory
from i18n_helper.catalog import Catalog
messagesFilename = "messages.json"
@ -38,32 +38,25 @@ def warnAboutUntouchedMods():
if modFolder[0] != "_" and modFolder[0] != ".":
if not os.path.exists(os.path.join(modsRootFolder, modFolder, l10nFolderName)):
untouchedMods[modFolder] = (
"There is no '{folderName}' folder in the root folder of this mod.".format(
folderName=l10nFolderName
)
f"There is no '{l10nFolderName}' folder in the root folder of this mod."
)
elif not os.path.exists(
os.path.join(modsRootFolder, modFolder, l10nFolderName, messagesFilename)
):
untouchedMods[modFolder] = (
"There is no '{filename}' file within the '{folderName}' folder in the root folder of this mod.".format(
folderName=l10nFolderName, filename=messagesFilename
)
f"There is no '{messagesFilename}' file within the '{l10nFolderName}' folder "
f"in the root folder of this mod."
)
if untouchedMods:
print("" "Warning: No messages were extracted from the following mods:" "")
for mod in untouchedMods:
print(
"{modName}: {warningMessage}".format(
modName=mod, warningMessage=untouchedMods[mod]
)
)
print(f"{mod}: {untouchedMods[mod]}")
print(
""
f"For this script to extract messages from a mod folder, this mod folder must contain a '{l10nFolderName}' "
f"folder, and this folder must contain a '{messagesFilename}' file that describes how to extract messages for the "
f"mod. See the folder of the main mod ('public') for an example, and see the documentation for more "
f"information."
f"For this script to extract messages from a mod folder, this mod folder must contain "
f"a '{l10nFolderName}' folder, and this folder must contain a '{messagesFilename}' "
f"file that describes how to extract messages for the mod. See the folder of the main "
f"mod ('public') for an example, and see the documentation for more information."
)
@ -108,7 +101,7 @@ def generatePOT(templateSettings, rootPath):
def generateTemplatesForMessagesFile(messagesFilePath):
with open(messagesFilePath, "r") as fileObject:
with open(messagesFilePath) as fileObject:
settings = json.load(fileObject)
for templateSettings in settings:
@ -127,7 +120,7 @@ def main():
"Type '.' for current working directory",
)
args = parser.parse_args()
for root, folders, filenames in os.walk(args.scandir or projectRootDirectory):
for root, folders, _filenames in os.walk(args.scandir or projectRootDirectory):
for folder in folders:
if folder == l10nFolderName:
messagesFilePath = os.path.join(root, folder, messagesFilename)


@ -27,14 +27,17 @@ import os
import struct
import sys
parser = argparse.ArgumentParser(
description="Convert maps compatible with 0 A.D. version Alpha XVIII (A18) to maps compatible with version Alpha XIX (A19), or the other way around."
description="Convert maps compatible with 0 A.D. version Alpha XVIII (A18) to maps compatible "
"with version Alpha XIX (A19), or the other way around."
)
parser.add_argument(
"--reverse",
action="store_true",
help="Make an A19 map compatible with A18 (note that conversion will fail if mountains are too high)",
help="Make an A19 map compatible with A18 (note that conversion will fail "
"if mountains are too high)",
)
parser.add_argument(
"--no-version-bump", action="store_true", help="Don't change the version number of the map"
@ -51,7 +54,6 @@ parser.add_argument(
)
args = parser.parse_args()
HEIGHTMAP_BIT_SHIFT = 3
for xmlFile in args.files:
@ -68,25 +70,22 @@ for xmlFile in args.files:
version = struct.unpack("<I", f1.read(4))[0]
if args.no_version_bump:
f2.write(struct.pack("<I", version))
elif args.reverse:
if version != 6:
print(
f"Warning: File {pmpFile} was not at version 6, while a negative version "
f"bump was requested.\nABORTING ..."
)
continue
f2.write(struct.pack("<I", version - 1))
else:
if args.reverse:
if version != 6:
print(
"Warning: File "
+ pmpFile
+ " was not at version 6, while a negative version bump was requested.\nABORTING ..."
)
continue
f2.write(struct.pack("<I", version - 1))
else:
if version != 5:
print(
"Warning: File "
+ pmpFile
+ " was not at version 5, while a version bump was requested.\nABORTING ..."
)
continue
f2.write(struct.pack("<I", version + 1))
if version != 5:
print(
f"Warning: File {pmpFile} was not at version 5, while a version bump was "
f"requested.\nABORTING ..."
)
continue
f2.write(struct.pack("<I", version + 1))
# 4 bytes a for file size (which shouldn't change)
f2.write(f1.read(4))
@ -100,17 +99,16 @@ for xmlFile in args.files:
def height_transform(h):
return h
elif args.reverse:
def height_transform(h):
return h << HEIGHTMAP_BIT_SHIFT
else:
if args.reverse:
def height_transform(h):
return h << HEIGHTMAP_BIT_SHIFT
else:
def height_transform(h):
return h >> HEIGHTMAP_BIT_SHIFT
def height_transform(h):
return h >> HEIGHTMAP_BIT_SHIFT
for i in range(0, (map_size * 16 + 1) * (map_size * 16 + 1)):
for _i in range((map_size * 16 + 1) * (map_size * 16 + 1)):
height = struct.unpack("<H", f1.read(2))[0]
f2.write(struct.pack("<H", height_transform(height)))
@ -128,7 +126,7 @@ for xmlFile in args.files:
os.rename(pmpFile + "~", pmpFile)
if os.path.isfile(xmlFile):
with open(xmlFile, "r") as f1, open(xmlFile + "~", "w") as f2:
with open(xmlFile) as f1, open(xmlFile + "~", "w") as f2:
data = f1.read()
# bump version number (rely on how Atlas formats the XML)
@ -136,23 +134,20 @@ for xmlFile in args.files:
if args.reverse:
if data.find('<Scenario version="6">') == -1:
print(
"Warning: File "
+ xmlFile
+ " was not at version 6, while a negative version bump was requested.\nABORTING ..."
f"Warning: File {xmlFile} was not at version 6, while a negative "
f"version bump was requested.\nABORTING ..."
)
sys.exit()
else:
data = data.replace('<Scenario version="6">', '<Scenario version="5">')
elif data.find('<Scenario version="5">') == -1:
print(
f"Warning: File {xmlFile} was not at version 5, while a version bump "
f"was requested.\nABORTING ..."
)
sys.exit()
else:
if data.find('<Scenario version="5">') == -1:
print(
"Warning: File "
+ xmlFile
+ " was not at version 5, while a version bump was requested.\nABORTING ..."
)
sys.exit()
else:
data = data.replace('<Scenario version="5">', '<Scenario version="6">')
data = data.replace('<Scenario version="5">', '<Scenario version="6">')
# transform the color keys
if not args.no_color_spelling:


@ -1,17 +1,17 @@
# This script provides an overview of the zero_ad wrapper for 0 AD
from os import path
import zero_ad
# First, we will define some helper functions we will use later.
import math
from os import path
import zero_ad
def dist(p1, p2):
return math.sqrt(sum((math.pow(x2 - x1, 2) for (x1, x2) in zip(p1, p2))))
return math.sqrt(sum(math.pow(x2 - x1, 2) for (x1, x2) in zip(p1, p2)))
def center(units):
sum_position = map(sum, zip(*map(lambda u: u.position(), units)))
sum_position = map(sum, zip(*(u.position() for u in units)))
return [x / len(units) for x in sum_position]
@ -33,7 +33,7 @@ game = zero_ad.ZeroAD("http://localhost:6000")
# Load the Arcadia map
samples_dir = path.dirname(path.realpath(__file__))
scenario_config_path = path.join(samples_dir, "arcadia.json")
with open(scenario_config_path, "r") as f:
with open(scenario_config_path, encoding="utf8") as f:
arcadia_config = f.read()
state = game.reset(arcadia_config)


@ -1,5 +1,6 @@
from setuptools import setup
setup(
name="zero_ad",
version="0.0.1",


@ -1,19 +1,21 @@
import zero_ad
import math
from os import path
import zero_ad
game = zero_ad.ZeroAD("http://localhost:6000")
scriptdir = path.dirname(path.realpath(__file__))
with open(path.join(scriptdir, "..", "samples", "arcadia.json"), "r") as f:
with open(path.join(scriptdir, "..", "samples", "arcadia.json"), encoding="utf8") as f:
config = f.read()
def dist(p1, p2):
return math.sqrt(sum((math.pow(x2 - x1, 2) for (x1, x2) in zip(p1, p2))))
return math.sqrt(sum(math.pow(x2 - x1, 2) for (x1, x2) in zip(p1, p2)))
def center(units):
sum_position = map(sum, zip(*map(lambda u: u.position(), units)))
sum_position = map(sum, zip(*(u.position() for u in units)))
return [x / len(units) for x in sum_position]


@ -1,12 +1,14 @@
import zero_ad
from os import path
import zero_ad
game = zero_ad.ZeroAD("http://localhost:6000")
scriptdir = path.dirname(path.realpath(__file__))
with open(path.join(scriptdir, "..", "samples", "arcadia.json"), "r") as f:
with open(path.join(scriptdir, "..", "samples", "arcadia.json")) as f:
config = f.read()
with open(path.join(scriptdir, "fastactions.js"), "r") as f:
with open(path.join(scriptdir, "fastactions.js")) as f:
fastactions = f.read()


@ -1,5 +1,8 @@
from . import actions # noqa: F401
from . import environment
from . import (
actions, # noqa: F401
environment,
)
ZeroAD = environment.ZeroAD
GameState = environment.GameState


@ -1,5 +1,5 @@
from urllib import request
import json
from urllib import request
class RLAPI:
@ -11,7 +11,7 @@ class RLAPI:
return response.read()
def step(self, commands):
post_data = "\n".join((f"{player};{json.dumps(action)}" for (player, action) in commands))
post_data = "\n".join(f"{player};{json.dumps(action)}" for (player, action) in commands)
return self.post("step", post_data)
def reset(self, scenario_config, player_id, save_replay):


@ -1,7 +1,8 @@
from .api import RLAPI
import json
from xml.etree import ElementTree
from itertools import cycle
from xml.etree import ElementTree as ET
from .api import RLAPI
class ZeroAD:
@ -11,7 +12,9 @@ class ZeroAD:
self.cache = {}
self.player_id = 1
def step(self, actions=[], player=None):
def step(self, actions=None, player=None):
if actions is None:
actions = []
player_ids = cycle([self.player_id]) if player is None else cycle(player)
cmds = zip(player_ids, actions)
@ -35,8 +38,10 @@ class ZeroAD:
templates = self.api.get_templates(names)
return [(name, EntityTemplate(content)) for (name, content) in templates]
def update_templates(self, types=[]):
all_types = list(set([unit.type() for unit in self.current_state.units()]))
def update_templates(self, types=None):
if types is None:
types = []
all_types = list({unit.type() for unit in self.current_state.units()})
all_types += types
template_pairs = self.get_templates(all_types)
@ -106,7 +111,7 @@ class Entity:
class EntityTemplate:
def __init__(self, xml):
self.data = ElementTree.fromstring(f"<Entity>{xml}</Entity>")
self.data = ET.fromstring(f"<Entity>{xml}</Entity>")
def get(self, path):
node = self.data.find(path)
@ -120,4 +125,4 @@ class EntityTemplate:
return node is not None
def __str__(self):
return ElementTree.tostring(self.data).decode("utf-8")
return ET.tostring(self.data).decode("utf-8")
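`list(set([unit.type() for unit in ...]))` turning into `list({unit.type() for ...})` above replaces set() around a list comprehension with a set comprehension (C403). Sketch with hypothetical data:

    unit_types = ["infantry", "cavalry", "infantry"]  # hypothetical
    all_types = list({t for t in unit_types})  # was: list(set([t for t in unit_types]))
    print(sorted(all_types))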

source/tools/spirv/compile.py Normal file → Executable file

@ -28,10 +28,10 @@ import json
import os
import subprocess
import sys
import yaml
import xml.etree.ElementTree as ET
import yaml
def execute(command):
try:
@ -81,9 +81,8 @@ def resolve_if(defines, expression):
if define["value"] != "1":
return True
found_define = True
else:
if define["value"] == "1":
return True
elif define["value"] == "1":
return True
if invert and not found_define:
return True
return False
@ -124,12 +123,11 @@ def compile_and_reflect(
command.append("-DSTAGE_{}={}".format(stage.upper(), "1"))
command += ["-o", output_path]
# Compile the shader with debug information to see names in reflection.
ret, out, err = execute(command + ["-g"])
ret, out, err = execute([*command, "-g"])
if ret:
sys.stderr.write(
"Command returned {}:\nCommand: {}\nInput path: {}\nOutput path: {}\nError: {}\n".format(
ret, " ".join(command), input_path, output_path, err
)
"Command returned {}:\nCommand: {}\nInput path: {}\nOutput path: {}\n"
"Error: {}\n".format(ret, " ".join(command), input_path, output_path, err)
)
preprocessor_output_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), "preprocessed_file.glsl")
@ -139,24 +137,23 @@ def compile_and_reflect(
ret, out, err = execute(["spirv-reflect", "-y", "-v", "1", output_path])
if ret:
sys.stderr.write(
"Command returned {}:\nCommand: {}\nInput path: {}\nOutput path: {}\nError: {}\n".format(
ret, " ".join(command), input_path, output_path, err
)
"Command returned {}:\nCommand: {}\nInput path: {}\nOutput path: {}\n"
"Error: {}\n".format(ret, " ".join(command), input_path, output_path, err)
)
raise ValueError(err)
# Reflect the result SPIRV.
data = yaml.safe_load(out)
module = data["module"]
interface_variables = []
if "all_interface_variables" in data and data["all_interface_variables"]:
if data.get("all_interface_variables"):
interface_variables = data["all_interface_variables"]
push_constants = []
vertex_attributes = []
if "push_constants" in module and module["push_constants"]:
if module.get("push_constants"):
assert len(module["push_constants"]) == 1
def add_push_constants(node, push_constants):
if ("members" in node) and node["members"]:
if node.get("members"):
for member in node["members"]:
add_push_constants(member, push_constants)
else:
@ -173,7 +170,7 @@ def compile_and_reflect(
assert module["push_constants"][0]["size"] <= 128
add_push_constants(module["push_constants"][0], push_constants)
descriptor_sets = []
if "descriptor_sets" in module and module["descriptor_sets"]:
if module.get("descriptor_sets"):
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER = 1
VK_DESCRIPTOR_TYPE_STORAGE_IMAGE = 3
VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER = 6
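Editor's note: rewriting `if "key" in d and d["key"]:` as `if d.get("key"):` (one of ruff's unnecessary-key-check simplifications) does a single lookup instead of two, and treats a missing key exactly like a falsy value. Sketch with invented data:

    module = {"push_constants": [], "entry_points": [{"name": "main"}]}

    assert not module.get("push_constants")   # present but empty: falsy
    assert not module.get("descriptor_sets")  # absent: returns None, falsy
    assert module.get("entry_points")         # present and non-empty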
@ -232,43 +229,39 @@ def compile_and_reflect(
"name": binding["name"],
}
)
else:
if use_descriptor_indexing:
if descriptor_set["set"] == 0:
assert descriptor_set["binding_count"] >= 1
for binding in descriptor_set["bindings"]:
assert (
binding["descriptor_type"]
== VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
)
assert binding["array"]["dims"][0] == 16384
if binding["binding"] == 0:
assert binding["name"] == "textures2D"
elif binding["binding"] == 1:
assert binding["name"] == "texturesCube"
elif binding["binding"] == 2:
assert binding["name"] == "texturesShadow"
else:
assert False
else:
assert descriptor_set["binding_count"] > 0
elif use_descriptor_indexing:
if descriptor_set["set"] == 0:
assert descriptor_set["binding_count"] >= 1
for binding in descriptor_set["bindings"]:
assert (
binding["descriptor_type"] == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
)
assert binding["image"]["sampled"] == 1
assert binding["image"]["arrayed"] == 0
assert binding["image"]["ms"] == 0
sampler_type = "sampler{}D".format(binding["image"]["dim"] + 1)
if binding["image"]["dim"] == 3:
sampler_type = "samplerCube"
bindings.append(
{
"binding": binding["binding"],
"type": sampler_type,
"name": binding["name"],
}
)
assert binding["array"]["dims"][0] == 16384
if binding["binding"] == 0:
assert binding["name"] == "textures2D"
elif binding["binding"] == 1:
assert binding["name"] == "texturesCube"
elif binding["binding"] == 2:
assert binding["name"] == "texturesShadow"
else:
raise AssertionError
else:
assert descriptor_set["binding_count"] > 0
for binding in descriptor_set["bindings"]:
assert binding["descriptor_type"] == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
assert binding["image"]["sampled"] == 1
assert binding["image"]["arrayed"] == 0
assert binding["image"]["ms"] == 0
sampler_type = "sampler{}D".format(binding["image"]["dim"] + 1)
if binding["image"]["dim"] == 3:
sampler_type = "samplerCube"
bindings.append(
{
"binding": binding["binding"],
"type": sampler_type,
"name": binding["name"],
}
)
descriptor_sets.append(
{
"set": descriptor_set["set"],
@ -290,9 +283,8 @@ def compile_and_reflect(
ret, out, err = execute(command)
if ret:
sys.stderr.write(
"Command returned {}:\nCommand: {}\nInput path: {}\nOutput path: {}\nError: {}\n".format(
ret, " ".join(command), input_path, output_path, err
)
"Command returned {}:\nCommand: {}\nInput path: {}\nOutput path: {}\n"
"Error: {}\n".format(ret, " ".join(command), input_path, output_path, err)
)
raise ValueError(err)
return {
@ -304,30 +296,28 @@ def compile_and_reflect(
def output_xml_tree(tree, path):
"""We use a simple custom printer to have the same output for all platforms."""
with open(path, "wt") as handle:
with open(path, "w") as handle:
handle.write('<?xml version="1.0" encoding="utf-8"?>\n')
handle.write(
"<!-- DO NOT EDIT: GENERATED BY SCRIPT {} -->\n".format(os.path.basename(__file__))
)
handle.write(f"<!-- DO NOT EDIT: GENERATED BY SCRIPT {os.path.basename(__file__)} -->\n")
def output_xml_node(node, handle, depth):
indent = "\t" * depth
attributes = ""
for attribute_name in sorted(node.attrib.keys()):
attributes += ' {}="{}"'.format(attribute_name, node.attrib[attribute_name])
attributes += f' {attribute_name}="{node.attrib[attribute_name]}"'
if len(node) > 0:
handle.write("{}<{}{}>\n".format(indent, node.tag, attributes))
handle.write(f"{indent}<{node.tag}{attributes}>\n")
for child in node:
output_xml_node(child, handle, depth + 1)
handle.write("{}</{}>\n".format(indent, node.tag))
handle.write(f"{indent}</{node.tag}>\n")
else:
handle.write("{}<{}{}/>\n".format(indent, node.tag, attributes))
handle.write(f"{indent}<{node.tag}{attributes}/>\n")
output_xml_node(tree.getroot(), handle, 0)
def build(rules, input_mod_path, output_mod_path, dependencies, program_name):
sys.stdout.write('Program "{}"\n'.format(program_name))
sys.stdout.write(f'Program "{program_name}"\n')
if rules and program_name not in rules:
sys.stdout.write(" Skip.\n")
return
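Editor's note: the `.format()` to f-string conversions throughout this file are ruff's UP032 fix; the two spellings produce identical strings, f-strings just inline the values. For instance:

    program_name = "model"
    assert 'Program "{}"\n'.format(program_name) == f'Program "{program_name}"\n'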
@ -392,7 +382,7 @@ def build(rules, input_mod_path, output_mod_path, dependencies, program_name):
}
)
else:
raise ValueError('Unsupported element tag: "{}"'.format(element_tag))
raise ValueError(f'Unsupported element tag: "{element_tag}"')
stage_extension = {
"vertex": ".vs",
@ -525,9 +515,9 @@ def build(rules, input_mod_path, output_mod_path, dependencies, program_name):
member_element.set("name", member["name"])
member_element.set("size", member["size"])
member_element.set("offset", member["offset"])
elif binding["type"].startswith("sampler"):
binding_element.set("name", binding["name"])
elif binding["type"].startswith("storage"):
elif binding["type"].startswith("sampler") or binding["type"].startswith(
"storage"
):
binding_element.set("name", binding["name"])
program_tree = ET.ElementTree(program_root)
output_xml_tree(program_tree, os.path.join(output_mod_path, "shaders", program_path))
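Editor's note: the merged `sampler`/`storage` branch above keeps a single body for both prefixes. Worth noting, though it is not what this diff does: `str.startswith` also accepts a tuple of prefixes, which avoids the chained call entirely. Sketch with an illustrative value:

    binding_type = "storageBuffer"  # made-up example, not from the diff
    if binding_type.startswith(("sampler", "storage")):
        print("binding carries a name attribute")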
@ -540,18 +530,21 @@ def run():
parser = argparse.ArgumentParser()
parser.add_argument(
"input_mod_path",
help="a path to a directory with input mod with GLSL shaders like binaries/data/mods/public",
help="a path to a directory with input mod with GLSL shaders "
"like binaries/data/mods/public",
)
parser.add_argument("rules_path", help="a path to JSON with rules")
parser.add_argument(
"output_mod_path",
help="a path to a directory with mod to store SPIR-V shaders like binaries/data/mods/spirv",
help="a path to a directory with mod to store SPIR-V shaders "
"like binaries/data/mods/spirv",
)
parser.add_argument(
"-d",
"--dependency",
action="append",
help="a path to a directory with a dependency mod (at least modmod should present as dependency)",
help="a path to a directory with a dependency mod (at least "
"modmod should present as dependency)",
required=True,
)
parser.add_argument(
@ -563,26 +556,26 @@ def run():
args = parser.parse_args()
if not os.path.isfile(args.rules_path):
sys.stderr.write('Rules "{}" are not found\n'.format(args.rules_path))
sys.stderr.write(f'Rules "{args.rules_path}" are not found\n')
return
with open(args.rules_path, "rt") as handle:
with open(args.rules_path) as handle:
rules = json.load(handle)
if not os.path.isdir(args.input_mod_path):
sys.stderr.write('Input mod path "{}" is not a directory\n'.format(args.input_mod_path))
sys.stderr.write(f'Input mod path "{args.input_mod_path}" is not a directory\n')
return
if not os.path.isdir(args.output_mod_path):
sys.stderr.write('Output mod path "{}" is not a directory\n'.format(args.output_mod_path))
sys.stderr.write(f'Output mod path "{args.output_mod_path}" is not a directory\n')
return
mod_shaders_path = os.path.join(args.input_mod_path, "shaders", "glsl")
if not os.path.isdir(mod_shaders_path):
sys.stderr.write('Directory "{}" was not found\n'.format(mod_shaders_path))
sys.stderr.write(f'Directory "{mod_shaders_path}" was not found\n')
return
mod_name = os.path.basename(os.path.normpath(args.input_mod_path))
sys.stdout.write('Building SPIRV for "{}"\n'.format(mod_name))
sys.stdout.write(f'Building SPIRV for "{mod_name}"\n')
if not args.program_name:
for file_name in os.listdir(mod_shaders_path):
name, ext = os.path.splitext(file_name)
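Editor's note: the remaining `'...{}...'.format(...)` to f-string conversions in this hunk follow the same UP032 pattern already illustrated above, so no separate example is needed here.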

50
source/tools/templatesanalyzer/unitTables.py Normal file → Executable file

@ -21,15 +21,15 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import glob
import os
import sys
import xml.etree.ElementTree as ET
from pathlib import Path
import os
import glob
sys.path.append("../entity")
from scriptlib import SimulTemplateEntity # noqa: E402
from scriptlib import SimulTemplateEntity
AttackTypes = ["Hack", "Pierce", "Crush", "Poison", "Fire"]
@ -299,15 +299,15 @@ def CalcUnit(UnitName, existingUnit=None):
def WriteUnit(Name, UnitDict):
ret = "<tr>"
ret += '<td class="Sub">' + Name + "</td>"
ret += "<td>" + str("%.0f" % float(UnitDict["HP"])) + "</td>"
ret += "<td>" + str("%.0f" % float(UnitDict["BuildTime"])) + "</td>"
ret += "<td>" + str("%.1f" % float(UnitDict["WalkSpeed"])) + "</td>"
ret += "<td>" + str("{:.0f}".format(float(UnitDict["HP"]))) + "</td>"
ret += "<td>" + str("{:.0f}".format(float(UnitDict["BuildTime"]))) + "</td>"
ret += "<td>" + str("{:.1f}".format(float(UnitDict["WalkSpeed"]))) + "</td>"
for atype in AttackTypes:
PercentValue = 1.0 - (0.9 ** float(UnitDict["Resistance"][atype]))
ret += (
"<td>"
+ str("%.0f" % float(UnitDict["Resistance"][atype]))
+ str("{:.0f}".format(float(UnitDict["Resistance"][atype])))
+ " / "
+ str("%.0f" % (PercentValue * 100.0))
+ "%</td>"
@ -325,28 +325,28 @@ def WriteUnit(Name, UnitDict):
ret += "<td>" + str("%.1f" % (float(UnitDict["RepeatRate"][attType]) / 1000.0)) + "</td>"
else:
for atype in AttackTypes:
for _ in AttackTypes:
ret += "<td> - </td>"
ret += "<td> - </td>"
if UnitDict["Ranged"] is True and UnitDict["Range"] > 0:
ret += "<td>" + str("%.1f" % float(UnitDict["Range"])) + "</td>"
ret += "<td>" + str("{:.1f}".format(float(UnitDict["Range"]))) + "</td>"
spread = float(UnitDict["Spread"])
ret += "<td>" + str("%.1f" % spread) + "</td>"
ret += "<td>" + str(f"{spread:.1f}") + "</td>"
else:
ret += "<td> - </td><td> - </td>"
for rtype in Resources:
ret += "<td>" + str("%.0f" % float(UnitDict["Cost"][rtype])) + "</td>"
ret += "<td>" + str("{:.0f}".format(float(UnitDict["Cost"][rtype]))) + "</td>"
ret += "<td>" + str("%.0f" % float(UnitDict["Cost"]["population"])) + "</td>"
ret += "<td>" + str("{:.0f}".format(float(UnitDict["Cost"]["population"]))) + "</td>"
ret += '<td style="text-align:left;">'
for Bonus in UnitDict["AttackBonuses"]:
ret += "["
for classe in UnitDict["AttackBonuses"][Bonus]["Classes"]:
ret += classe + " "
ret += ": %s] " % UnitDict["AttackBonuses"][Bonus]["Multiplier"]
ret += ": {}] ".format(UnitDict["AttackBonuses"][Bonus]["Multiplier"])
ret += "</td>"
ret += "</tr>\n"
@ -370,7 +370,7 @@ def SortFn(A):
def WriteColouredDiff(file, diff, isChanged):
"""helper to write coloured text.
"""Helper to write coloured text.
diff value must always be computed as a unit_spec - unit_generic.
A positive imaginary part represents advantageous trait.
"""
@ -378,8 +378,7 @@ def WriteColouredDiff(file, diff, isChanged):
def cleverParse(diff):
if float(diff) - int(diff) < 0.001:
return str(int(diff))
else:
return str("%.1f" % float(diff))
return str(f"{float(diff):.1f}")
isAdvantageous = diff.imag > 0
diff = diff.real
@ -392,16 +391,14 @@ def WriteColouredDiff(file, diff, isChanged):
if diff == 0:
rgb_str = "200,200,200"
elif isAdvantageous and diff > 0:
rgb_str = "180,0,0"
elif (not isAdvantageous) and diff < 0:
elif isAdvantageous and diff > 0 or (not isAdvantageous) and diff < 0:
rgb_str = "180,0,0"
else:
rgb_str = "0,150,0"
file.write(
"""<td><span style="color:rgb({});">{}</span></td>
""".format(rgb_str, cleverParse(diff))
f"""<td><span style="color:rgb({rgb_str});">{cleverParse(diff)}</span></td>
"""
)
return isChanged
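Editor's note: the merged condition in `WriteColouredDiff` is hard to scan. With the `diff == 0` case already handled, red (`180,0,0`) is chosen exactly when the sign of `diff` matches the `isAdvantageous` flag. An equivalent and arguably clearer formulation, offered as a sketch rather than what the commit does:

    def rgb_for(diff_real, is_advantageous):
        if diff_real == 0:
            return "200,200,200"
        if (diff_real > 0) == is_advantageous:
            return "180,0,0"  # advantageous increase or disadvantageous decrease
        return "0,150,0"

    assert rgb_for(2.0, True) == "180,0,0"
    assert rgb_for(-2.0, False) == "180,0,0"
    assert rgb_for(-2.0, True) == "0,150,0"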
@ -743,7 +740,8 @@ differences between the two.
isChanged = WriteColouredDiff(ff, +1j + (mySpread - parentSpread), isChanged)
else:
ff.write(
"<td><span style='color:rgb(200,200,200);'>-</span></td><td><span style='color:rgb(200,200,200);'>-</span></td>"
"<td><span style='color:rgb(200,200,200);'>-</span></td><td>"
"<span style='color:rgb(200,200,200);'>-</span></td>"
)
else:
ff.write("<td></td><td></td><td></td><td></td><td></td><td></td>")
@ -769,9 +767,7 @@ differences between the two.
ff.write("</tr>\n")
ff.close() # to actually write into the file
with open(
os.path.realpath(__file__).replace("unitTables.py", "") + ".cache", "r"
) as ff:
with open(os.path.realpath(__file__).replace("unitTables.py", "") + ".cache") as ff:
unitStr = ff.read()
if showChangedOnly:
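Editor's note: dropping the explicit `"r"` mode is ruff's UP015 fix (redundant open modes): text-mode read is already the default. Sketch:

    with open(__file__) as handle:  # identical to open(__file__, "r") or "rt"
        first_line = handle.readline()
    print(first_line.rstrip())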
@ -832,7 +828,7 @@ each loaded generic template.
)
for civ in Civs:
count = 0
for units in CivTemplates[civ]:
for _units in CivTemplates[civ]:
count += 1
f.write('<td style="text-align:center;">' + str(count) + "</td>\n")

34
source/tools/xmlvalidator/validate_grammar.py Normal file → Executable file

@ -1,12 +1,13 @@
#!/usr/bin/env python3
from argparse import ArgumentParser
from pathlib import Path
from os.path import join, realpath, exists, dirname
from json import load
from re import match
from logging import getLogger, StreamHandler, INFO, WARNING, Filter, Formatter
import lxml.etree
import sys
from argparse import ArgumentParser
from json import load
from logging import INFO, WARNING, Filter, Formatter, StreamHandler, getLogger
from os.path import dirname, exists, join, realpath
from pathlib import Path
from re import match
import lxml.etree
class SingleLevelFilter(Filter):
@ -17,8 +18,7 @@ class SingleLevelFilter(Filter):
def filter(self, record):
if self.reject:
return record.levelno != self.passlevel
else:
return record.levelno == self.passlevel
return record.levelno == self.passlevel
class VFS_File:
@ -68,8 +68,8 @@ class RelaxNGValidator:
def main(self):
"""Program entry point, parses command line arguments and launches the validation"""
# ordered uniq mods (dict maintains ordered keys from python 3.6)
self.logger.info(f"Checking {'|'.join(self.mods)}'s integrity.")
self.logger.info(f"The following mods will be loaded: {'|'.join(self.mods)}.")
self.logger.info("Checking %s's integrity.", "|".join(self.mods))
self.logger.info("The following mods will be loaded: %s.", "|".join(self.mods))
return self.run()
def find_files(self, vfs_root, mods, vfs_path, *ext_list):
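Editor's note: replacing f-strings in log calls with `%`-style arguments is ruff's G004 fix: the message is only interpolated if the record is actually emitted, and every call site keeps a constant format string. Self-contained sketch:

    import logging

    logger = logging.getLogger("validator")
    logger.addHandler(logging.StreamHandler())
    logger.setLevel(logging.INFO)
    mods = ["public", "mod"]
    logger.info("The following mods will be loaded: %s.", "|".join(mods))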
@ -83,7 +83,7 @@ class RelaxNGValidator:
def find_recursive(dp, base):
"""(relative Path, full Path) generator"""
if dp.is_dir():
if dp.name != ".svn" and dp.name != ".git" and not dp.name.endswith("~"):
if dp.name not in (".svn", ".git") and not dp.name.endswith("~"):
for fp in dp.iterdir():
yield from find_recursive(fp, base)
elif dp.suffix in full_exts:
@ -191,7 +191,7 @@ class RelaxNGValidator:
def validate_files(self, name, files, schemapath):
relax_ng_path = self.get_relaxng_file(schemapath)
if relax_ng_path == "":
self.logger.warning(f"Could not find {schemapath}")
self.logger.warning("Could not find %s", schemapath)
return
data = lxml.etree.parse(relax_ng_path)
@ -201,14 +201,14 @@ class RelaxNGValidator:
try:
doc = lxml.etree.parse(str(file[1]))
relaxng.assertValid(doc)
except Exception as e:
except Exception:
error_count = error_count + 1
self.logger.error(f"{file[1]}: " + str(e))
self.logger.exception(file[1])
if self.verbose:
self.logger.info(f"{error_count} {name} validation errors")
self.logger.info("%d %s validation errors", error_count, name)
elif error_count > 0:
self.logger.error(f"{error_count} {name} validation errors")
self.logger.error("%d %s validation errors", error_count, name)
self.inError = True
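Editor's note: swapping `logger.error(... + str(e))` inside an `except` block for `logger.exception(...)` (ruff's TRY400 fix) logs at ERROR level and appends the full traceback automatically, so the handler no longer needs to bind or format the exception at all. Sketch:

    import logging

    logger = logging.getLogger("validator")
    logger.addHandler(logging.StreamHandler())
    try:
        raise ValueError("invalid schema")
    except ValueError:
        logger.exception("schema.rng")  # message plus traceback, no str(e) needed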

80
source/tools/xmlvalidator/validator.py Normal file → Executable file

@ -1,10 +1,10 @@
#!/usr/bin/env python3
import argparse
import os
import sys
import re
import xml.etree.ElementTree
from logging import getLogger, StreamHandler, INFO, WARNING, Formatter, Filter
import sys
from logging import INFO, WARNING, Filter, Formatter, StreamHandler, getLogger
from xml.etree import ElementTree as ET
class SingleLevelFilter(Filter):
@ -15,8 +15,7 @@ class SingleLevelFilter(Filter):
def filter(self, record):
if self.reject:
return record.levelno != self.passlevel
else:
return record.levelno == self.passlevel
return record.levelno == self.passlevel
class Actor:
@ -30,9 +29,9 @@ class Actor:
def read(self, physical_path):
try:
tree = xml.etree.ElementTree.parse(physical_path)
except xml.etree.ElementTree.ParseError as err:
self.logger.error('"%s": %s' % (physical_path, err.msg))
tree = ET.parse(physical_path)
except ET.ParseError:
self.logger.exception(physical_path)
return False
root = tree.getroot()
# Special case: particles don't need a diffuse texture.
@ -52,10 +51,10 @@ class Actor:
def read_variant(self, actor_physical_path, relative_path):
physical_path = actor_physical_path.replace(self.vfs_path, relative_path)
try:
tree = xml.etree.ElementTree.parse(physical_path)
except xml.etree.ElementTree.ParseError as err:
self.logger.error('"%s": %s' % (physical_path, err.msg))
return False
tree = ET.parse(physical_path)
except ET.ParseError:
self.logger.exception(physical_path)
return
root = tree.getroot()
file = root.get("file")
@ -75,9 +74,9 @@ class Material:
def read(self, physical_path):
try:
root = xml.etree.ElementTree.parse(physical_path).getroot()
except xml.etree.ElementTree.ParseError as err:
self.logger.error('"%s": %s' % (physical_path, err.msg))
root = ET.parse(physical_path).getroot()
except ET.ParseError:
self.logger.exception(physical_path)
return False
for element in root.findall(".//required_texture"):
texture_name = element.get("name")
@ -127,7 +126,7 @@ class Validator:
if not os.path.isdir(physical_path):
return result
for file_name in os.listdir(physical_path):
if file_name == ".git" or file_name == ".svn":
if file_name in (".git", ".svn"):
continue
vfs_file_path = os.path.join(vfs_path, file_name)
physical_file_path = os.path.join(physical_path, file_name)
@ -180,8 +179,9 @@ class Validator:
and actor.material not in self.invalid_materials
):
self.logger.error(
'"%s": unknown material "%s"'
% (self.get_mod_path(actor.mod_name, actor.vfs_path), actor.material)
'"%s": unknown material "%s"',
self.get_mod_path(actor.mod_name, actor.vfs_path),
actor.material,
)
self.inError = True
if actor.material not in self.materials:
@ -189,42 +189,34 @@ class Validator:
material = self.materials[actor.material]
missing_textures = ", ".join(
set(
[
required_texture
for required_texture in material.required_textures
if required_texture not in actor.textures
]
)
{
required_texture
for required_texture in material.required_textures
if required_texture not in actor.textures
}
)
if len(missing_textures) > 0:
self.logger.error(
'"%s": actor does not contain required texture(s) "%s" from "%s"'
% (
self.get_mod_path(actor.mod_name, actor.vfs_path),
missing_textures,
material.name,
)
'"%s": actor does not contain required texture(s) "%s" from "%s"',
self.get_mod_path(actor.mod_name, actor.vfs_path),
missing_textures,
material.name,
)
self.inError = True
extra_textures = ", ".join(
set(
[
extra_texture
for extra_texture in actor.textures
if extra_texture not in material.required_textures
]
)
{
extra_texture
for extra_texture in actor.textures
if extra_texture not in material.required_textures
}
)
if len(extra_textures) > 0:
self.logger.warning(
'"%s": actor contains unnecessary texture(s) "%s" from "%s"'
% (
self.get_mod_path(actor.mod_name, actor.vfs_path),
extra_textures,
material.name,
)
'"%s": actor contains unnecessary texture(s) "%s" from "%s"',
self.get_mod_path(actor.mod_name, actor.vfs_path),
extra_textures,
material.name,
)
self.inError = True