Compare commits

..

10 Commits

Author SHA1 Message Date
0ba8ea3429
Update CODEOWNERS
This updates CODEOWNERS to remove owners for the lobby bot code, which
isn't included in this repository anymore. It also adds me for some
areas where I can help with PR reviews.
2024-08-24 13:48:43 +02:00
1f51fcb87f
Add hook for non-breaking space in 0 A.D.
This replaces the existing arclint linter, which ensured the project name
only includes a non-breaking space, with a pre-commit hook. The regex
it checks is slightly different, to account for escaped non-breaking
spaces in JavaScript files and to avoid some false positives.
2024-08-24 13:08:42 +02:00
a44dd59a0c Add some missing headers
When running clang-format, which reorders headers, these are the ones
that came up as missing.

Signed-off-by: Ralph Sennhauser <ralph.sennhauser@gmail.com>
2024-08-24 12:19:52 +02:00
d055090dd2
Add ruff pre-commit hook
Add configuration for pre-commit, ruff as a hook and configuration to
run ruff whenever a pull request is opened or code is pushed.
2024-08-24 10:54:26 +02:00
c49d4eedd0
Lint and format Python files using ruff
To improve the quality and uniformity of the included Python code, this
lints and formats the included Python files with ruff.
2024-08-24 10:54:26 +02:00
8519eb9b86
Reduce time needed for STUN
In my tests this reduced the time necessary for starting to host a game
or joining a hosted game by ~180ms.
2024-08-23 21:00:15 +02:00
0efaf5ac4b Add common IDE temporary files to gitignore 2024-08-23 19:29:28 +02:00
eeb0f1cce6
Make checkrefs and called scripts return an error 2024-08-23 17:54:56 +02:00
2a06eea08a
Fix checkrefs.py for python 3.12
Fixes #6898
Reported by: @Itms
2024-08-23 15:59:36 +02:00
393ad6c8a6
Improve windows pipeline 2024-08-23 13:08:04 +02:00
48 changed files with 2180 additions and 1581 deletions


@@ -1,5 +1,8 @@
+## == Project configuration
 ## Gitea configuration
 \.gitea/.* @Stan @Itms
+## Linting
+\.pre-commit-config\.yaml @Dunedan
 ## == Build & Libraries
 (build|libraries)/.* @Itms @Stan
@@ -15,10 +18,9 @@ source/(collada|graphics|renderer)/.* @vladislavbelov
 source/simulation2/.* @wraitii @Itms
 ## == Tools
+source/tools/.*\.py @Dunedan
 ## Atlas
 source/tools/atlas/.* @vladislavbelov @trompetin17
-## Lobby
-source/tools/XpartaMuPP/.* @Dunedan @user1
 ## == Scripts
 ## GUI


@@ -0,0 +1,12 @@
---
name: pre-commit
on:
  - push
  - pull_request
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
      - uses: pre-commit/action@v3.0.1
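The same checks the workflow runs in CI can be reproduced locally before pushing. A minimal sketch, assuming pre-commit is installed (e.g. via pip) and this is run from the repository root:

# Sketch: run the repository's pre-commit hooks against the whole tree,
# mirroring what the CI job above does on push and pull_request.
import subprocess

result = subprocess.run(["pre-commit", "run", "--all-files"])
raise SystemExit(result.returncode)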

.gitignore (vendored, 2 changes)

@@ -100,4 +100,4 @@ Thumbs.db
 # IDE files
 /.vs/
 /.vscode/
-/.idea/
+/.idea/

.pre-commit-config.yaml (new file, 26 lines)

@@ -0,0 +1,26 @@
---
repos:
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.6.1
    hooks:
      - id: ruff
        args:
          - --output-format=full
        exclude: ^source/tools/webservices/
      - id: ruff-format
        args:
          - --check
          - --target-version
          - py311
        exclude: ^source/tools/webservices/
  - repo: local
    hooks:
      - id: non-breaking-space-in-0ad
        name: check for non-breaking space in "0 A.D."
        description: |
          Verify a non-breaking spaces is used in the project name ("0 A.D").
        entry: '0(?!(\xc2\xa0|\\xa0)A\.D\.)\s?(?<!\\xa0)(A|a)\.(D|d)\.?'
        language: pygrep
        types: [text]
        files: ^binaries/
        exclude: (^binaries/data/mods/(mod|public)/art/.*\.xml|\.dae$)
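To see what the pygrep entry above actually matches, here is a small self-test (illustrative, not part of the commit). It applies the regex to byte strings, since the pattern spells the non-breaking space as its UTF-8 byte sequence \xc2\xa0:

# Sketch: exercise the hook's regex against a few sample inputs.
import re

pattern = re.compile(rb"0(?!(\xc2\xa0|\\xa0)A\.D\.)\s?(?<!\\xa0)(A|a)\.(D|d)\.?")

assert pattern.search("0 A.D.".encode("utf-8"))           # plain space: flagged
assert not pattern.search("0\u00a0A.D.".encode("utf-8"))  # non-breaking space: passes
assert not pattern.search(b"0ad")                         # the short form is left alone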


@@ -518,8 +518,13 @@ enabled = true ; The STUN protocol allows hosting games wi
 ; If STUN is disabled, the game relies on direct connection, UPnP and port forwarding.
 server = "lobby.wildfiregames.com" ; Address of the STUN server.
 port = 3478 ; Port of the STUN server.
-delay = 200 ; Duration in milliseconds that is waited between STUN messages.
-; Smaller numbers speed up joins but also become less stable.
+delay = 10 ; Duration in milliseconds that is waited between checking for retrieved STUN responses.
+; Smaller numbers speed up joins but may make them less stable, if max_tries isn't increased proportionally as well.
+max_tries = 100 ; Maximum number of tries for receiving STUN responses.
+
+[lobby.fw_punch]
+delay = 200 ; Duration in milliseconds between sending hole punching messages.
+num_msg = 3 ; Number of hole punching messages to send.

 [mod]
 enabledmods = "mod public"


@@ -12,7 +12,6 @@ phutil_register_library_map(array(
     'ESLintLinter' => 'src/ESLintLinter.php',
     'JenkinsRenderer' => 'src/JenkinsRenderer.php',
     'LicenceYearLinter' => 'src/LicenceYearLinter.php',
-    'ProjectNameLinter' => 'src/ProjectNameLinter.php',
   ),
   'function' => array(
     'remove_null' => 'src/JenkinsRenderer.php',
@@ -21,6 +20,5 @@ phutil_register_library_map(array(
     'ESLintLinter' => 'ArcanistExternalLinter',
     'JenkinsRenderer' => 'ArcanistLintRenderer',
     'LicenceYearLinter' => 'ArcanistLinter',
-    'ProjectNameLinter' => 'ArcanistLinter',
   ),
 ));


@@ -1,76 +0,0 @@ (src/ProjectNameLinter.php deleted; replaced by the pre-commit hook above)
<?php
/**
* Copyright 2023 Wildfire Games.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Linter for the project name 0 A.D..
*/
final class ProjectNameLinter extends ArcanistLinter {
public function getInfoName() {
return pht('Project Name Linter');
}
public function getLinterName() {
return 'Project Name';
}
public function getLinterConfigurationName() {
return 'project-name';
}
const BAD_NAME = 1;
public function getLintSeverityMap() {
return array(
self::BAD_NAME => ArcanistLintSeverity::SEVERITY_WARNING,
);
}
public function getLintNameMap() {
return array(
self::BAD_NAME => pht('Incorrect project name. Notice the non-breaking space in 0 A.D.'),
);
}
public function lintPath($path) {
$binaries_prefix = "binaries";
if (substr($path, 0, strlen($binaries_prefix)) != $binaries_prefix) {
return;
}
$txt = $this->getData($path);
$matches = null;
$preg = preg_match_all(
"/((?!0 A\\.D\\.|0ad)0\\s?(?:A|a)\\.?(?:D|d)\\.?)/",
$txt,
$matches,
PREG_OFFSET_CAPTURE);
if (!$preg) {
return;
}
foreach ($matches[0] as $match) {
list($string, $offset) = $match;
$this->raiseLintAtOffset(
$offset,
self::BAD_NAME,
pht('Incorrect project name. Notice the non-breaking space in 0 A.D.'),
$string);
}
}
}


@@ -43,11 +43,23 @@ pipeline {
             }
         }
+        stage("Debug Build") {
+            steps {
+                bat("cd build\\workspaces\\vs2017 && ${visualStudioPath} pyrogenesis.sln /p:Configuration=Debug ${buildOptions}")
+                timeout(time: 15) {
+                    bat "cd binaries\\system && test_dbg.exe > cxxtest-debug.xml"
+                }
+            }
+            post {
+                always {
+                    junit 'binaries/system/cxxtest-debug.xml'
+                }
+            }
+        }
         stage ("Release Build") {
             steps {
-                dir('build\\workspaces\\vs2017'){
-                    bat("${visualStudioPath} pyrogenesis.sln /p:Configuration=Release ${buildOptions}")
-                }
+                bat("cd build\\workspaces\\vs2017 && ${visualStudioPath} pyrogenesis.sln /p:Configuration=Release ${buildOptions}")
                 timeout(time: 5) {
                     bat "cd binaries\\system && test.exe > cxxtest-release.xml"
                 }

ruff.toml (new file, 10 lines)

@@ -0,0 +1,10 @@
line-length = 99

[lint.isort]
lines-after-imports = 2

[lint.pycodestyle]
max-doc-length = 72

[lint.pydocstyle]
convention = "pep257"


@@ -1,138 +1,158 @@
(ruff formatting pass: quotes normalized to double, long calls re-wrapped. Content changes: a "# ruff: noqa: F403, F405" marker is added and the unused "import sys" is dropped. The updated file is shown.)

#!/usr/bin/env python3
# ruff: noqa: F403, F405
from ctypes import *
import os
import xml.etree.ElementTree as ET

binaries = "../../../binaries"

# Work out the platform-dependent library filename
dll_filename = {
    "posix": "./libCollada_dbg.so",
    "nt": "Collada_dbg.dll",
}[os.name]

# The DLL may need other DLLs which are in its directory, so set the path to that
# (Don't care about clobbering the old PATH - it doesn't have anything important)
os.environ["PATH"] = "%s/system/" % binaries

# Load the actual library
library = cdll.LoadLibrary("%s/system/%s" % (binaries, dll_filename))


def log(severity, message):
    print("[%s] %s" % (("INFO", "WARNING", "ERROR")[severity], message))


clog = CFUNCTYPE(None, c_int, c_char_p)(log)
# (the CFUNCTYPE must not be GC'd, so try to keep a reference)
library.set_logger(clog)

skeleton_definitions = open("%s/data/tests/collada/skeletons.xml" % binaries).read()
library.set_skeleton_definitions(skeleton_definitions, len(skeleton_definitions))


def _convert_dae(func, filename, expected_status=0):
    output = []

    def cb(cbdata, str, len):
        output.append(string_at(str, len))

    cbtype = CFUNCTYPE(None, POINTER(None), POINTER(c_char), c_uint)
    status = func(filename, cbtype(cb), None)
    assert status == expected_status
    return "".join(output)


def convert_dae_to_pmd(*args, **kwargs):
    return _convert_dae(library.convert_dae_to_pmd, *args, **kwargs)


def convert_dae_to_psa(*args, **kwargs):
    return _convert_dae(library.convert_dae_to_psa, *args, **kwargs)


def clean_dir(path):
    # Remove all files first
    try:
        for f in os.listdir(path):
            os.remove(path + "/" + f)
        os.rmdir(path)
    except OSError:
        pass  # (ignore errors if files are in use)
    # Make sure the directory exists
    try:
        os.makedirs(path)
    except OSError:
        pass  # (ignore errors if it already exists)


def create_actor(mesh, texture, anims, props_):
    actor = ET.Element("actor", version="1")
    ET.SubElement(actor, "castshadow")
    group = ET.SubElement(actor, "group")
    variant = ET.SubElement(group, "variant", frequency="100", name="Base")
    ET.SubElement(variant, "mesh").text = mesh + ".pmd"
    ET.SubElement(variant, "texture").text = texture + ".dds"

    animations = ET.SubElement(variant, "animations")
    for name, file in anims:
        ET.SubElement(animations, "animation", file=file + ".psa", name=name, speed="100")

    props = ET.SubElement(variant, "props")
    for name, file in props_:
        ET.SubElement(props, "prop", actor=file + ".xml", attachpoint=name)

    return ET.tostring(actor)


def create_actor_static(mesh, texture):
    actor = ET.Element("actor", version="1")
    ET.SubElement(actor, "castshadow")
    group = ET.SubElement(actor, "group")
    variant = ET.SubElement(group, "variant", frequency="100", name="Base")
    ET.SubElement(variant, "mesh").text = mesh + ".pmd"
    ET.SubElement(variant, "texture").text = texture + ".dds"
    return ET.tostring(actor)


################################

# Error handling

if False:
    convert_dae_to_pmd("This is not well-formed XML", expected_status=-2)
    convert_dae_to_pmd("<html>This is not COLLADA</html>", expected_status=-2)
    convert_dae_to_pmd("<COLLADA>This is still not valid COLLADA</COLLADA>", expected_status=-2)

# Do some real conversions, so the output can be tested in the Actor Viewer

test_data = binaries + "/data/tests/collada"
test_mod = binaries + "/data/mods/_test.collada"

clean_dir(test_mod + "/art/meshes")
clean_dir(test_mod + "/art/actors")
clean_dir(test_mod + "/art/animation")

# for test_file in ['cube', 'jav2', 'jav2b', 'teapot_basic', 'teapot_skin', 'plane_skin', 'dude_skin', 'mergenonbone', 'densemesh']:
# for test_file in ['teapot_basic', 'jav2b', 'jav2d']:
for test_file in ["xsitest3c", "xsitest3e", "jav2d", "jav2d2"]:
    # for test_file in ['xsitest3']:
    # for test_file in []:
    print("* Converting PMD %s" % (test_file))

    input_filename = "%s/%s.dae" % (test_data, test_file)
    output_filename = "%s/art/meshes/%s.pmd" % (test_mod, test_file)

    input = open(input_filename).read()
    output = convert_dae_to_pmd(input)
    open(output_filename, "wb").write(output)

    xml = create_actor(
        test_file,
        "male",
        [
            ("Idle", "dudeidle"),
            ("Corpse", "dudecorpse"),
            ("attack1", test_file),
            ("attack2", "jav2d"),
        ],
        [("helmet", "teapot_basic_static")],
    )
    open("%s/art/actors/%s.xml" % (test_mod, test_file), "w").write(xml)

    xml = create_actor_static(test_file, "male")
    open("%s/art/actors/%s_static.xml" % (test_mod, test_file), "w").write(xml)

# for test_file in ['jav2','jav2b', 'jav2d']:
for test_file in ["xsitest3c", "xsitest3e", "jav2d", "jav2d2"]:
    # for test_file in []:
    print("* Converting PSA %s" % (test_file))

    input_filename = "%s/%s.dae" % (test_data, test_file)
    output_filename = "%s/art/animation/%s.psa" % (test_mod, test_file)

    input = open(input_filename).read()
    output = convert_dae_to_psa(input)
    open(output_filename, "wb").write(output)


@@ -26,6 +26,8 @@
 #include "lib/sysdep/compiler.h" // MSC_VERSION
 #include "lib/sysdep/arch.h" // ARCH_AMD64

+#include <cstdint>
+
 template<typename T>
 inline bool IsAligned(T t, uintptr_t multiple)
 {


@@ -30,6 +30,8 @@
 #include "lib/sysdep/compiler.h"
 #include "lib/sysdep/arch.h" // ARCH_AMD64

+#include <cstddef>
+
 /**
  * mark a function parameter as unused and avoid
  * the corresponding compiler warning.


@@ -161,6 +161,10 @@ To summarize: +/-1SHHCC (S=subsystem, HH=header, CC=code number)
 #ifndef INCLUDED_STATUS
 #define INCLUDED_STATUS

+#include "lib/types.h"
+
+#include <cstddef>
+
 // an integral type allows defining error codes in separate headers,
 // but is not as type-safe as an enum. use Lint's 'strong type' checking
 // to catch errors such as Status Func() { return 1; }.


@@ -23,6 +23,8 @@
 #ifndef INCLUDED_UNIX_EXECUTABLE_PATHNAME
 #define INCLUDED_UNIX_EXECUTABLE_PATHNAME

+#include "lib/os_path.h"
+
 OsPath unix_ExecutablePathname();

 #endif // INCLUDED_UNIX_EXECUTABLE_PATHNAME


@@ -1,4 +1,4 @@
-/* Copyright (C) 2022 Wildfire Games.
+/* Copyright (C) 2024 Wildfire Games.
  * Copyright (C) 2013-2016 SuperTuxKart-Team.
  * This file is part of 0 A.D.
  *
@@ -183,12 +183,13 @@ bool ReceiveStunResponse(ENetHost& transactionHost, std::vector<u8>& buffer)
     ENetAddress sender = m_StunServer;
     int len = enet_socket_receive(transactionHost.socket, &sender, &enetBuffer, 1);

-    int delay = 200;
+    int delay = 10;
     CFG_GET_VAL("lobby.stun.delay", delay);
+    int maxTries = 100;
+    CFG_GET_VAL("lobby.stun.max_tries", maxTries);

     // Wait to receive the message because enet sockets are non-blocking
-    const int max_tries = 5;
-    for (int count = 0; len <= 0 && (count < max_tries || max_tries == -1); ++count)
+    for (int count = 0; len <= 0 && (count < maxTries || maxTries == -1); ++count)
     {
         std::this_thread::sleep_for(std::chrono::milliseconds(delay));
         len = enet_socket_receive(transactionHost.socket, &sender, &enetBuffer, 1);
@@ -359,10 +360,12 @@ void SendHolePunchingMessages(ENetHost& enetClient, const std::string& serverAddress)
     enet_address_set_host(&addr, serverAddress.c_str());

     int delay = 200;
-    CFG_GET_VAL("lobby.stun.delay", delay);
+    CFG_GET_VAL("lobby.fw_punch.delay", delay);
+    int numMsg = 3;
+    CFG_GET_VAL("lobby.fw_punch.num_msg", numMsg);

     // Send an UDP message from enet host to ip:port
-    for (int i = 0; i < 3; ++i)
+    for (int i = 0; i < numMsg || numMsg == -1; ++i)
     {
         SendStunRequest(enetClient, addr);
         std::this_thread::sleep_for(std::chrono::milliseconds(delay));
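For intuition: the worst-case retry budget stays the same before and after this change; what improves is the polling granularity. A rough sketch (not code from the commit):

# Old defaults: poll every 200 ms, up to 5 tries  -> worst case 1000 ms.
# New defaults: poll every 10 ms, up to 100 tries -> same worst case,
# but a response arriving after e.g. 15 ms is now seen at 20 ms instead
# of 200 ms, which is where the ~180 ms saving in the commit message comes from.
for delay_ms, max_tries in ((200, 5), (10, 100)):
    print(f"delay={delay_ms} ms, max_tries={max_tries}, budget={delay_ms * max_tries} ms")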

(File diff suppressed because it is too large.)


@@ -8,41 +8,58 @@ from scriptlib import warn, SimulTemplateEntity, find_files
(ruff formatting pass: quote normalization and re-wrapping of the long conditionals; the updated version is shown.)

def find_entities(vfs_root):
    base = vfs_root / "public" / "simulation" / "templates"
    return [
        str(fp.relative_to(base).with_suffix(""))
        for (_, fp) in find_files(vfs_root, ["public"], "simulation/templates", "xml")
    ]


def main():
    vfs_root = Path(__file__).resolve().parents[3] / "binaries" / "data" / "mods"
    simul_templates_path = Path("simulation/templates")
    simul_template_entity = SimulTemplateEntity(vfs_root)
    with open("creation.dot", "w") as dot_f:
        dot_f.write("digraph G {\n")
        files = sorted(find_entities(vfs_root))
        for f in files:
            if f.startswith("template_"):
                continue
            print(f"# {f}...")
            entity = simul_template_entity.load_inherited(simul_templates_path, f, ["public"])
            if (
                entity.find("Builder") is not None
                and entity.find("Builder").find("Entities") is not None
            ):
                entities = (
                    entity.find("Builder")
                    .find("Entities")
                    .text.replace("{civ}", entity.find("Identity").find("Civ").text)
                )
                builders = split(r"\s+", entities.strip())
                for builder in builders:
                    if Path(builder) in files:
                        warn(f"Invalid Builder reference: {f} -> {builder}")
                    dot_f.write(f'"{f}" -> "{builder}" [color=green];\n')
            if (
                entity.find("TrainingQueue") is not None
                and entity.find("TrainingQueue").find("Entities") is not None
            ):
                entities = (
                    entity.find("TrainingQueue")
                    .find("Entities")
                    .text.replace("{civ}", entity.find("Identity").find("Civ").text)
                )
                training_queues = split(r"\s+", entities.strip())
                for training_queue in training_queues:
                    if Path(training_queue) in files:
                        warn(f"Invalid TrainingQueue reference: {f} -> {training_queue}")
                    dot_f.write(f'"{f}" -> "{training_queue}" [color=blue];\n')
        dot_f.write("}\n")
    if run(["dot", "-V"], capture_output=True).returncode == 0:
        exit(run(["dot", "-Tpng", "creation.dot", "-o", "creation.png"], text=True).returncode)


if __name__ == "__main__":
    chdir(Path(__file__).resolve().parent)
    main()


@@ -17,8 +17,10 @@ RELAXNG_SCHEMA_ERROR_MSG = """Relax NG schema non existant.
(ruff formatting pass: argparse calls re-wrapped, redundant parentheses dropped, and the stray space in "errorch =logging. StreamHandler" fixed; the updated version of each hunk is shown.)

Please create the file: {}
You can do that by running 'pyrogenesis -dumpSchema' in the 'system' directory
"""
XMLLINT_ERROR_MSG = (
    "xmllint not found in your PATH, please install it " "(usually in libxml2 package)"
)


class SingleLevelFilter(logging.Filter):
    def __init__(self, passlevel, reject):

@@ -27,37 +29,48 @@ class SingleLevelFilter(logging.Filter):
    def filter(self, record):
        if self.reject:
            return record.levelno != self.passlevel
        else:
            return record.levelno == self.passlevel


logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# create a console handler, seems nicer to Windows and for future uses
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
ch.setFormatter(logging.Formatter("%(levelname)s - %(message)s"))
f1 = SingleLevelFilter(logging.INFO, False)
ch.addFilter(f1)
logger.addHandler(ch)
errorch = logging.StreamHandler(sys.stderr)
errorch.setLevel(logging.WARNING)
errorch.setFormatter(logging.Formatter("%(levelname)s - %(message)s"))
logger.addHandler(errorch)


def main(argv: Sequence[str] | None = None) -> int:
    parser = argparse.ArgumentParser(description="Validate templates")
    parser.add_argument("-m", "--mod-name", required=True, help="The name of the mod to validate.")
    parser.add_argument(
        "-r",
        "--root",
        dest="vfs_root",
        default=Path(),
        type=Path,
        help="The path to mod's root location.",
    )
    parser.add_argument(
        "-s",
        "--relaxng-schema",
        default=Path() / ENTITY_RELAXNG_FNAME,
        type=Path,
        help="The path to mod's root location.",
    )
    parser.add_argument(
        "-t", "--templates", nargs="*", help="Optionally, a list of templates to validate."
    )
    parser.add_argument("-v", "--verbose", help="Be verbose about the output.", default=False)

    args = parser.parse_args(argv)

@@ -72,8 +85,9 @@ def main(argv: Sequence[str] | None = None) -> int:
    if args.templates:
        templates = sorted([(Path(t), None) for t in args.templates])
    else:
        templates = sorted(
            find_files(args.vfs_root, [args.mod_name], SIMUL_TEMPLATES_PATH.as_posix(), "xml")
        )

    simul_template_entity = SimulTemplateEntity(args.vfs_root, logger)
    count, failed = 0, 0

@@ -82,29 +96,32 @@ def main(argv: Sequence[str] | None = None) -> int:
            continue
        path = fp.as_posix()
        if path.startswith(f"{SIMUL_TEMPLATES_PATH.as_posix()}/mixins/") or path.startswith(
            f"{SIMUL_TEMPLATES_PATH.as_posix()}/special/"
        ):
            continue
        if args.verbose:
            logger.info(f"Parsing {fp}...")
        count += 1
        entity = simul_template_entity.load_inherited(
            SIMUL_TEMPLATES_PATH, str(fp.relative_to(SIMUL_TEMPLATES_PATH)), [args.mod_name]
        )
        xmlcontent = ElementTree.tostring(entity, encoding="unicode")
        try:
            run(
                ["xmllint", "--relaxng", str(args.relaxng_schema.resolve()), "-"],
                input=xmlcontent,
                encoding="utf-8",
                capture_output=True,
                text=True,
                check=True,
            )
        except CalledProcessError as e:
            failed += 1
            if e.stderr:
                logger.error(e.stderr)
            if e.stdout:
                logger.info(e.stdout)

    logger.info(f"Total: {count}; failed: {failed}")


@@ -4,6 +4,7 @@ from re import split
(ruff formatting pass: quote normalization and re-wrapped returns; the updated version of each hunk is shown.)

from xml.etree import ElementTree
from os.path import exists


class SimulTemplateEntity:
    def __init__(self, vfs_root, logger):
        self.vfs_root = vfs_root

@@ -11,11 +12,11 @@ class SimulTemplateEntity:
    def get_file(self, base_path, vfs_path, mod):
        default_path = self.vfs_root / mod / base_path
        file = (default_path / "special" / "filter" / vfs_path).with_suffix(".xml")
        if not exists(file):
            file = (default_path / "mixins" / vfs_path).with_suffix(".xml")
        if not exists(file):
            file = (default_path / vfs_path).with_suffix(".xml")
        return file

    def get_main_mod(self, base_path, vfs_path, mods):

@@ -35,52 +36,52 @@ class SimulTemplateEntity:
        """
        apply tag layer to base_tag
        """
        if tag.get("datatype") == "tokens":
            base_tokens = split(r"\s+", base_tag.text or "")
            tokens = split(r"\s+", tag.text or "")
            final_tokens = base_tokens.copy()
            for token in tokens:
                if token.startswith("-"):
                    token_to_remove = token[1:]
                    if token_to_remove in final_tokens:
                        final_tokens.remove(token_to_remove)
                elif token not in final_tokens:
                    final_tokens.append(token)
            base_tag.text = " ".join(final_tokens)
            base_tag.set("datatype", "tokens")
        elif tag.get("op"):
            op = tag.get("op")
            op1 = Decimal(base_tag.text or "0")
            op2 = Decimal(tag.text or "0")
            # Try converting to integers if possible, to pass validation.
            if op == "add":
                base_tag.text = str(int(op1 + op2) if int(op1 + op2) == op1 + op2 else op1 + op2)
            elif op == "mul":
                base_tag.text = str(int(op1 * op2) if int(op1 * op2) == op1 * op2 else op1 * op2)
            elif op == "mul_round":
                base_tag.text = str(round(op1 * op2))
            else:
                raise ValueError(f"Invalid operator '{op}'")
        else:
            base_tag.text = tag.text
        for prop in tag.attrib:
            if prop not in ("disable", "replace", "parent", "merge"):
                base_tag.set(prop, tag.get(prop))
        for child in tag:
            base_child = base_tag.find(child.tag)
            if "disable" in child.attrib:
                if base_child is not None:
                    base_tag.remove(base_child)
            elif ("merge" not in child.attrib) or (base_child is not None):
                if "replace" in child.attrib and base_child is not None:
                    base_tag.remove(base_child)
                    base_child = None
                if base_child is None:
                    base_child = ElementTree.Element(child.tag)
                    base_tag.append(base_child)
                self.apply_layer(base_child, child)
                if "replace" in base_child.attrib:
                    del base_child.attrib["replace"]

    def load_inherited(self, base_path, vfs_path, mods):
        entity = self._load_inherited(base_path, vfs_path, mods)

@@ -91,7 +92,7 @@ class SimulTemplateEntity:
        """
        vfs_path should be relative to base_path in a mod
        """
        if "|" in vfs_path:
            paths = vfs_path.split("|", 1)
            base = self._load_inherited(base_path, paths[1], mods, base)
            base = self._load_inherited(base_path, paths[0], mods, base)

@@ -106,8 +107,8 @@ class SimulTemplateEntity:
        if duplicates:
            for dup in duplicates:
                self.logger.warning(f"Duplicate child node '{dup}' in tag {el.tag} of {fp}")
        if layer.get("parent"):
            parent = self._load_inherited(base_path, layer.get("parent"), mods, base)
            self.apply_layer(parent, layer)
            return parent
        else:

@@ -124,15 +125,20 @@ def find_files(vfs_root, mods, vfs_path, *ext_list):
    - Path relative to the mod base
    - full Path
    """
    full_exts = ["." + ext for ext in ext_list]

    def find_recursive(dp, base):
        """(relative Path, full Path) generator"""
        if dp.is_dir():
            if dp.name != ".svn" and dp.name != ".git" and not dp.name.endswith("~"):
                for fp in dp.iterdir():
                    yield from find_recursive(fp, base)
        elif dp.suffix in full_exts:
            relative_file_path = dp.relative_to(base)
            yield (relative_file_path, dp.resolve())

    return [
        (rp, fp)
        for mod in mods
        for (rp, fp) in find_recursive(vfs_root / mod / vfs_path, vfs_root / mod)
    ]
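The subtlest part of apply_layer above is the "tokens" handling: a child template's token list is merged into its parent's, and a leading "-" removes a token. A standalone sketch of just that rule, with made-up token names for illustration:

# Sketch: the token-merge rule from SimulTemplateEntity.apply_layer, in isolation.
from re import split

def merge_tokens(base_text, layer_text):
    final_tokens = split(r"\s+", base_text or "").copy()
    for token in split(r"\s+", layer_text or ""):
        if token.startswith("-"):
            if token[1:] in final_tokens:
                final_tokens.remove(token[1:])
        elif token not in final_tokens:
            final_tokens.append(token)
    return " ".join(final_tokens)

# A child template can extend and prune its parent's token list:
print(merge_tokens("Unit Infantry Spearman", "-Spearman Pikeman"))
# -> "Unit Infantry Pikeman"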


@@ -20,47 +20,54 @@ else:
(ruff pass: spaces before call parentheses removed and the `indexes` lambda rewritten as a nested function; the updated version is shown.)

    ft_lib = "libfreetype.so.6"
    lc_lib = "libcairo.so.2"

_freetype_so = ctypes.CDLL(ft_lib)
_cairo_so = ctypes.CDLL(lc_lib)
_cairo_so.cairo_ft_font_face_create_for_ft_face.restype = ctypes.c_void_p
_cairo_so.cairo_ft_font_face_create_for_ft_face.argtypes = [ctypes.c_void_p, ctypes.c_int]
_cairo_so.cairo_set_font_face.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_cairo_so.cairo_font_face_status.argtypes = [ctypes.c_void_p]
_cairo_so.cairo_status.argtypes = [ctypes.c_void_p]

# initialize freetype
_ft_lib = ctypes.c_void_p()
if FT_Err_Ok != _freetype_so.FT_Init_FreeType(ctypes.byref(_ft_lib)):
    raise Exception("Error initialising FreeType library.")

_surface = cairo.ImageSurface(cairo.FORMAT_A8, 0, 0)


class PycairoContext(ctypes.Structure):
    _fields_ = [
        ("PyObject_HEAD", ctypes.c_byte * object.__basicsize__),
        ("ctx", ctypes.c_void_p),
        ("base", ctypes.c_void_p),
    ]


def create_cairo_font_face_for_file(filename, faceindex=0, loadoptions=0):
    # create freetype face
    ft_face = ctypes.c_void_p()
    cairo_ctx = cairo.Context(_surface)
    cairo_t = PycairoContext.from_address(id(cairo_ctx)).ctx

    if FT_Err_Ok != _freetype_so.FT_New_Face(
        _ft_lib, filename.encode("ascii"), faceindex, ctypes.byref(ft_face)
    ):
        raise Exception("Error creating FreeType font face for " + filename)

    # create cairo font face for freetype face
    cr_face = _cairo_so.cairo_ft_font_face_create_for_ft_face(ft_face, loadoptions)
    if CAIRO_STATUS_SUCCESS != _cairo_so.cairo_font_face_status(cr_face):
        raise Exception("Error creating cairo font face for " + filename)

    _cairo_so.cairo_set_font_face(cairo_t, cr_face)
    if CAIRO_STATUS_SUCCESS != _cairo_so.cairo_status(cairo_t):
        raise Exception("Error creating cairo font face for " + filename)

    face = cairo_ctx.get_font_face()

    def indexes(char):
        return _freetype_so.FT_Get_Char_Index(ft_face, ord(char))

    return (face, indexes)


@ -18,59 +18,64 @@
from bisect import bisect_left from bisect import bisect_left
class OutOfSpaceError(Exception): pass
class OutOfSpaceError(Exception):
pass
class Point(object): class Point(object):
def __init__(self, x, y): def __init__(self, x, y):
self.x = x self.x = x
self.y = y self.y = y
def __cmp__(self, other): def __cmp__(self, other):
"""Compares the starting position of height slices""" """Compares the starting position of height slices"""
return self.x - other.x return self.x - other.x
class RectanglePacker(object): class RectanglePacker(object):
"""Base class for rectangle packing algorithms """Base class for rectangle packing algorithms
By uniting all rectangle packers under this common base class, you can By uniting all rectangle packers under this common base class, you can
easily switch between different algorithms to find the most efficient or easily switch between different algorithms to find the most efficient or
performant one for a given job. performant one for a given job.
An almost exhaustive list of packing algorithms can be found here: An almost exhaustive list of packing algorithms can be found here:
http://www.csc.liv.ac.uk/~epa/surveyhtml.html""" http://www.csc.liv.ac.uk/~epa/surveyhtml.html"""
def __init__(self, packingAreaWidth, packingAreaHeight): def __init__(self, packingAreaWidth, packingAreaHeight):
"""Initializes a new rectangle packer """Initializes a new rectangle packer
packingAreaWidth: Maximum width of the packing area packingAreaWidth: Maximum width of the packing area
packingAreaHeight: Maximum height of the packing area""" packingAreaHeight: Maximum height of the packing area"""
self.packingAreaWidth = packingAreaWidth self.packingAreaWidth = packingAreaWidth
self.packingAreaHeight = packingAreaHeight self.packingAreaHeight = packingAreaHeight
def Pack(self, rectangleWidth, rectangleHeight): def Pack(self, rectangleWidth, rectangleHeight):
"""Allocates space for a rectangle in the packing area """Allocates space for a rectangle in the packing area
rectangleWidth: Width of the rectangle to allocate rectangleWidth: Width of the rectangle to allocate
rectangleHeight: Height of the rectangle to allocate rectangleHeight: Height of the rectangle to allocate
Returns the location at which the rectangle has been placed""" Returns the location at which the rectangle has been placed"""
point = self.TryPack(rectangleWidth, rectangleHeight) point = self.TryPack(rectangleWidth, rectangleHeight)
if not point: if not point:
raise OutOfSpaceError("Rectangle does not fit in packing area") raise OutOfSpaceError("Rectangle does not fit in packing area")
return point return point
def TryPack(self, rectangleWidth, rectangleHeight): def TryPack(self, rectangleWidth, rectangleHeight):
"""Tries to allocate space for a rectangle in the packing area """Tries to allocate space for a rectangle in the packing area
rectangleWidth: Width of the rectangle to allocate rectangleWidth: Width of the rectangle to allocate
rectangleHeight: Height of the rectangle to allocate rectangleHeight: Height of the rectangle to allocate
Returns a Point instance if space for the rectangle could be allocated Returns a Point instance if space for the rectangle could be allocated
be found, otherwise returns None""" be found, otherwise returns None"""
raise NotImplementedError raise NotImplementedError
class DumbRectanglePacker(RectanglePacker): class DumbRectanglePacker(RectanglePacker):
def __init__(self, packingAreaWidth, packingAreaHeight): def __init__(self, packingAreaWidth, packingAreaHeight):
RectanglePacker.__init__(self, packingAreaWidth, packingAreaHeight) RectanglePacker.__init__(self, packingAreaWidth, packingAreaHeight)
@ -91,86 +96,85 @@ class DumbRectanglePacker(RectanglePacker):
self.rowh = max(self.rowh, rectangleHeight) self.rowh = max(self.rowh, rectangleHeight)
return r return r
class CygonRectanglePacker(RectanglePacker): class CygonRectanglePacker(RectanglePacker):
""" """
Packer using a custom algorithm by Markus 'Cygon' Ewald Packer using a custom algorithm by Markus 'Cygon' Ewald
Algorithm conceived by Markus Ewald (cygon at nuclex dot org), though Algorithm conceived by Markus Ewald (cygon at nuclex dot org), though
I'm quite sure I'm not the first one to come up with it :) I'm quite sure I'm not the first one to come up with it :)
The algorithm always places rectangles as low as possible in the packing The algorithm always places rectangles as low as possible in the packing
area. So, for any new rectangle that is to be added, the packer has to area. So, for any new rectangle that is to be added, the packer has to
determine the X coordinate at which the rectangle can have the lowest determine the X coordinate at which the rectangle can have the lowest
overall height without intersecting any other rectangles. overall height without intersecting any other rectangles.
To quickly discover these locations, the packer uses a sophisticated To quickly discover these locations, the packer uses a sophisticated
data structure that stores the upper silhouette of the packing area. When data structure that stores the upper silhouette of the packing area. When
a new rectangle needs to be added, only the silouette edges need to be a new rectangle needs to be added, only the silouette edges need to be
analyzed to find the position where the rectangle would achieve the lowest""" analyzed to find the position where the rectangle would achieve the lowest"""
def __init__(self, packingAreaWidth, packingAreaHeight): def __init__(self, packingAreaWidth, packingAreaHeight):
"""Initializes a new rectangle packer """Initializes a new rectangle packer
packingAreaWidth: Maximum width of the packing area packingAreaWidth: Maximum width of the packing area
packingAreaHeight: Maximum height of the packing area""" packingAreaHeight: Maximum height of the packing area"""
RectanglePacker.__init__(self, packingAreaWidth, packingAreaHeight) RectanglePacker.__init__(self, packingAreaWidth, packingAreaHeight)
# Stores the height silhouette of the rectangles # Stores the height silhouette of the rectangles
self.heightSlices = [] self.heightSlices = []
# At the beginning, the packing area is a single slice of height 0 # At the beginning, the packing area is a single slice of height 0
self.heightSlices.append(Point(0,0)) self.heightSlices.append(Point(0, 0))
def TryPack(self, rectangleWidth, rectangleHeight): def TryPack(self, rectangleWidth, rectangleHeight):
"""Tries to allocate space for a rectangle in the packing area """Tries to allocate space for a rectangle in the packing area
rectangleWidth: Width of the rectangle to allocate rectangleWidth: Width of the rectangle to allocate
rectangleHeight: Height of the rectangle to allocate rectangleHeight: Height of the rectangle to allocate
Returns a Point instance if space for the rectangle could be allocated Returns a Point instance if space for the rectangle could be allocated
be found, otherwise returns None""" be found, otherwise returns None"""
placement = None placement = None
# If the rectangle is larger than the packing area in any dimension, # If the rectangle is larger than the packing area in any dimension,
# it will never fit! # it will never fit!
if rectangleWidth > self.packingAreaWidth or rectangleHeight > \ if rectangleWidth > self.packingAreaWidth or rectangleHeight > self.packingAreaHeight:
self.packingAreaHeight:
return None return None
# Determine the placement for the new rectangle # Determine the placement for the new rectangle
placement = self.tryFindBestPlacement(rectangleWidth, rectangleHeight) placement = self.tryFindBestPlacement(rectangleWidth, rectangleHeight)
# If a place for the rectangle could be found, update the height slice # If a place for the rectangle could be found, update the height slice
# table to mark the region of the rectangle as being taken. # table to mark the region of the rectangle as being taken.
if placement: if placement:
self.integrateRectangle(placement.x, rectangleWidth, placement.y \ self.integrateRectangle(placement.x, rectangleWidth, placement.y + rectangleHeight)
+ rectangleHeight)
return placement return placement
def tryFindBestPlacement(self, rectangleWidth, rectangleHeight): def tryFindBestPlacement(self, rectangleWidth, rectangleHeight):
"""Finds the best position for a rectangle of the given dimensions """Finds the best position for a rectangle of the given dimensions
rectangleWidth: Width of the rectangle to find a position for rectangleWidth: Width of the rectangle to find a position for
rectangleHeight: Height of the rectangle to find a position for rectangleHeight: Height of the rectangle to find a position for
Returns a Point instance if a valid placement for the rectangle could Returns a Point instance if a valid placement for the rectangle could
be found, otherwise returns None""" be found, otherwise returns None"""
# Slice index, vertical position and score of the best placement we # Slice index, vertical position and score of the best placement we
# could find # could find
bestSliceIndex = -1 # Slice index where the best placement was found bestSliceIndex = -1 # Slice index where the best placement was found
bestSliceY = 0 # Y position of the best placement found bestSliceY = 0 # Y position of the best placement found
# lower == better! # lower == better!
bestScore = self.packingAreaHeight bestScore = self.packingAreaHeight
# This is the counter for the currently checked position. The search # This is the counter for the currently checked position. The search
# works by skipping from slice to slice, determining the suitability # works by skipping from slice to slice, determining the suitability
# of the location for the placement of the rectangle. # of the location for the placement of the rectangle.
leftSliceIndex = 0 leftSliceIndex = 0
# Determine the slice in which the right end of the rectangle is located # Determine the slice in which the right end of the rectangle is located
rightSliceIndex = bisect_left(self.heightSlices, Point(rectangleWidth, 0)) rightSliceIndex = bisect_left(self.heightSlices, Point(rectangleWidth, 0))
while rightSliceIndex <= len(self.heightSlices): while rightSliceIndex <= len(self.heightSlices):
# Determine the highest slice within the slices covered by the # Determine the highest slice within the slices covered by the
# rectangle at its current placement. We cannot put the rectangle # rectangle at its current placement. We cannot put the rectangle
@ -179,21 +183,21 @@ class CygonRectanglePacker(RectanglePacker):
for index in range(leftSliceIndex + 1, rightSliceIndex): for index in range(leftSliceIndex + 1, rightSliceIndex):
if self.heightSlices[index].y > highest: if self.heightSlices[index].y > highest:
highest = self.heightSlices[index].y highest = self.heightSlices[index].y
# Only process this position if it doesn't leave the packing area # Only process this position if it doesn't leave the packing area
if highest + rectangleHeight < self.packingAreaHeight: if highest + rectangleHeight < self.packingAreaHeight:
score = highest score = highest
if score < bestScore: if score < bestScore:
bestSliceIndex = leftSliceIndex bestSliceIndex = leftSliceIndex
bestSliceY = highest bestSliceY = highest
bestScore = score bestScore = score
# Advance the starting slice to the next slice start # Advance the starting slice to the next slice start
leftSliceIndex += 1 leftSliceIndex += 1
if leftSliceIndex >= len(self.heightSlices): if leftSliceIndex >= len(self.heightSlices):
break break
            # Advance the ending slice until we're on the proper slice again,
            # given the new starting position of the rectangle.
            rightRectangleEnd = self.heightSlices[leftSliceIndex].x + rectangleWidth
@@ -202,18 +206,18 @@ class CygonRectanglePacker(RectanglePacker):
                    rightSliceStart = self.packingAreaWidth
                else:
                    rightSliceStart = self.heightSlices[rightSliceIndex].x
                # Is this the slice we're looking for?
                if rightSliceStart > rightRectangleEnd:
                    break
                rightSliceIndex += 1
            # If we crossed the end of the slice array, the rectangle's right
            # end has left the packing area, and thus, our search ends.
            if rightSliceIndex > len(self.heightSlices):
                break
        # Return the best placement we found for this rectangle. If the
        # rectangle didn't fit anywhere, the slice index will still have its
        # initialization value of -1 and we can report that no placement
@@ -222,23 +226,23 @@ class CygonRectanglePacker(RectanglePacker):
            return None
        else:
            return Point(self.heightSlices[bestSliceIndex].x, bestSliceY)

    def integrateRectangle(self, left, width, bottom):
        """Integrates a new rectangle into the height slice table

        left: Position of the rectangle's left side
        width: Width of the rectangle
        bottom: Position of the rectangle's lower side"""
        # Find the first slice that is touched by the rectangle
        startSlice = bisect_left(self.heightSlices, Point(left, 0))
        # We scored a direct hit, so we can replace the slice we have hit
        firstSliceOriginalHeight = self.heightSlices[startSlice].y
        self.heightSlices[startSlice] = Point(left, bottom)
        right = left + width
        startSlice += 1
        # Special case, the rectangle started on the last slice, so we cannot
        # use the start slice + 1 for the binary search and the possibly
        # already modified start slice height now only remains in our temporary
@@ -249,21 +253,24 @@ class CygonRectanglePacker(RectanglePacker):
            # to return to the original height at the end of the rectangle.
            if right < self.packingAreaWidth:
                self.heightSlices.append(Point(right, firstSliceOriginalHeight))
        else:  # The rectangle doesn't start on the last slice
            endSlice = bisect_left(
                self.heightSlices, Point(right, 0), startSlice, len(self.heightSlices)
            )
            # Another direct hit on the final slice's end?
            if endSlice < len(self.heightSlices) and not (
                Point(right, 0) < self.heightSlices[endSlice]
            ):
                del self.heightSlices[startSlice:endSlice]
            else:  # No direct hit, rectangle ends inside another slice
                # Find out to which height we need to return at the right end of
                # the rectangle
                if endSlice == startSlice:
                    returnHeight = firstSliceOriginalHeight
                else:
                    returnHeight = self.heightSlices[endSlice - 1].y
                # Remove all slices covered by the rectangle and begin a new
                # slice at its end to return back to the height of the slice on
                # which the rectangle ends.
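The height slice table used above is a skyline: a sorted list of (x, height) points marking where the top contour of the already-packed rectangles changes. A minimal standalone sketch of that idea, assuming a simplified update without the direct-hit special cases the real method handles:

# Toy skyline: one flat slice covering the whole packing width.
slices = [(0, 0)]


def place(slices, left, width, top):
    """Carve [left, right) out of the skyline at the new height."""
    right = left + width
    # Height to return to where the rectangle ends.
    restore = [h for x, h in slices if x <= right][-1]
    kept = [(x, h) for x, h in slices if x < left or x > right]
    return sorted(kept + [(left, top), (right, restore)])


print(place(slices, 2, 4, 3))  # [(0, 0), (2, 3), (6, 0)]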

@@ -4,13 +4,16 @@
import FontLoader


def dump_font(ttf):
    (face, indexes) = FontLoader.create_cairo_font_face_for_file(
        "../../../binaries/data/tools/fontbuilder/fonts/%s" % ttf, 0, FontLoader.FT_LOAD_DEFAULT
    )
    mappings = [(c, indexes(chr(c))) for c in range(1, 65535)]
    print(ttf, end=" ")
    print(" ".join(str(c) for (c, g) in mappings if g != 0))


dump_font("DejaVuSansMono.ttf")
dump_font("FreeSans.ttf")

@@ -7,6 +7,7 @@ import math
import FontLoader
import Packer


# Representation of a rendered glyph
class Glyph(object):
    def __init__(self, ctx, renderstyle, char, idx, face, size):
@@ -30,7 +31,7 @@ class Glyph(object):
        bb = [inf, inf, -inf, -inf]
        if "stroke" in self.renderstyle:
            for c, w in self.renderstyle["stroke"]:
                ctx.set_line_width(w)
                ctx.glyph_path([self.glyph])
                e = ctx.stroke_extents()
@@ -52,8 +53,8 @@ class Glyph(object):
        # Force multiple of 4, to avoid leakage across S3TC blocks
        # (TODO: is this useful?)
        # self.w += (4 - (self.w % 4)) % 4
        # self.h += (4 - (self.h % 4)) % 4

    def pack(self, packer):
        self.pos = packer.Pack(self.w, self.h)
@@ -69,20 +70,21 @@ class Glyph(object):
        # Render each stroke, and then each fill on top of it
        if "stroke" in self.renderstyle:
            for (r, g, b, a), w in self.renderstyle["stroke"]:
                ctx.set_line_width(w)
                ctx.set_source_rgba(r, g, b, a)
                ctx.glyph_path([self.glyph])
                ctx.stroke()
        if "fill" in self.renderstyle:
            for r, g, b, a in self.renderstyle["fill"]:
                ctx.set_source_rgba(r, g, b, a)
                ctx.glyph_path([self.glyph])
                ctx.fill()
        ctx.restore()


# Load the set of characters contained in the given text file
def load_char_list(filename):
    f = codecs.open(filename, "r", "utf-8")
@@ -90,22 +92,25 @@ def load_char_list(filename):
    f.close()
    return set(chars)


# Construct a Cairo context and surface for rendering text with the given parameters
def setup_context(width, height, renderstyle):
    format = cairo.FORMAT_ARGB32 if "colour" in renderstyle else cairo.FORMAT_A8
    surface = cairo.ImageSurface(format, width, height)
    ctx = cairo.Context(surface)
    ctx.set_line_join(cairo.LINE_JOIN_ROUND)
    return ctx, surface


def generate_font(outname, ttfNames, loadopts, size, renderstyle, dsizes):
    faceList = []
    indexList = []
    for i in range(len(ttfNames)):
        (face, indices) = FontLoader.create_cairo_font_face_for_file(
            "../../../binaries/data/tools/fontbuilder/fonts/%s" % ttfNames[i], 0, loadopts
        )
        faceList.append(face)
        if ttfNames[i] not in dsizes:
            dsizes[ttfNames[i]] = 0
        indexList.append(indices)
@@ -123,32 +128,36 @@ def generate_font(outname, ttfNames, loadopts, size, renderstyle, dsizes):
    # Translate all the characters into glyphs
    # (This is inefficient if multiple characters have the same glyph)
    glyphs = []
    # for c in chars:
    for c in range(0x20, 0xFFFE):
        for i in range(len(indexList)):
            idx = indexList[i](chr(c))
            if c == 0xFFFD and idx == 0:  # use "?" if the missing-glyph glyph is missing
                idx = indexList[i]("?")
            if idx:
                glyphs.append(
                    Glyph(ctx, renderstyle, chr(c), idx, faceList[i], size + dsizes[ttfNames[i]])
                )
                break

    # Sort by decreasing height (tie-break on decreasing width)
    glyphs.sort(key=lambda g: (-g.h, -g.w))

    # Try various sizes to pack the glyphs into
    sizes = []
    for h in [32, 64, 128, 256, 512, 1024, 2048, 4096]:
        sizes.append((h, h))
        sizes.append((h * 2, h))
    sizes.sort(
        key=lambda w_h: (w_h[0] * w_h[1], max(w_h[0], w_h[1]))
    )  # prefer smaller and squarer

    for w, h in sizes:
        try:
            # Using the dumb packer usually creates bigger textures, but runs faster.
            # In practice the size difference is so small it always ends up in the same size
            packer = Packer.DumbRectanglePacker(w, h)
            # packer = Packer.CygonRectanglePacker(w, h)
            for g in glyphs:
                g.pack(packer)
        except Packer.OutOfSpaceError:
@@ -168,7 +177,7 @@ def generate_font(outname, ttfNames, loadopts, size, renderstyle, dsizes):
            fnt.write("%d\n" % linespacing)
            fnt.write("%d\n" % charheight)
            # sorting unneeded, as glyphs are added in increasing order
            # glyphs.sort(key = lambda g: ord(g.char))
            for g in glyphs:
                x0 = g.x0
                y0 = g.y0
@@ -179,31 +188,39 @@ def generate_font(outname, ttfNames, loadopts, size, renderstyle, dsizes):
                # glyph by an arbitrary amount to make it roughly the right
                # place when used after an a-macron glyph.
                if ord(g.char) == 0x0301:
                    y0 += charheight / 3
                fnt.write(
                    "%d %d %d %d %d %d %d %d\n"
                    % (ord(g.char), g.pos.x, h - g.pos.y, g.w, g.h, -x0, y0, g.xadvance)
                )
            fnt.close()
            return
    print("Failed to fit glyphs in texture")


filled = {"fill": [(1, 1, 1, 1)]}
stroked1 = {
    "colour": True,
    "stroke": [((0, 0, 0, 1), 2.0), ((0, 0, 0, 1), 2.0)],
    "fill": [(1, 1, 1, 1)],
}
stroked2 = {"colour": True, "stroke": [((0, 0, 0, 1), 2.0)], "fill": [(1, 1, 1, 1), (1, 1, 1, 1)]}
stroked3 = {"colour": True, "stroke": [((0, 0, 0, 1), 2.5)], "fill": [(1, 1, 1, 1), (1, 1, 1, 1)]}

# For extra glyph support, add your preferred font to the font array
Sans = (["LinBiolinum_Rah.ttf", "FreeSans.ttf"], FontLoader.FT_LOAD_DEFAULT)
Sans_Bold = (["LinBiolinum_RBah.ttf", "FreeSansBold.ttf"], FontLoader.FT_LOAD_DEFAULT)
Sans_Italic = (["LinBiolinum_RIah.ttf", "FreeSansOblique.ttf"], FontLoader.FT_LOAD_DEFAULT)
SansMono = (["DejaVuSansMono.ttf", "FreeMono.ttf"], FontLoader.FT_LOAD_DEFAULT)
Serif = (["texgyrepagella-regular.otf", "FreeSerif.ttf"], FontLoader.FT_LOAD_NO_HINTING)
Serif_Bold = (["texgyrepagella-bold.otf", "FreeSerifBold.ttf"], FontLoader.FT_LOAD_NO_HINTING)

# Define the size differences used to render different fallback fonts,
# i.e. when an added fallback font has smaller glyphs than the original, you can bump it
dsizes = {"HanaMinA.ttf": 2}  # make the glyphs for the Chinese font 2 pts bigger

fonts = (
    ("mono-10", SansMono, 10, filled),
@@ -231,6 +248,8 @@ fonts = (
    ("sans-stroke-16", Sans, 16, stroked2),
)

for name, (fontnames, loadopts), size, style in fonts:
    print("%s..." % name)
    generate_font(
        "../../../binaries/data/mods/mod/fonts/%s" % name, fontnames, loadopts, size, style, dsizes
    )
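As a quick check of the size-selection order above (area first, then the longer edge, so smaller and squarer textures are tried first), the candidate list begins like this:

sizes = []
for h in [32, 64, 128, 256, 512, 1024, 2048, 4096]:
    sizes.append((h, h))
    sizes.append((h * 2, h))
sizes.sort(key=lambda w_h: (w_h[0] * w_h[1], max(w_h[0], w_h[1])))
print(sizes[:4])  # [(32, 32), (64, 32), (64, 64), (128, 64)]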

@@ -16,6 +16,8 @@
# You should have received a copy of the GNU General Public License
# along with 0 A.D. If not, see <http://www.gnu.org/licenses/>.

# ruff: noqa: E741

import io
import os
import subprocess
@@ -23,6 +25,7 @@ from typing import List
from i18n_helper import projectRootDirectory


def get_diff():
    """Return a diff using svn diff"""
    os.chdir(projectRootDirectory)
@@ -31,9 +34,10 @@ def get_diff():
    if diff_process.returncode != 0:
        print(f"Error running svn diff: {diff_process.stderr.decode('utf-8')}. Exiting.")
        return
    return io.StringIO(diff_process.stdout.decode("utf-8"))


def check_diff(diff: io.StringIO, verbose=False) -> List[str]:
    """Run through a diff of .po files and check that some of the changes
    are real translation changes and not just noise (line changes...).
    The algorithm isn't extremely clever, but it is quite fast."""
@@ -57,13 +61,18 @@ def check_diff(diff: io.StringIO, verbose=False) -> List[str]:
            diff.readline()
            l = diff.readline()
            continue
        if l[0] != "-" and l[0] != "+":
            l = diff.readline()
            continue
        if l[1:].strip() == "" or (l[1] == "#" and l[2] == ":"):
            l = diff.readline()
            continue
        if (
            "# Copyright (C)" in l
            or "POT-Creation-Date:" in l
            or "PO-Revision-Date" in l
            or "Last-Translator" in l
        ):
            l = diff.readline()
            continue
        # We've hit a real line
@@ -75,23 +84,25 @@ def check_diff(diff: io.StringIO, verbose=False) -> List[str]:
    return list(files.difference(keep))


def revert_files(files: List[str], verbose=False):
    revert_process = subprocess.run(["svn", "revert"] + files, capture_output=True)
    if revert_process.returncode != 0:
        print(
            f"Warning: Some files could not be reverted. Error: {revert_process.stderr.decode('utf-8')}"
        )
    if verbose:
        for file in files:
            print(f"Reverted {file}")


def add_untracked(verbose=False):
    """Add untracked .po files to svn"""
    diff_process = subprocess.run(["svn", "st", "binaries"], capture_output=True)
    if diff_process.stderr != b"":
        print(f"Error running svn st: {diff_process.stderr.decode('utf-8')}. Exiting.")
        return
    for line in diff_process.stdout.decode("utf-8").split("\n"):
        if not line.startswith("?"):
            continue
        # Ignore non PO files. This is important so that the translator credits
@@ -100,16 +111,17 @@ def add_untracked(verbose=False):
        if not file.endswith(".po") and not file.endswith(".pot"):
            continue
        add_process = subprocess.run(["svn", "add", file, "--parents"], capture_output=True)
        if add_process.stderr != b"":
            print(f"Warning: file {file} could not be added.")
        if verbose:
            print(f"Added {file}")


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--verbose", help="Print reverted files.", action="store_true")
    args = parser.parse_args()
    need_revert = check_diff(get_diff(), args.verbose)
    revert_files(need_revert, args.verbose)
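For reference, these are the kinds of diff lines the filters above classify as noise; the samples below are made up, not taken from a real svn diff:

noise = [
    "+#: gui/session/menu.js:42",           # source-location comment (l[1] == "#", l[2] == ":")
    '+"POT-Creation-Date: 2024-08-23\\n"',  # header churn
    '+"Last-Translator: someone\\n"',       # header churn
    " msgid unchanged context line",        # starts with neither "-" nor "+"
]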

@@ -16,7 +16,10 @@
# You should have received a copy of the GNU General Public License
# along with 0 A.D. If not, see <http://www.gnu.org/licenses/>.

import sys
import os
import re
import multiprocessing

from i18n_helper import l10nFolderName, projectRootDirectory
from i18n_helper.catalog import Catalog
@@ -27,14 +30,17 @@ VERBOSE = 0


class MessageChecker:
    """Checks all messages in a catalog against a regex."""

    def __init__(self, human_name, regex):
        self.regex = re.compile(regex, re.IGNORECASE)
        self.human_name = human_name

    def check(self, inputFilePath, templateMessage, translatedCatalogs):
        patterns = set(
            self.regex.findall(
                templateMessage.id[0] if templateMessage.pluralizable else templateMessage.id
            )
        )

        # As a sanity check, verify that the template message is coherent.
        # Note that these tend to be false positives.
@@ -42,23 +48,32 @@ class MessageChecker:
        if templateMessage.pluralizable:
            pluralUrls = set(self.regex.findall(templateMessage.id[1]))
            if pluralUrls.difference(patterns):
                print(
                    f"{inputFilePath} - Different {self.human_name} in singular and plural source strings "
                    f"for '{templateMessage}' in '{inputFilePath}'"
                )

        for translationCatalog in translatedCatalogs:
            translationMessage = translationCatalog.get(
                templateMessage.id, templateMessage.context
            )
            if not translationMessage:
                continue

            translatedPatterns = set(
                self.regex.findall(
                    translationMessage.string[0]
                    if translationMessage.pluralizable
                    else translationMessage.string
                )
            )
            unknown_patterns = translatedPatterns.difference(patterns)
            if unknown_patterns:
                print(
                    f'{inputFilePath} - {translationCatalog.locale}: '
                    f'Found unknown {self.human_name} {", ".join(["`" + x + "`" for x in unknown_patterns])} in the translation '
                    f'which do not match any of the URLs in the template: {", ".join(["`" + x + "`" for x in patterns])}'
                )

            if templateMessage.pluralizable and translationMessage.pluralizable:
                for indx, val in enumerate(translationMessage.string):
@@ -67,9 +82,12 @@ class MessageChecker:
                    translatedPatternsMulti = set(self.regex.findall(val))
                    unknown_patterns_multi = translatedPatternsMulti.difference(pluralUrls)
                    if unknown_patterns_multi:
                        print(
                            f'{inputFilePath} - {translationCatalog.locale}: '
                            f'Found unknown {self.human_name} {", ".join(["`" + x + "`" for x in unknown_patterns_multi])} in the pluralised translation '
                            f'which do not match any of the URLs in the template: {", ".join(["`" + x + "`" for x in pluralUrls])}'
                        )
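A minimal sketch of how such a checker could be instantiated; the regex here is hypothetical, the real patterns are defined elsewhere in this file:

# Hypothetical: flag translations containing URLs absent from the template.
url_checker = MessageChecker("URLs", r"https?://\S+")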
def check_translations(inputFilePath):
    if VERBOSE:
@@ -100,23 +118,29 @@ def check_translations(inputFilePath):


def main():
    print(
        "\n\tWARNING: Remember to regenerate the POT files with “updateTemplates.py” "
        "before you run this script.\n\tPOT files are not in the repository.\n"
    )
    foundPots = 0
    for root, folders, filenames in os.walk(projectRootDirectory):
        for filename in filenames:
            if (
                len(filename) > 4
                and filename[-4:] == ".pot"
                and os.path.basename(root) == l10nFolderName
            ):
                foundPots += 1
                multiprocessing.Process(
                    target=check_translations, args=(os.path.join(root, filename),)
                ).start()
    if foundPots == 0:
        print(
            "This script did not work because no '.pot' files were found. "
            "Please run 'updateTemplates.py' to generate the '.pot' files, "
            "and run 'pullTranslations.py' to pull the latest translations from Transifex. "
            "Then you can run this script to check for spam in translations."
        )


if __name__ == "__main__":

@@ -26,10 +26,15 @@ However that needs to be fixed on the transifex side, see rP25896. For now
strip the e-mails using this script.
"""

import sys
import os
import glob
import re
import fileinput

from i18n_helper import l10nFolderName, transifexClientFolder, projectRootDirectory


def main():
    translatorMatch = re.compile(r"^(#\s+[^,<]*)\s+<.*>(.*)")
    lastTranslatorMatch = re.compile(r"^(\"Last-Translator:[^,<]*)\s+<.*>(.*)")
@@ -43,7 +48,9 @@ def main():
    for file in files:
        usernames = []
        reached = False
        for line in fileinput.input(
            file.replace("\\", "/"), inplace=True, encoding="utf-8"
        ):
            if reached:
                if line == "# \n":
                    line = ""
@@ -61,5 +68,6 @@ def main():
                reached = True
            sys.stdout.write(line)


if __name__ == "__main__":
    main()
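The two regexes capture everything around the e-mail address, so substituting with groups 1 and 2 drops only the address. A sketch of the intended effect on a made-up credit line:

import re

translatorMatch = re.compile(r"^(#\s+[^,<]*)\s+<.*>(.*)")
line = "# Jane Doe <jane@example.invalid>, 2023-2024"
print(translatorMatch.sub(r"\1\2", line))  # "# Jane Doe, 2023-2024"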

@@ -29,7 +29,9 @@ Translatable strings will be extracted from the generated file, so this should b
once before updateTemplates.py.
"""

import json
import os
import re

from collections import defaultdict
from pathlib import Path
@@ -44,13 +46,23 @@ for root, folders, filenames in os.walk(projectRootDirectory):
        if os.path.exists(os.path.join(root, folder, transifexClientFolder)):
            poLocations.append(os.path.join(root, folder))

creditsLocation = os.path.join(
    projectRootDirectory,
    "binaries",
    "data",
    "mods",
    "public",
    "gui",
    "credits",
    "texts",
    "translators.json",
)

# This dictionary will hold creditors lists for each language, indexed by code
langsLists = defaultdict(list)

# Create the new JSON data
newJSONData = {"Title": "Translators", "Content": []}

# Now go through the list of languages and search the .po files for people
@@ -60,7 +72,7 @@ deletedUsernameMatch = re.compile(r"[0-9a-f]{32}(_[0-9a-f]{7})?")

# Search
for location in poLocations:
    files = Path(location).glob("*.po")
    for file in files:
        lang = file.stem.split(".")[0]
@@ -69,7 +81,7 @@ for location in poLocations:
        if lang == "debug" or lang == "long":
            continue

        with file.open(encoding="utf-8") as poFile:
            reached = False
            for line in poFile:
                if reached:
@@ -80,7 +92,7 @@ for location in poLocations:
                    username = m.group(1)
                    if not deletedUsernameMatch.fullmatch(username):
                        langsLists[lang].append(username)
                if line.strip() == "# Translators:":
                    reached = True

# Sort translator names and remove duplicates
@@ -100,18 +112,18 @@ for langCode, langList in sorted(langsLists.items()):
    try:
        lang_name = Locale.parse(langCode).english_name
    except UnknownLocaleError:
        lang_name = Locale.parse("en").languages.get(langCode)
        if not lang_name:
            raise

    translators = [{"name": name} for name in langList]
    newJSONData["Content"].append({"LangName": lang_name, "List": translators})

# Sort languages by their English names
newJSONData["Content"] = sorted(newJSONData["Content"], key=lambda x: x["LangName"])

# Save the JSON data to the credits file
creditsFile = open(creditsLocation, "w", encoding="utf-8")
json.dump(newJSONData, creditsFile, indent=4)
creditsFile.close()
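The deletedUsernameMatch pattern above filters out Transifex's placeholder names for deleted accounts (a 32-digit hex string, optionally with a 7-digit hex suffix); the values below are made up:

import re

deletedUsernameMatch = re.compile(r"[0-9a-f]{32}(_[0-9a-f]{7})?")
print(bool(deletedUsernameMatch.fullmatch("0a1b2c3d4e5f60718293a4b5c6d7e8f9")))  # True: skipped
print(bool(deletedUsernameMatch.fullmatch("Jane Doe")))  # False: credited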

@@ -20,14 +20,17 @@
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import codecs
import re
import os
import sys

import json as jsonParser

from textwrap import dedent


def pathmatch(mask, path):
    """Matches paths to a mask, where the mask supports * and **.

    Paths use / as the separator
    * matches a sequence of characters without /.
@@ -45,13 +48,11 @@ def pathmatch(mask, path):
        else:
            p = p + re.escape(s[i])
    p = p + "$"
    return re.match(p, path) is not None
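Based on the documented semantics, a single star stays within one path segment while a double star may cross separators; illustrative checks with hypothetical paths:

assert pathmatch("gui/*.js", "gui/options.js")
assert not pathmatch("gui/*.js", "gui/common/tooltips.js")  # "*" cannot cross "/"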
class Extractor(object):
    def __init__(self, directoryPath, filemasks, options):
        self.directoryPath = directoryPath
        self.options = options
@@ -62,9 +63,8 @@ class Extractor(object):
        self.includeMasks = filemasks
        self.excludeMasks = []

    def run(self):
        """Extracts messages.

        :return: An iterator over ``(message, plural, context, (location, pos), comment)`` tuples.
        :rtype: ``iterator``
@@ -73,12 +73,14 @@ class Extractor(object):
        directoryAbsolutePath = os.path.abspath(self.directoryPath)
        for root, folders, filenames in os.walk(directoryAbsolutePath):
            for subdir in folders:
                if subdir.startswith(".") or subdir.startswith("_"):
                    folders.remove(subdir)
            folders.sort()
            filenames.sort()
            for filename in filenames:
                filename = os.path.relpath(
                    os.path.join(root, filename), self.directoryPath
                ).replace(os.sep, "/")
                for filemask in self.excludeMasks:
                    if pathmatch(filemask, filename):
                        break
@@ -86,7 +88,13 @@ class Extractor(object):
                    for filemask in self.includeMasks:
                        if pathmatch(filemask, filename):
                            filepath = os.path.join(directoryAbsolutePath, filename)
                            for (
                                message,
                                plural,
                                context,
                                position,
                                comments,
                            ) in self.extractFromFile(filepath):
                                if empty_string_pattern.match(message):
                                    continue
@@ -94,9 +102,8 @@ class Extractor(object):
                                    filename = "\u2068" + filename + "\u2069"
                                yield message, plural, context, (filename, position), comments

    def extractFromFile(self, filepath):
        """Extracts messages from a specific file.

        :return: An iterator over ``(message, plural, context, position, comments)`` tuples.
        :rtype: ``iterator``
@@ -104,17 +111,17 @@ class Extractor(object):
        pass


class javascript(Extractor):
    """Extract messages from JavaScript source code."""

    empty_msgid_warning = (
        '%s: warning: Empty msgid. It is reserved by GNU gettext: gettext("") '
        "returns the header entry with meta information, not the empty string."
    )

    def extractJavascriptFromFile(self, fileObject):
        from babel.messages.jslexer import tokenize, unquote_string

        funcname = message_lineno = None
        messages = []
        last_argument = None
@@ -122,21 +129,21 @@ class javascript(Extractor):
        concatenate_next = False
        last_token = None
        call_stack = -1
        comment_tags = self.options.get("commentTags", [])
        keywords = self.options.get("keywords", {}).keys()

        for token in tokenize(fileObject.read(), dotted=False):
            if token.type == "operator" and (
                token.value == "("
                or (call_stack != -1 and (token.value == "[" or token.value == "{"))
            ):
                if funcname:
                    message_lineno = token.lineno
                call_stack += 1
            elif call_stack == -1 and token.type == "linecomment":
                value = token.value[2:].strip()
                if translator_comments and translator_comments[-1][0] == token.lineno - 1:
                    translator_comments.append((token.lineno, value))
                    continue
@@ -145,7 +152,7 @@ class javascript(Extractor):
                        translator_comments.append((token.lineno, value.strip()))
                        break
            elif token.type == "multilinecomment":
                # only one multi-line comment may precede a translation
                translator_comments = []
                value = token.value[2:-2].strip()
@@ -154,14 +161,13 @@ class javascript(Extractor):
                        lines = value.splitlines()
                        if lines:
                            lines[0] = lines[0].strip()
                            lines[1:] = dedent("\n".join(lines[1:])).splitlines()
                            for offset, line in enumerate(lines):
                                translator_comments.append((token.lineno + offset, line))
                        break
            elif funcname and call_stack == 0:
                if token.type == "operator" and token.value == ")":
                    if last_argument is not None:
                        messages.append(last_argument)
                    if len(messages) > 1:
@@ -173,13 +179,16 @@ class javascript(Extractor):
                    # Comments don't apply unless they immediately precede the
                    # message
                    if translator_comments and translator_comments[-1][0] < message_lineno - 1:
                        translator_comments = []

                    if messages is not None:
                        yield (
                            message_lineno,
                            funcname,
                            messages,
                            [comment[1] for comment in translator_comments],
                        )

                    funcname = message_lineno = last_argument = None
                    concatenate_next = False
@@ -187,47 +196,54 @@ class javascript(Extractor):
                    messages = []
                    call_stack = -1
                elif token.type == "string":
                    new_value = unquote_string(token.value)
                    if concatenate_next:
                        last_argument = (last_argument or "") + new_value
                        concatenate_next = False
                    else:
                        last_argument = new_value
                elif token.type == "operator":
                    if token.value == ",":
                        if last_argument is not None:
                            messages.append(last_argument)
                            last_argument = None
                        else:
                            messages.append(None)
                        concatenate_next = False
                    elif token.value == "+":
                        concatenate_next = True
            elif (
                call_stack > 0
                and token.type == "operator"
                and (token.value == ")" or token.value == "]" or token.value == "}")
            ):
                call_stack -= 1
            elif funcname and call_stack == -1:
                funcname = None
            elif (
                call_stack == -1
                and token.type == "name"
                and token.value in keywords
                and (
                    last_token is None
                    or last_token.type != "name"
                    or last_token.value != "function"
                )
            ):
                funcname = token.value
            last_token = token

    def extractFromFile(self, filepath):
        with codecs.open(filepath, "r", encoding="utf-8-sig") as fileObject:
            for lineno, funcname, messages, comments in self.extractJavascriptFromFile(fileObject):
                if funcname:
                    spec = self.options.get("keywords", {})[funcname] or (1,)
                else:
                    spec = (1,)
                if not isinstance(messages, (list, tuple)):
@@ -265,8 +281,10 @@ class javascript(Extractor):
                first_msg_index = spec[0] - 1
                if not messages[first_msg_index]:
                    # An empty string msgid isn't valid, emit a warning
                    where = "%s:%i" % (
                        hasattr(fileObject, "name") and fileObject.name or "(unknown)",
                        lineno,
                    )
                    print(self.empty_msgid_warning % where, file=sys.stderr)
                    continue
@@ -279,20 +297,17 @@ class javascript(Extractor):
                yield message, plural, context, lineno, comments


class cpp(javascript):
    """Extract messages from C++ source code."""

    pass


class txt(Extractor):
    """Extract messages from plain text files."""

    def extractFromFile(self, filepath):
        with codecs.open(filepath, "r", encoding="utf-8-sig") as fileObject:
            lineno = 0
            for line in [line.strip("\n\r") for line in fileObject.readlines()]:
                lineno += 1
@@ -300,10 +315,8 @@ class txt(Extractor):
                    yield line, None, None, lineno, []


class json(Extractor):
    """Extract messages from JSON files."""

    def __init__(self, directoryPath=None, filemasks=[], options={}):
        super(json, self).__init__(directoryPath, filemasks, options)
@@ -318,7 +331,7 @@ class json(Extractor):
        self.comments = self.options.get("comments", [])

    def extractFromFile(self, filepath):
        with codecs.open(filepath, "r", "utf-8") as fileObject:
            for message, context in self.extractFromString(fileObject.read()):
                yield message, None, context, None, self.comments
@@ -326,14 +339,16 @@ class json(Extractor):
        jsonDocument = jsonParser.loads(string)
        if isinstance(jsonDocument, list):
            for message, context in self.parseList(jsonDocument):
                if message:  # Skip empty strings.
                    yield message, context
        elif isinstance(jsonDocument, dict):
            for message, context in self.parseDictionary(jsonDocument):
                if message:  # Skip empty strings.
                    yield message, context
        else:
            raise Exception(
                "Unexpected JSON document parent structure (not a list or a dictionary). You must extend the JSON extractor to support it."
            )

    def parseList(self, itemsList):
        index = 0
@@ -356,8 +371,13 @@ class json(Extractor):
                    yield message, context
        elif isinstance(dictionary[keyword], dict):
            extract = None
            if (
                "extractFromInnerKeys" in self.keywords[keyword]
                and self.keywords[keyword]["extractFromInnerKeys"]
            ):
                for message, context in self.extractDictionaryInnerKeys(
                    dictionary[keyword], keyword
                ):
                    yield message, context
            else:
                extract = self.extractDictionary(dictionary[keyword], keyword)
@@ -386,7 +406,7 @@ class json(Extractor):
            if isinstance(listItem, str):
                yield self.extractString(listItem, keyword)
            elif isinstance(listItem, dict):
                extract = self.extractDictionary(listItem[keyword], keyword)
                if extract:
                    yield extract
            index += 1
@@ -420,8 +440,7 @@ class json(Extractor):


class xml(Extractor):
    """Extract messages from XML files."""

    def __init__(self, directoryPath, filemasks, options):
        super(xml, self).__init__(directoryPath, filemasks, options)
@@ -435,7 +454,8 @@ class xml(Extractor):
    def extractFromFile(self, filepath):
        from lxml import etree

        with codecs.open(filepath, "r", encoding="utf-8-sig") as fileObject:
            xmlDocument = etree.parse(fileObject)
            for keyword in self.keywords:
                for element in xmlDocument.iter(keyword):
@@ -457,7 +477,9 @@ class xml(Extractor):
                        context = self.keywords[keyword]["customContext"]
                    if "comment" in element.attrib:
                        comment = element.get("comment")
                        comment = " ".join(
                            comment.split()
                        )  # Remove tabs, line breaks and unnecessary spaces.
                        comments.append(comment)
                    if "splitOnWhitespace" in self.keywords[keyword]:
                        for splitText in element.text.split():
@@ -470,21 +492,22 @@ class xml(Extractor):


# Hack from http://stackoverflow.com/a/2819788
class FakeSectionHeader(object):
    def __init__(self, fp):
        self.fp = fp
        self.sechead = "[root]\n"

    def readline(self):
        if self.sechead:
            try:
                return self.sechead
            finally:
                self.sechead = None
        else:
            return self.fp.readline()


class ini(Extractor):
    """Extract messages from INI files."""

    def __init__(self, directoryPath, filemasks, options):
        super(ini, self).__init__(directoryPath, filemasks, options)
@@ -492,6 +515,7 @@ class ini(Extractor):
    def extractFromFile(self, filepath):
        import ConfigParser

        config = ConfigParser.RawConfigParser()
        config.readfp(FakeSectionHeader(open(filepath)))
        for keyword in self.keywords:
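Tying the extractors together: a caller configures them through the options dictionary read above, with "keywords" mapping function names to the argument positions holding the msgid (and plural form), and "commentTags" marking translator comments. A hypothetical sketch; the real file masks and keyword specs live in the message template definitions:

options = {
    "keywords": {"translate": (1,), "translatePlural": (1, 2)},
    "commentTags": ["Translation:"],
}
extractor = javascript("binaries/data/mods/public", ["gui/**.js"], options)
for message, plural, context, location, comments in extractor.run():
    print(location, message)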

@@ -26,16 +26,16 @@ from i18n_helper.catalog import Catalog
from i18n_helper.globber import getCatalogs

DEBUG_PREFIX = "X_X "


def generate_long_strings(root_path, input_file_name, output_file_name, languages=None):
    """
    Generate the 'long strings' debug catalog.
    This catalog contains the longest singular and plural string,
    found amongst all translated languages or a filtered subset.
    It can be used to check if GUI elements are large enough.
    The catalog is long.*.po
    """
    print("Generating", output_file_name)
    input_file_path = os.path.join(root_path, input_file_name)
@@ -48,8 +48,11 @@ def generate_long_strings(root_path, input_file_name, output_file_name, language
    # Fill catalog with English strings.
    for message in template_catalog:
        long_string_catalog.add(
            id=message.id,
            string=message.id,
            context=message.context,
            auto_comments=message.auto_comments,
        )

    # Load existing translation catalogs.
    existing_translation_catalogs = getCatalogs(input_file_path, languages)
@@ -58,18 +61,23 @@ def generate_long_strings(root_path, input_file_name, output_file_name, language
    for translation_catalog in existing_translation_catalogs:
        for long_string_catalog_message in long_string_catalog:
            translation_message = translation_catalog.get(
                long_string_catalog_message.id, long_string_catalog_message.context
            )
            if not translation_message or not translation_message.string:
                continue

            if (
                not long_string_catalog_message.pluralizable
                or not translation_message.pluralizable
            ):
                if len(translation_message.string) > len(long_string_catalog_message.string):
                    long_string_catalog_message.string = translation_message.string
                continue

            longest_singular_string = translation_message.string[0]
            longest_plural_string = translation_message.string[
                1 if len(translation_message.string) > 1 else 0
            ]

            candidate_singular_string = long_string_catalog_message.string[0]
            # There might be between 0 and infinite plural forms.
@@ -88,17 +96,19 @@ def generate_long_strings(root_path, input_file_name, output_file_name, language
            if changed:
                long_string_catalog_message.string = [
                    longest_singular_string,
                    longest_plural_string,
                ]
                translation_message = long_string_catalog_message
    long_string_catalog.writeTo(output_file_path)


def generate_debug(root_path, input_file_name, output_file_name):
    """
    Generate a debug catalog to identify untranslated strings.
    This prefixes all strings with DEBUG_PREFIX, to easily identify
    untranslated strings while still making the game navigable.
    The catalog is debug.*.po
    """
    print("Generating", output_file_name)
    input_file_path = os.path.join(root_path, input_file_name)
@@ -114,28 +124,34 @@ def generate_debug(root_path, input_file_name, output_file_name):
                id=message.id,
                string=(DEBUG_PREFIX + message.id[0],),
                context=message.context,
                auto_comments=message.auto_comments,
            )
        else:
            out_catalog.add(
                id=message.id,
                string=DEBUG_PREFIX + message.id,
                context=message.context,
                auto_comments=message.auto_comments,
            )

    out_catalog.writeTo(output_file_path)
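The effect on a single entry: every msgstr in the debug catalog is just the template msgid with DEBUG_PREFIX prepended, so untranslated strings are easy to spot in-game. With a made-up msgid:

DEBUG_PREFIX = "X_X "
msgid = "Single Player"  # hypothetical template string
print(DEBUG_PREFIX + msgid)  # msgstr written to debug.*.po: "X_X Single Player"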
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--debug",
        help="Generate debug localisation to identify non-translated strings.",
        action="store_true",
    )
    parser.add_argument(
        "--long",
        help="Generate 'long strings' localisation to identify GUI elements too small.",
        action="store_true",
    )
    parser.add_argument(
        "--languages", nargs="+", help="For long strings, restrict to these languages"
    )
    args = parser.parse_args()

    if not args.debug and not args.long:
@@ -145,24 +161,28 @@ def main():
    found_pot_files = 0
    for root, _, filenames in os.walk(projectRootDirectory):
        for filename in filenames:
            if (
                len(filename) > 4
                and filename[-4:] == ".pot"
                and os.path.basename(root) == l10nFolderName
            ):
                found_pot_files += 1
                if args.debug:
                    multiprocessing.Process(
                        target=generate_debug, args=(root, filename, "debug." + filename[:-1])
                    ).start()
                if args.long:
                    multiprocessing.Process(
                        target=generate_long_strings,
                        args=(root, filename, "long." + filename[:-1], args.languages),
                    ).start()
    if found_pot_files == 0:
        print(
            "This script did not work because no ‘.pot’ files were found. "
            "Please, run ‘updateTemplates.py’ to generate the ‘.pot’ files, and run ‘pullTranslations.py’ to pull the latest translations from Transifex. "
            "Then you can run this script to generate ‘.po’ files with obvious debug strings."
        )


if __name__ == "__main__":

@@ -3,4 +3,6 @@ import os
l10nFolderName = "l10n"
transifexClientFolder = ".tx"

l10nToolsDirectory = os.path.dirname(os.path.realpath(__file__))
projectRootDirectory = os.path.abspath(
    os.path.join(l10nToolsDirectory, os.pardir, os.pardir, os.pardir, os.pardir)
)
View File
@@ -1,14 +1,19 @@
"""Wrapper around babel Catalog / .po handling"""

from datetime import datetime

from babel.messages.catalog import Catalog as BabelCatalog
from babel.messages.pofile import read_po, write_po


class Catalog(BabelCatalog):
    """Wraps a BabelCatalog for convenience."""

    def __init__(self, *args, project=None, copyright_holder=None, **other_kwargs):
        date = datetime.now()
        super().__init__(
            *args,
            header_comment=(
                f"# Translation template for {project}.\n"
                f"# Copyright (C) {date.year} {copyright_holder}\n"
                f"# This file is distributed under the same license as the {project} project."
@@ -18,7 +23,8 @@ class Catalog(BabelCatalog):
            charset="utf-8",
            creation_date=date,
            revision_date=date,
            **other_kwargs,
        )
        self._project = project

    @BabelCatalog.mime_headers.getter
@@ -31,14 +37,15 @@ class Catalog(BabelCatalog):
                "MIME-Version",
                "Content-Type",
                "Content-Transfer-Encoding",
                "Plural-Forms",
            }:
                headers.append((name, value))
        return [("Project-Id-Version", self._project)] + headers

    @staticmethod
    def readFrom(file_path, locale=None):
        return read_po(open(file_path, "r+", encoding="utf-8"), locale=locale)

    def writeTo(self, file_path):
        return write_po(
View File
@@ -1,10 +1,12 @@
"""Utils to list .po"""

import os
from typing import List

from i18n_helper.catalog import Catalog


def getCatalogs(inputFilePath, filters: List[str] = None) -> List[Catalog]:
    """Returns a list of "real" catalogs (.po) in the given folder."""
    existingTranslationCatalogs = []
    l10nFolderPath = os.path.dirname(inputFilePath)
@@ -17,6 +19,9 @@ def getCatalogs(inputFilePath, filters : List[str] = None) -> List[Catalog]:
            continue
        if not filters or filename.split(".")[0] in filters:
            existingTranslationCatalogs.append(
                Catalog.readFrom(
                    os.path.join(l10nFolderPath, filename), locale=filename.split(".")[0]
                )
            )
    return existingTranslationCatalogs
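
For orientation, a hypothetical call site might look like the following sketch; the pot path and the locale filter are illustrative values, not taken from this changeset:

    import os

    # Gather just the German and French catalogs sitting next to engine.pot.
    pot_path = os.path.join("binaries", "data", "l10n", "engine.pot")  # example path
    for catalog in getCatalogs(pot_path, filters=["de", "fr"]):
        print(catalog.locale, len(catalog))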
View File
@@ -21,6 +21,7 @@ import subprocess
from i18n_helper import l10nFolderName, transifexClientFolder, projectRootDirectory


def main():
    for root, folders, _ in os.walk(projectRootDirectory):
        for folder in folders:
View File
@@ -1,11 +1,9 @@
import io

import pytest

from checkDiff import check_diff


PATCHES = [
    """
Index: binaries/data/l10n/en_GB.engine.po
===================================================================
--- binaries/data/l10n/en_GB.engine.po
@@ -21,7 +19,7 @@ Index: binaries/data/l10n/en_GB.engine.po
 msgid "The incoming stream version is unsupported"
""",
    """
Index: binaries/data/l10n/en_GB.engine.po
===================================================================
--- binaries/data/l10n/en_GB.engine.po
@@ -33,7 +31,7 @@ Index: binaries/data/l10n/en_GB.engine.po
 msgid "Stream error"
 msgstr "Stream error"
""",
    """
Index: binaries/data/l10n/en_GB.engine.po
===================================================================
--- binaries/data/l10n/en_GB.engine.po
@@ -65,7 +63,7 @@ Index: binaries/data/l10n/en_GB_3.engine.po
 msgid "Stream error"
 msgstr "Stream error"
""",
    """
Index: binaries/data/l10n/bar.engine.po
===================================================================
--- binaries/data/l10n/bar.engine.po
@@ -86,16 +84,17 @@ Index: binaries/data/l10n/bar.engine.po
 "Language-Team: Bavarian (http://www.transifex.com/wildfire-games/0ad/language/bar/)\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
""",
]

PATCHES_EXPECT_REVERT = [
    set(),
    {"binaries/data/l10n/en_GB.engine.po"},
    {"binaries/data/l10n/en_GB.engine.po", "binaries/data/l10n/en_GB_3.engine.po"},
    {"binaries/data/l10n/bar.engine.po"},
]


@pytest.fixture(params=zip(PATCHES, PATCHES_EXPECT_REVERT))
def patch(request):
    return [io.StringIO(request.param[0]), request.param[1]]
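
Given that fixture, the test body presumably boils down to comparing check_diff's result against the expected revert set. A hedged sketch — check_diff's exact signature and return type are inferred from the fixture, not shown in this hunk:

    def test_check_diff(patch):
        # patch[0]: the diff as a file-like object; patch[1]: the set of .po
        # files whose changes are metadata-only and should be reverted.
        assert check_diff(patch[0]) == patch[1]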
View File
@@ -16,42 +16,56 @@
# You should have received a copy of the GNU General Public License
# along with 0 A.D. If not, see <http://www.gnu.org/licenses/>.

import json
import os
import multiprocessing
from importlib import import_module

from i18n_helper import l10nFolderName, projectRootDirectory
from i18n_helper.catalog import Catalog

messagesFilename = "messages.json"


def warnAboutUntouchedMods():
    """
    Warn about mods that are not properly configured to get their messages extracted.
    """
    modsRootFolder = os.path.join(projectRootDirectory, "binaries", "data", "mods")
    untouchedMods = {}
    for modFolder in os.listdir(modsRootFolder):
        if modFolder[0] != "_" and modFolder[0] != ".":
            if not os.path.exists(os.path.join(modsRootFolder, modFolder, l10nFolderName)):
                untouchedMods[modFolder] = (
                    "There is no '{folderName}' folder in the root folder of this mod.".format(
                        folderName=l10nFolderName
                    )
                )
            elif not os.path.exists(
                os.path.join(modsRootFolder, modFolder, l10nFolderName, messagesFilename)
            ):
                untouchedMods[modFolder] = (
                    "There is no '{filename}' file within the '{folderName}' folder in the root folder of this mod.".format(
                        folderName=l10nFolderName, filename=messagesFilename
                    )
                )
    if untouchedMods:
        print("" "Warning: No messages were extracted from the following mods:" "")
        for mod in untouchedMods:
            print(
                "{modName}: {warningMessage}".format(
                    modName=mod, warningMessage=untouchedMods[mod]
                )
            )
        print(
            ""
            f"For this script to extract messages from a mod folder, this mod folder must contain a '{l10nFolderName}' "
            f"folder, and this folder must contain a '{messagesFilename}' file that describes how to extract messages for the "
            f"mod. See the folder of the main mod ('public') for an example, and see the documentation for more "
            f"information."
        )


def generatePOT(templateSettings, rootPath):
    if "skip" in templateSettings and templateSettings["skip"] == "yes":
@@ -64,7 +78,7 @@ def generatePOT(templateSettings, rootPath):
    template = Catalog(
        project=templateSettings["project"],
        copyright_holder=templateSettings["copyrightHolder"],
        locale="en",
    )

    for rule in templateSettings["rules"]:
@@ -72,7 +86,7 @@ def generatePOT(templateSettings, rootPath):
            return
        options = rule.get("options", {})
        extractorClass = getattr(import_module("extractors.extractors"), rule["extractor"])
        extractor = extractorClass(inputRootPath, rule["filemasks"], options)
        formatFlag = None
        if "format" in options:
@@ -84,31 +98,34 @@ def generatePOT(templateSettings, rootPath):
                id=message_id,
                context=context,
                auto_comments=comments,
                flags=[formatFlag] if formatFlag and message.find("%") != -1 else [],
            )
            saved_message.locations.append(location)
            saved_message.flags.discard("python-format")
    template.writeTo(os.path.join(rootPath, templateSettings["output"]))
    print('Generated "{}" with {} messages.'.format(templateSettings["output"], len(template)))


def generateTemplatesForMessagesFile(messagesFilePath):
    with open(messagesFilePath, "r") as fileObject:
        settings = json.load(fileObject)
    for templateSettings in settings:
        multiprocessing.Process(
            target=generatePOT, args=(templateSettings, os.path.dirname(messagesFilePath))
        ).start()


def main():
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--scandir",
        help="Directory to start scanning for l10n folders in. "
        "Type '.' for current working directory",
    )
    args = parser.parse_args()
    for root, folders, filenames in os.walk(args.scandir or projectRootDirectory):
        for folder in folders:
View File
@@ -23,118 +23,148 @@
# THE SOFTWARE.

import argparse
import os
import struct
import sys

parser = argparse.ArgumentParser(
    description="Convert maps compatible with 0 A.D. version Alpha XVIII (A18) to maps compatible with version Alpha XIX (A19), or the other way around."
)
parser.add_argument(
    "--reverse",
    action="store_true",
    help="Make an A19 map compatible with A18 (note that conversion will fail if mountains are too high)",
)
parser.add_argument(
    "--no-version-bump", action="store_true", help="Don't change the version number of the map"
)
parser.add_argument(
    "--no-color-spelling",
    action="store_true",
    help="Don't change the spelling of color and colour",
)
parser.add_argument("--no-height-change", action="store_true", help="Don't change the heightmap")
parser.add_argument(
    "files", nargs="+", help="XML file to process (use wildcards '*' to select multiple files)"
)
args = parser.parse_args()

HEIGHTMAP_BIT_SHIFT = 3

for xmlFile in args.files:
    pmpFile = xmlFile[:-3] + "pmp"

    print("Processing " + xmlFile + " ...")

    if os.path.isfile(pmpFile):
        with open(pmpFile, "rb") as f1, open(pmpFile + "~", "wb") as f2:
            # 4 bytes PSMP to start the file
            f2.write(f1.read(4))

            # 4 bytes to encode the version of the file format
            version = struct.unpack("<I", f1.read(4))[0]
            if args.no_version_bump:
                f2.write(struct.pack("<I", version))
            else:
                if args.reverse:
                    if version != 6:
                        print(
                            "Warning: File "
                            + pmpFile
                            + " was not at version 6, while a negative version bump was requested.\nABORTING ..."
                        )
                        continue
                    f2.write(struct.pack("<I", version - 1))
                else:
                    if version != 5:
                        print(
                            "Warning: File "
                            + pmpFile
                            + " was not at version 5, while a version bump was requested.\nABORTING ..."
                        )
                        continue
                    f2.write(struct.pack("<I", version + 1))

            # 4 bytes for the file size (which shouldn't change)
            f2.write(f1.read(4))

            # 4 bytes to encode the map size
            map_size = struct.unpack("<I", f1.read(4))[0]
            f2.write(struct.pack("<I", map_size))

            # scale all heights using the shift '>>' operator
            if args.no_height_change:

                def height_transform(h):
                    return h
            else:
                if args.reverse:

                    def height_transform(h):
                        return h << HEIGHTMAP_BIT_SHIFT
                else:

                    def height_transform(h):
                        return h >> HEIGHTMAP_BIT_SHIFT

            for i in range(0, (map_size * 16 + 1) * (map_size * 16 + 1)):
                height = struct.unpack("<H", f1.read(2))[0]
                f2.write(struct.pack("<H", height_transform(height)))

            # copy the rest of the file
            byte = f1.read(1)
            while byte != b"":
                f2.write(byte)
                byte = f1.read(1)

            f2.close()
            f1.close()

        # replace the old file, comment to see both files
        os.remove(pmpFile)
        os.rename(pmpFile + "~", pmpFile)

    if os.path.isfile(xmlFile):
        with open(xmlFile, "r") as f1, open(xmlFile + "~", "w") as f2:
            data = f1.read()

            # bump version number (rely on how Atlas formats the XML)
            if not args.no_version_bump:
                if args.reverse:
                    if data.find('<Scenario version="6">') == -1:
                        print(
                            "Warning: File "
                            + xmlFile
                            + " was not at version 6, while a negative version bump was requested.\nABORTING ..."
                        )
                        sys.exit()
                    else:
                        data = data.replace('<Scenario version="6">', '<Scenario version="5">')
                else:
                    if data.find('<Scenario version="5">') == -1:
                        print(
                            "Warning: File "
                            + xmlFile
                            + " was not at version 5, while a version bump was requested.\nABORTING ..."
                        )
                        sys.exit()
                    else:
                        data = data.replace('<Scenario version="5">', '<Scenario version="6">')

            # transform the color keys
            if not args.no_color_spelling:
                if args.reverse:
                    data = data.replace("color", "colour").replace("Color", "Colour")
                else:
                    data = data.replace("colour", "color").replace("Colour", "Color")

            f2.write(data)
            f1.close()
            f2.close()

        # replace the old file, comment to see both files
        os.remove(xmlFile)
        os.rename(xmlFile + "~", xmlFile)
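
As a quick sanity check of the shift arithmetic (the sample values below are illustrative): the A18→A19 direction divides every 16-bit height sample by 8, and --reverse multiplies by 8, which is why the reverse conversion fails when mountains are too high:

    HEIGHTMAP_BIT_SHIFT = 3

    h = 0x4000  # example A18 height sample
    assert h >> HEIGHTMAP_BIT_SHIFT == 0x0800  # A18 -> A19: divide by 8
    assert (h >> HEIGHTMAP_BIT_SHIFT) << HEIGHTMAP_BIT_SHIFT == h  # round trip

    # Any A19 height above 0x1FFF no longer fits in 16 bits when shifted
    # back up, so --reverse would overflow on such samples.
    assert (0x2000 << HEIGHTMAP_BIT_SHIFT) > 0xFFFF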
View File
@@ -4,31 +4,36 @@ import zero_ad
# First, we will define some helper functions we will use later.
import math


def dist(p1, p2):
    return math.sqrt(sum((math.pow(x2 - x1, 2) for (x1, x2) in zip(p1, p2))))


def center(units):
    sum_position = map(sum, zip(*map(lambda u: u.position(), units)))
    return [x / len(units) for x in sum_position]


def closest(units, position):
    dists = (dist(unit.position(), position) for unit in units)
    index = 0
    min_dist = next(dists)
    for i, d in enumerate(dists):
        if d < min_dist:
            index = i
            min_dist = d

    return units[index]


# Connect to a 0 AD game server listening at localhost:6000
game = zero_ad.ZeroAD("http://localhost:6000")

# Load the Arcadia map
samples_dir = path.dirname(path.realpath(__file__))
scenario_config_path = path.join(samples_dir, "arcadia.json")
with open(scenario_config_path, "r") as f:
    arcadia_config = f.read()

state = game.reset(arcadia_config)
@@ -37,15 +42,15 @@ state = game.reset(arcadia_config)
state = game.step()

# Units can be queried from the game state
citizen_soldiers = state.units(owner=1, type="infantry")

# (including gaia units like trees or other resources)
nearby_tree = closest(state.units(owner=0, type="tree"), center(citizen_soldiers))

# Action commands can be created using zero_ad.actions
collect_wood = zero_ad.actions.gather(citizen_soldiers, nearby_tree)

female_citizens = state.units(owner=1, type="female_citizen")
house_tpl = "structures/spart/house"
x = 680
z = 640
build_house = zero_ad.actions.construct(female_citizens, house_tpl, x, z, autocontinue=True)
@@ -58,20 +63,24 @@ female_id = female_citizens[0].id()
female_citizen = state.unit(female_id)

# A variety of unit information can be queried from the unit:
print("female citizen's max health is", female_citizen.max_health())

# Raw data for units and game states are available via the data attribute
print(female_citizen.data)

# Units can be built using the "train action"
civic_center = state.units(owner=1, type="civil_centre")[0]
spearman_type = "units/spart/infantry_spearman_b"
train_spearmen = zero_ad.actions.train([civic_center], spearman_type)
state = game.step([train_spearmen])


# Let's step the engine until the house has been built
def is_unit_busy(state, unit_id):
    return len(state.unit(unit_id).data["unitAIOrderData"]) > 0


while is_unit_busy(state, female_id):
    state = game.step()
@@ -85,14 +94,16 @@ for _ in range(150):
    state = game.step()

# Let's attack with our entire military
state = game.step([zero_ad.actions.chat("An attack is coming!")])
while len(state.units(owner=2, type="unit")) > 0:
    attack_units = [
        unit for unit in state.units(owner=1, type="unit") if "female" not in unit.type()
    ]
    target = closest(state.units(owner=2, type="unit"), center(attack_units))
    state = game.step([zero_ad.actions.attack(attack_units, target)])
    while state.unit(target.id()):
        state = game.step()

game.step([zero_ad.actions.chat("The enemies have been vanquished. Our home is safe again.")])
View File
@@ -1,13 +1,14 @@
from setuptools import setup

setup(
    name="zero_ad",
    version="0.0.1",
    description="Python client for 0 AD",
    url="https://code.wildfiregames.com",
    author="Brian Broll",
    author_email="brian.broll@gmail.com",
    install_requires=[],
    license="MIT",
    packages=["zero_ad"],
    zip_safe=False,
)
View File
@@ -1,35 +1,38 @@
import zero_ad
import math
from os import path

game = zero_ad.ZeroAD("http://localhost:6000")
scriptdir = path.dirname(path.realpath(__file__))
with open(path.join(scriptdir, "..", "samples", "arcadia.json"), "r") as f:
    config = f.read()


def dist(p1, p2):
    return math.sqrt(sum((math.pow(x2 - x1, 2) for (x1, x2) in zip(p1, p2))))


def center(units):
    sum_position = map(sum, zip(*map(lambda u: u.position(), units)))
    return [x / len(units) for x in sum_position]


def closest(units, position):
    dists = (dist(unit.position(), position) for unit in units)
    index = 0
    min_dist = next(dists)
    for i, d in enumerate(dists):
        if d < min_dist:
            index = i
            min_dist = d

    return units[index]


def test_construct():
    state = game.reset(config)
    female_citizens = state.units(owner=1, type="female_citizen")
    house_tpl = "structures/spart/house"
    house_count = len(state.units(owner=1, type=house_tpl))
    x = 680
    z = 640
@@ -39,21 +42,23 @@ def test_construct():
    while len(state.units(owner=1, type=house_tpl)) == house_count:
        state = game.step()


def test_gather():
    state = game.reset(config)
    female_citizen = state.units(owner=1, type="female_citizen")[0]
    state.units(owner=0, type="tree")
    nearby_tree = closest(state.units(owner=0, type="tree"), female_citizen.position())
    collect_wood = zero_ad.actions.gather([female_citizen], nearby_tree)
    state = game.step([collect_wood])
    while len(state.unit(female_citizen.id()).data["resourceCarrying"]) == 0:
        state = game.step()


def test_train():
    state = game.reset(config)
    civic_centers = state.units(owner=1, type="civil_centre")
    spearman_type = "units/spart/infantry_spearman_b"
    spearman_count = len(state.units(owner=1, type=spearman_type))
    train_spearmen = zero_ad.actions.train(civic_centers, spearman_type)
@@ -61,9 +66,10 @@ def test_train():
    while len(state.units(owner=1, type=spearman_type)) == spearman_count:
        state = game.step()


def test_walk():
    state = game.reset(config)
    female_citizens = state.units(owner=1, type="female_citizen")
    x = 680
    z = 640
    initial_distance = dist(center(female_citizens), [x, z])
@@ -73,13 +79,14 @@ def test_walk():
    distance = initial_distance
    while distance >= initial_distance:
        state = game.step()
        female_citizens = state.units(owner=1, type="female_citizen")
        distance = dist(center(female_citizens), [x, z])


def test_attack():
    state = game.reset(config)
    unit = state.units(owner=1, type="cavalry")[0]
    target = state.units(owner=2, type="female_citizen")[0]
    initial_health_target = target.health()
    initial_health_unit = unit.health()
@@ -87,11 +94,13 @@ def test_attack():
    attack = zero_ad.actions.attack([unit], target)
    state = game.step([attack])
    while (state.unit(target.id()).health() >= initial_health_target) and (
        state.unit(unit.id()).health() >= initial_health_unit
    ):
        state = game.step()


def test_chat():
    game.reset(config)
    chat = zero_ad.actions.chat("hello world!!")
    game.step([chat])
View File
@@ -1,44 +1,48 @@
import zero_ad
from os import path

game = zero_ad.ZeroAD("http://localhost:6000")
scriptdir = path.dirname(path.realpath(__file__))
with open(path.join(scriptdir, "..", "samples", "arcadia.json"), "r") as f:
    config = f.read()

with open(path.join(scriptdir, "fastactions.js"), "r") as f:
    fastactions = f.read()


def test_return_object():
    game.reset(config)
    result = game.evaluate('({"hello": "world"})')
    assert type(result) is dict
    assert result["hello"] == "world"


def test_return_null():
    result = game.evaluate("null")
    assert result is None


def test_return_string():
    game.reset(config)
    result = game.evaluate('"cat"')
    assert result == "cat"


def test_fastactions():
    state = game.reset(config)
    game.evaluate(fastactions)
    female_citizens = state.units(owner=1, type="female_citizen")
    house_tpl = "structures/spart/house"
    len(state.units(owner=1, type=house_tpl))
    x = 680
    z = 640
    build_house = zero_ad.actions.construct(female_citizens, house_tpl, x, z, autocontinue=True)
    # Check that they start building the house
    state = game.step([build_house])

    def new_house(_=None):
        return state.units(owner=1, type=house_tpl)[0]

    initial_health = new_house().health(ratio=True)
    while new_house().health(ratio=True) == initial_health:
        state = game.step()
View File
@@ -1,4 +1,5 @@
from . import actions  # noqa: F401
from . import environment

ZeroAD = environment.ZeroAD
GameState = environment.GameState
View File
@@ -1,63 +1,57 @@
def construct(units, template, x, z, angle=0, autorepair=True, autocontinue=True, queued=False):
    unit_ids = [unit.id() for unit in units]
    return {
        "type": "construct",
        "entities": unit_ids,
        "template": template,
        "x": x,
        "z": z,
        "angle": angle,
        "autorepair": autorepair,
        "autocontinue": autocontinue,
        "queued": queued,
    }


def gather(units, target, queued=False):
    unit_ids = [unit.id() for unit in units]
    return {
        "type": "gather",
        "entities": unit_ids,
        "target": target.id(),
        "queued": queued,
    }


def train(entities, unit_type, count=1):
    entity_ids = [unit.id() for unit in entities]
    return {
        "type": "train",
        "entities": entity_ids,
        "template": unit_type,
        "count": count,
    }


def chat(message):
    return {"type": "aichat", "message": message}


def reveal_map():
    return {"type": "reveal-map", "enable": True}


def walk(units, x, z, queued=False):
    ids = [unit.id() for unit in units]
    return {"type": "walk", "entities": ids, "x": x, "z": z, "queued": queued}


def attack(units, target, queued=False, allow_capture=True):
    unit_ids = [unit.id() for unit in units]
    return {
        "type": "attack",
        "entities": unit_ids,
        "target": target.id(),
        "allowCapture": allow_capture,
        "queued": queued,
    }
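
Each helper simply builds a plain command dict; as in the sample walkthrough above, the dicts are handed to the game in a list. A minimal sketch, reusing the illustrative coordinates from the samples:

    import zero_ad

    game = zero_ad.ZeroAD("http://localhost:6000")
    state = game.step()

    # Order all of player 1's units to walk to an example location.
    units = state.units(owner=1, type="unit")
    state = game.step([zero_ad.actions.walk(units, 680, 640)])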
View File
@@ -1,33 +1,33 @@
from urllib import request
import json


class RLAPI:
    def __init__(self, url):
        self.url = url

    def post(self, route, data):
        response = request.urlopen(url=f"{self.url}/{route}", data=bytes(data, "utf8"))
        return response.read()

    def step(self, commands):
        post_data = "\n".join((f"{player};{json.dumps(action)}" for (player, action) in commands))
        return self.post("step", post_data)

    def reset(self, scenario_config, player_id, save_replay):
        path = "reset?"
        if save_replay:
            path += "saveReplay=1&"
        if player_id:
            path += f"playerID={player_id}&"
        return self.post(path, scenario_config)

    def get_templates(self, names):
        post_data = "\n".join(names)
        response = self.post("templates", post_data)
        return zip(names, response.decode().split("\n"))

    def evaluate(self, code):
        response = self.post("evaluate", code)
        return json.loads(response.decode())
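
So each step command is serialized as one player;json-action line posted to <url>/step. For instance, the two commands below (built from the action helpers above, with made-up values) would produce the payload shown in the comment:

    commands = [
        (1, {"type": "aichat", "message": "hi"}),
        (2, {"type": "reveal-map", "enable": True}),
    ]
    # Body posted to <url>/step:
    #   1;{"type": "aichat", "message": "hi"}
    #   2;{"type": "reveal-map", "enable": true}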
View File
@@ -1,11 +1,11 @@
from .api import RLAPI
import json
from xml.etree import ElementTree
from itertools import cycle


class ZeroAD:
    def __init__(self, uri="http://localhost:6000"):
        self.api = RLAPI(uri)
        self.current_state = None
        self.cache = {}
@@ -20,7 +20,7 @@ class ZeroAD():
        self.current_state = GameState(json.loads(state_json), self)
        return self.current_state

    def reset(self, config="", save_replay=False, player_id=1):
        state_json = self.api.reset(config, player_id, save_replay)
        self.current_state = GameState(json.loads(state_json), self)
        return self.current_state
@@ -33,7 +33,7 @@ class ZeroAD():
    def get_templates(self, names):
        templates = self.api.get_templates(names)
        return [(name, EntityTemplate(content)) for (name, content) in templates]

    def update_templates(self, types=[]):
        all_types = list(set([unit.type() for unit in self.current_state.units()]))
@@ -41,54 +41,60 @@ class ZeroAD():
        template_pairs = self.get_templates(all_types)
        self.cache = {}
        for name, tpl in template_pairs:
            self.cache[name] = tpl
        return template_pairs


class GameState:
    def __init__(self, data, game):
        self.data = data
        self.game = game
        self.mapSize = self.data["mapSize"]

    def units(self, owner=None, type=None):
        def filter_fn(e):
            return (owner is None or e["owner"] == owner) and (
                type is None or type in e["template"]
            )

        return [Entity(e, self.game) for e in self.data["entities"].values() if filter_fn(e)]

    def unit(self, id):
        id = str(id)
        return (
            Entity(self.data["entities"][id], self.game) if id in self.data["entities"] else None
        )


class Entity:
    def __init__(self, data, game):
        self.data = data
        self.game = game
        self.template = self.game.cache.get(self.type(), None)

    def type(self):
        return self.data["template"]

    def id(self):
        return self.data["id"]

    def owner(self):
        return self.data["owner"]

    def max_health(self):
        template = self.get_template()
        return float(template.get("Health/Max"))

    def health(self, ratio=False):
        if ratio:
            return self.data["hitpoints"] / self.max_health()
        return self.data["hitpoints"]

    def position(self):
        return self.data["position"]

    def get_template(self):
        if self.template is None:
@@ -97,9 +103,10 @@ class Entity():
        return self.template


class EntityTemplate:
    def __init__(self, xml):
        self.data = ElementTree.fromstring(f"<Entity>{xml}</Entity>")

    def get(self, path):
        node = self.data.find(path)
@@ -113,4 +120,4 @@ class EntityTemplate():
        return node is not None

    def __str__(self):
        return ElementTree.tostring(self.data).decode("utf-8")
View File
@@ -22,14 +22,12 @@
# THE SOFTWARE.

import argparse
import hashlib
import itertools
import json
import os
import subprocess
import sys
import yaml
import xml.etree.ElementTree as ET
@@ -40,29 +38,32 @@ def execute(command):
        process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = process.communicate()
    except:
        sys.stderr.write("Failed to run command: {}\n".format(" ".join(command)))
        raise
    return process.returncode, out, err


def calculate_hash(path):
    assert os.path.isfile(path)
    with open(path, "rb") as handle:
        return hashlib.sha1(handle.read()).hexdigest()


def compare_spirv(path1, path2):
    with open(path1, "rb") as handle:
        spirv1 = handle.read()
    with open(path2, "rb") as handle:
        spirv2 = handle.read()
    return spirv1 == spirv2


def resolve_if(defines, expression):
    for item in expression.strip().split("||"):
        item = item.strip()
        assert len(item) > 1
        name = item
        invert = False
        if name[0] == "!":
            invert = True
            name = item[1:]
            assert item[1].isalpha()
@@ -70,210 +71,267 @@ def resolve_if(defines, expression):
            assert item[0].isalpha()
        found_define = False
        for define in defines:
            if define["name"] == name:
                assert (
                    define["value"] == "UNDEFINED"
                    or define["value"] == "0"
                    or define["value"] == "1"
                )
                if invert:
                    if define["value"] != "1":
                        return True
                    found_define = True
                else:
                    if define["value"] == "1":
                        return True
        if invert and not found_define:
            return True
    return False
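
To make the semantics concrete: resolve_if returns True as soon as any ||-term is satisfied, where a bare name requires the define to have value "1" and a !name term is satisfied when the define is absent or not "1". A worked example with made-up defines:

    defines = [
        {"name": "USE_SHADOW", "value": "1"},
        {"name": "USE_FP_SHADOW", "value": "0"},
    ]

    assert resolve_if(defines, "USE_SHADOW")  # defined as "1"
    assert resolve_if(defines, "!USE_FP_SHADOW")  # defined, but not "1"
    assert resolve_if(defines, "!UNKNOWN")  # an absent define satisfies "!"
    assert not resolve_if(defines, "USE_FP_SHADOW")  # "0" is not "1"
    assert resolve_if(defines, "USE_FP_SHADOW || USE_SHADOW")  # any term suffices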
def compile_and_reflect(
    input_mod_path, output_mod_path, dependencies, stage, path, out_path, defines
):
    keep_debug = False
    input_path = os.path.normpath(path)
    output_path = os.path.normpath(out_path)
    command = [
        "glslc",
        "-x",
        "glsl",
        "--target-env=vulkan1.1",
        "-std=450core",
        "-I",
        os.path.join(input_mod_path, "shaders", "glsl"),
    ]
    for dependency in dependencies:
        if dependency != input_mod_path:
            command += ["-I", os.path.join(dependency, "shaders", "glsl")]
    command += [
        "-fshader-stage=" + stage,
        "-O",
        input_path,
    ]
    use_descriptor_indexing = False
    for define in defines:
        if define["value"] == "UNDEFINED":
            continue
        assert " " not in define["value"]
        command.append("-D{}={}".format(define["name"], define["value"]))
        if define["name"] == "USE_DESCRIPTOR_INDEXING":
            use_descriptor_indexing = True
    command.append("-D{}={}".format("USE_SPIRV", "1"))
    command.append("-DSTAGE_{}={}".format(stage.upper(), "1"))
    command += ["-o", output_path]
    # Compile the shader with debug information to see names in reflection.
    ret, out, err = execute(command + ["-g"])
    if ret:
        sys.stderr.write(
            "Command returned {}:\nCommand: {}\nInput path: {}\nOutput path: {}\nError: {}\n".format(
                ret, " ".join(command), input_path, output_path, err
            )
        )
        preprocessor_output_path = os.path.abspath(
            os.path.join(os.path.dirname(__file__), "preprocessed_file.glsl")
        )
        execute(command[:-2] + ["-g", "-E", "-o", preprocessor_output_path])
        raise ValueError(err)
    ret, out, err = execute(["spirv-reflect", "-y", "-v", "1", output_path])
    if ret:
        sys.stderr.write(
            "Command returned {}:\nCommand: {}\nInput path: {}\nOutput path: {}\nError: {}\n".format(
                ret, " ".join(command), input_path, output_path, err
            )
        )
        raise ValueError(err)
    # Reflect the result SPIRV.
    data = yaml.safe_load(out)
    module = data["module"]
    interface_variables = []
    if "all_interface_variables" in data and data["all_interface_variables"]:
        interface_variables = data["all_interface_variables"]
    push_constants = []
    vertex_attributes = []
    if "push_constants" in module and module["push_constants"]:
        assert len(module["push_constants"]) == 1

        def add_push_constants(node, push_constants):
            if ("members" in node) and node["members"]:
                for member in node["members"]:
                    add_push_constants(member, push_constants)
            else:
                assert node["absolute_offset"] + node["size"] <= 128
                push_constants.append(
                    {
                        "name": node["name"],
                        "offset": node["absolute_offset"],
                        "size": node["size"],
                    }
                )

        assert module["push_constants"][0]["type_description"]["type_name"] == "DrawUniforms"
        assert module["push_constants"][0]["size"] <= 128
        add_push_constants(module["push_constants"][0], push_constants)
    descriptor_sets = []
    if "descriptor_sets" in module and module["descriptor_sets"]:
        VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER = 1
        VK_DESCRIPTOR_TYPE_STORAGE_IMAGE = 3
        VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER = 6
        VK_DESCRIPTOR_TYPE_STORAGE_BUFFER = 7
        for descriptor_set in module["descriptor_sets"]:
            UNIFORM_SET = 1 if use_descriptor_indexing else 0
            STORAGE_SET = 2
            bindings = []
            if descriptor_set["set"] == UNIFORM_SET:
                assert descriptor_set["binding_count"] > 0
                for binding in descriptor_set["bindings"]:
                    assert binding["set"] == UNIFORM_SET
                    block = binding["block"]
                    members = []
                    for member in block["members"]:
                        members.append(
                            {
                                "name": member["name"],
                                "offset": member["absolute_offset"],
                                "size": member["size"],
                            }
                        )
                    bindings.append(
                        {
                            "binding": binding["binding"],
                            "type": "uniform",
                            "size": block["size"],
                            "members": members,
                        }
                    )
                binding = descriptor_set["bindings"][0]
                assert binding["descriptor_type"] == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
            elif descriptor_set["set"] == STORAGE_SET:
                assert descriptor_set["binding_count"] > 0
                for binding in descriptor_set["bindings"]:
                    is_storage_image = (
                        binding["descriptor_type"] == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
                    )
                    is_storage_buffer = (
                        binding["descriptor_type"] == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
                    )
                    assert is_storage_image or is_storage_buffer
                    assert (
                        binding["descriptor_type"]
                        == descriptor_set["bindings"][0]["descriptor_type"]
                    )
                    assert binding["image"]["arrayed"] == 0
                    assert binding["image"]["ms"] == 0
                    bindingType = "storageImage"
                    if is_storage_buffer:
                        bindingType = "storageBuffer"
                    bindings.append(
                        {
                            "binding": binding["binding"],
                            "type": bindingType,
                            "name": binding["name"],
                        }
                    )
            else:
                if use_descriptor_indexing:
                    if descriptor_set["set"] == 0:
                        assert descriptor_set["binding_count"] >= 1
                        for binding in descriptor_set["bindings"]:
                            assert (
                                binding["descriptor_type"]
                                == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
                            )
                            assert binding["array"]["dims"][0] == 16384
                            if binding["binding"] == 0:
                                assert binding["name"] == "textures2D"
                            elif binding["binding"] == 1:
                                assert binding["name"] == "texturesCube"
                            elif binding["binding"] == 2:
                                assert binding["name"] == "texturesShadow"
                            else:
                                assert False
                else:
                    assert descriptor_set["binding_count"] > 0
                    for binding in descriptor_set["bindings"]:
                        assert (
                            binding["descriptor_type"] == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
                        )
                        assert binding["image"]["sampled"] == 1
                        assert binding["image"]["arrayed"] == 0
                        assert binding["image"]["ms"] == 0
                        sampler_type = "sampler{}D".format(binding["image"]["dim"] + 1)
                        if binding["image"]["dim"] == 3:
                            sampler_type = "samplerCube"
                        bindings.append(
                            {
                                "binding": binding["binding"],
                                "type": sampler_type,
                                "name": binding["name"],
                            }
                        )
            descriptor_sets.append(
                {
                    "set": descriptor_set["set"],
                    "bindings": bindings,
                }
            )
    if stage == "vertex":
        for variable in interface_variables:
            if variable["storage_class"] == 1:
                # Input.
                vertex_attributes.append(
                    {
                        "name": variable["name"],
                        "location": variable["location"],
                    }
                )
    # Compile the final version without debug information.
    if not keep_debug:
        ret, out, err = execute(command)
        if ret:
            sys.stderr.write(
                "Command returned {}:\nCommand: {}\nInput path: {}\nOutput path: {}\nError: {}\n".format(
                    ret, " ".join(command), input_path, output_path, err
                )
            )
            raise ValueError(err)
    return {
        "push_constants": push_constants,
        "vertex_attributes": vertex_attributes,
        "descriptor_sets": descriptor_sets,
    }
def output_xml_tree(tree, path): def output_xml_tree(tree, path):
''' We use a simple custom printer to have the same output for all platforms.''' """We use a simple custom printer to have the same output for all platforms."""
with open(path, 'wt') as handle: with open(path, "wt") as handle:
handle.write('<?xml version="1.0" encoding="utf-8"?>\n') handle.write('<?xml version="1.0" encoding="utf-8"?>\n')
handle.write('<!-- DO NOT EDIT: GENERATED BY SCRIPT {} -->\n'.format(os.path.basename(__file__))) handle.write(
"<!-- DO NOT EDIT: GENERATED BY SCRIPT {} -->\n".format(os.path.basename(__file__))
)
def output_xml_node(node, handle, depth): def output_xml_node(node, handle, depth):
indent = '\t' * depth indent = "\t" * depth
attributes = '' attributes = ""
for attribute_name in sorted(node.attrib.keys()): for attribute_name in sorted(node.attrib.keys()):
attributes += ' {}="{}"'.format(attribute_name, node.attrib[attribute_name]) attributes += ' {}="{}"'.format(attribute_name, node.attrib[attribute_name])
if len(node) > 0: if len(node) > 0:
handle.write('{}<{}{}>\n'.format(indent, node.tag, attributes)) handle.write("{}<{}{}>\n".format(indent, node.tag, attributes))
for child in node: for child in node:
output_xml_node(child, handle, depth + 1) output_xml_node(child, handle, depth + 1)
handle.write('{}</{}>\n'.format(indent, node.tag)) handle.write("{}</{}>\n".format(indent, node.tag))
else: else:
handle.write('{}<{}{}/>\n'.format(indent, node.tag, attributes)) handle.write("{}<{}{}/>\n".format(indent, node.tag, attributes))
output_xml_node(tree.getroot(), handle, 0) output_xml_node(tree.getroot(), handle, 0)
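For reference, the printer walks the tree depth-first, indents with tabs, and sorts attributes alphabetically, so the same tree always serializes identically on every platform. A minimal sketch of what it emits (the element names here are made up for illustration):

    import xml.etree.ElementTree as ET

    root = ET.Element("program")
    root.set("type", "spirv")
    ET.SubElement(root, "vertex").set("file", "spirv/dummy.vs.spv")
    # output_xml_tree(ET.ElementTree(root), "out.xml") would write:
    # <?xml version="1.0" encoding="utf-8"?>
    # <!-- DO NOT EDIT: GENERATED BY SCRIPT ... -->  (the script's own file name)
    # <program type="spirv">
    # 	<vertex file="spirv/dummy.vs.spv"/>
    # </program>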
def build(rules, input_mod_path, output_mod_path, dependencies, program_name):
    sys.stdout.write('Program "{}"\n'.format(program_name))
    if rules and program_name not in rules:
        sys.stdout.write(" Skip.\n")
        return
    sys.stdout.write(" Building.\n")
    rebuild = False
@@ -281,64 +339,76 @@ def build(rules, input_mod_path, output_mod_path, dependencies, program_name):
    program_defines = []
    shaders = []

    tree = ET.parse(os.path.join(input_mod_path, "shaders", "glsl", program_name + ".xml"))
    root = tree.getroot()
    for element in root:
        element_tag = element.tag
        if element_tag == "defines":
            for child in element:
                values = []
                for value in child:
                    values.append(
                        {
                            "name": child.attrib["name"],
                            "value": value.text,
                        }
                    )
                defines.append(values)
        elif element_tag == "define":
            program_defines.append(
                {"name": element.attrib["name"], "value": element.attrib["value"]}
            )
        elif element_tag == "vertex":
            streams = []
            for shader_child in element:
                assert shader_child.tag == "stream"
                streams.append(
                    {
                        "name": shader_child.attrib["name"],
                        "attribute": shader_child.attrib["attribute"],
                    }
                )
                if "if" in shader_child.attrib:
                    streams[-1]["if"] = shader_child.attrib["if"]
            shaders.append(
                {
                    "type": "vertex",
                    "file": element.attrib["file"],
                    "streams": streams,
                }
            )
        elif element_tag == "fragment":
            shaders.append(
                {
                    "type": "fragment",
                    "file": element.attrib["file"],
                }
            )
        elif element_tag == "compute":
            shaders.append(
                {
                    "type": "compute",
                    "file": element.attrib["file"],
                }
            )
        else:
            raise ValueError('Unsupported element tag: "{}"'.format(element_tag))

    stage_extension = {
        "vertex": ".vs",
        "fragment": ".fs",
        "geometry": ".gs",
        "compute": ".cs",
    }

    output_spirv_mod_path = os.path.join(output_mod_path, "shaders", "spirv")
    if not os.path.isdir(output_spirv_mod_path):
        os.mkdir(output_spirv_mod_path)

    root = ET.Element("programs")

    if "combinations" in rules[program_name]:
        combinations = rules[program_name]["combinations"]
    else:
        combinations = list(itertools.product(*defines))
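When the rules file supplies no explicit combination list, every cross-product of the collected define values is built. A small sketch with made-up defines showing how itertools.product expands them:

    import itertools

    # Each inner list holds the alternative values of one define (hypothetical names).
    defines = [
        [{"name": "USE_SHADOW", "value": "0"}, {"name": "USE_SHADOW", "value": "1"}],
        [{"name": "USE_FOG", "value": "0"}, {"name": "USE_FOG", "value": "1"}],
    ]
    combinations = list(itertools.product(*defines))
    assert len(combinations) == 4  # (0,0), (0,1), (1,0), (1,1)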
@@ -346,36 +416,36 @@ def build(rules, input_mod_path, output_mod_path, dependencies, program_name):
    for index, combination in enumerate(combinations):
        assert index < 10000
        program_path = "spirv/" + program_name + ("_%04d" % index) + ".xml"

        programs_element = ET.SubElement(root, "program")
        programs_element.set("type", "spirv")
        programs_element.set("file", program_path)
        defines_element = ET.SubElement(programs_element, "defines")
        for define in combination:
            if define["value"] == "UNDEFINED":
                continue
            define_element = ET.SubElement(defines_element, "define")
            define_element.set("name", define["name"])
            define_element.set("value", define["value"])

        if not rebuild and os.path.isfile(os.path.join(output_mod_path, "shaders", program_path)):
            continue

        program_root = ET.Element("program")
        program_root.set("type", "spirv")

        for shader in shaders:
            extension = stage_extension[shader["type"]]
            file_name = program_name + ("_%04d" % index) + extension + ".spv"
            output_spirv_path = os.path.join(output_spirv_mod_path, file_name)

            input_glsl_path = os.path.join(input_mod_path, "shaders", shader["file"])
            # Some shader programs might use vs and fs shaders from different mods.
            if not os.path.isfile(input_glsl_path):
                input_glsl_path = None
                for dependency in dependencies:
                    fallback_input_path = os.path.join(dependency, "shaders", shader["file"])
                    if os.path.isfile(fallback_input_path):
                        input_glsl_path = fallback_input_path
                        break
@@ -385,10 +455,11 @@ def build(rules, input_mod_path, output_mod_path, dependencies, program_name):
                input_mod_path,
                output_mod_path,
                dependencies,
                shader["type"],
                input_glsl_path,
                output_spirv_path,
                combination + program_defines,
            )

            spirv_hash = calculate_hash(output_spirv_path)
            if spirv_hash not in hashed_cache:
@@ -406,77 +477,95 @@ def build(rules, input_mod_path, output_mod_path, dependencies, program_name):
            else:
                hashed_cache[spirv_hash].append(file_name)

            shader_element = ET.SubElement(program_root, shader["type"])
            shader_element.set("file", "spirv/" + file_name)
            if shader["type"] == "vertex":
                for stream in shader["streams"]:
                    if "if" in stream and not resolve_if(combination, stream["if"]):
                        continue

                    found_vertex_attribute = False
                    for vertex_attribute in reflection["vertex_attributes"]:
                        if vertex_attribute["name"] == stream["attribute"]:
                            found_vertex_attribute = True
                            break
                    if not found_vertex_attribute and stream["attribute"] == "a_tangent":
                        continue
                    if not found_vertex_attribute:
                        sys.stderr.write(
                            "Vertex attribute not found: {}\n".format(stream["attribute"])
                        )
                    assert found_vertex_attribute

                    stream_element = ET.SubElement(shader_element, "stream")
                    stream_element.set("name", stream["name"])
                    stream_element.set("attribute", stream["attribute"])
                    for vertex_attribute in reflection["vertex_attributes"]:
                        if vertex_attribute["name"] == stream["attribute"]:
                            stream_element.set("location", vertex_attribute["location"])
                            break

            for push_constant in reflection["push_constants"]:
                push_constant_element = ET.SubElement(shader_element, "push_constant")
                push_constant_element.set("name", push_constant["name"])
                push_constant_element.set("size", push_constant["size"])
                push_constant_element.set("offset", push_constant["offset"])

            descriptor_sets_element = ET.SubElement(shader_element, "descriptor_sets")
            for descriptor_set in reflection["descriptor_sets"]:
                descriptor_set_element = ET.SubElement(descriptor_sets_element, "descriptor_set")
                descriptor_set_element.set("set", descriptor_set["set"])
                for binding in descriptor_set["bindings"]:
                    binding_element = ET.SubElement(descriptor_set_element, "binding")
                    binding_element.set("type", binding["type"])
                    binding_element.set("binding", binding["binding"])
                    if binding["type"] == "uniform":
                        binding_element.set("size", binding["size"])
                        for member in binding["members"]:
                            member_element = ET.SubElement(binding_element, "member")
                            member_element.set("name", member["name"])
                            member_element.set("size", member["size"])
                            member_element.set("offset", member["offset"])
                    elif binding["type"].startswith("sampler"):
                        binding_element.set("name", binding["name"])
                    elif binding["type"].startswith("storage"):
                        binding_element.set("name", binding["name"])

        program_tree = ET.ElementTree(program_root)
        output_xml_tree(program_tree, os.path.join(output_mod_path, "shaders", program_path))

    tree = ET.ElementTree(root)
    output_xml_tree(tree, os.path.join(output_mod_path, "shaders", "spirv", program_name + ".xml"))
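The hashed_cache bookkeeping above deduplicates compiled binaries by content: when a newly produced .spv hashes to something already seen, the program XML can point at the existing file instead of keeping a byte-identical copy. A sketch of the idea (calculate_hash is not shown in these hunks; a digest over the file bytes is assumed):

    import hashlib

    def calculate_hash(path):  # assumed implementation, not part of the diff
        with open(path, "rb") as handle:
            return hashlib.sha1(handle.read()).hexdigest()

    hashed_cache = {}  # digest -> names of emitted files sharing that content
    digest = calculate_hash("program_0000.fs.spv")  # hypothetical file name
    if digest not in hashed_cache:
        hashed_cache[digest] = ["program_0000.fs.spv"]  # first occurrence: keep it
    else:
        hashed_cache[digest].append("program_0000.fs.spv")  # duplicate: reuse the first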
def run():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "input_mod_path",
        help="a path to a directory with input mod with GLSL shaders like binaries/data/mods/public",
    )
    parser.add_argument("rules_path", help="a path to JSON with rules")
    parser.add_argument(
        "output_mod_path",
        help="a path to a directory with mod to store SPIR-V shaders like binaries/data/mods/spirv",
    )
    parser.add_argument(
        "-d",
        "--dependency",
        action="append",
        help="a path to a directory with a dependency mod (at least modmod should present as dependency)",
        required=True,
    )
    parser.add_argument(
        "-p",
        "--program_name",
        help="a shader program name (in case of presence the only program will be compiled)",
        default=None,
    )
    args = parser.parse_args()

    if not os.path.isfile(args.rules_path):
        sys.stderr.write('Rules "{}" are not found\n'.format(args.rules_path))
        return
    with open(args.rules_path, "rt") as handle:
        rules = json.load(handle)

    if not os.path.isdir(args.input_mod_path):
@@ -487,7 +576,7 @@ def run():
        sys.stderr.write('Output mod path "{}" is not a directory\n'.format(args.output_mod_path))
        return

    mod_shaders_path = os.path.join(args.input_mod_path, "shaders", "glsl")
    if not os.path.isdir(mod_shaders_path):
        sys.stderr.write('Directory "{}" was not found\n'.format(mod_shaders_path))
        return
@@ -497,11 +586,11 @@ def run():
    if not args.program_name:
        for file_name in os.listdir(mod_shaders_path):
            name, ext = os.path.splitext(file_name)
            if ext.lower() == ".xml":
                build(rules, args.input_mod_path, args.output_mod_path, args.dependency, name)
    else:
        build(rules, args.input_mod_path, args.output_mod_path, args.dependency, args.program_name)


if __name__ == "__main__":
    run()
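A typical invocation of the rebuilt script, with placeholder paths (the script and rules file names are assumptions, they are not shown in this diff):

    python3 compile.py binaries/data/mods/public rules.json binaries/data/mods/spirv \
        -d binaries/data/mods/mod -p water_simple

The -d flag may be passed repeatedly, one per dependency mod, and omitting -p builds every program XML found under shaders/glsl.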
View File
@@ -22,15 +22,16 @@
# THE SOFTWARE.

import sys
import xml.etree.ElementTree as ET
from pathlib import Path
import os
import glob

sys.path.append("../entity")
from scriptlib import SimulTemplateEntity  # noqa: E402

AttackTypes = ["Hack", "Pierce", "Crush", "Poison", "Fire"]
Resources = ["food", "wood", "stone", "metal"]
@@ -93,13 +94,14 @@ AddSortingOverlay = True
# This is the path to the /templates/ folder to consider. Change this for mod
# support.
modsFolder = Path(__file__).resolve().parents[3] / "binaries" / "data" / "mods"
basePath = modsFolder / "public" / "simulation" / "templates"

# For performance purposes, cache opened templates files.
globalTemplatesList = {}
sim_entity = SimulTemplateEntity(modsFolder, None)


def htbout(file, balise, value):
    file.write("<" + balise + ">" + value + "</" + balise + ">\n")
@@ -113,7 +115,9 @@ def fastParse(template_name):
    if template_name in globalTemplatesList:
        return globalTemplatesList[template_name]
    parent_string = ET.parse(template_name).getroot().get("parent")
    globalTemplatesList[template_name] = sim_entity.load_inherited(
        "simulation/templates/", str(template_name), ["public"]
    )
    globalTemplatesList[template_name].set("parent", parent_string)
    return globalTemplatesList[template_name]
@@ -126,7 +130,9 @@ def getParents(template_name):
    parents = set()
    for parent in parents_string.split("|"):
        parents.add(parent)
        for element in getParents(
            sim_entity.get_file("simulation/templates/", parent + ".xml", "public")
        ):
            parents.add(element)

    return parents
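Both helpers go through the globalTemplatesList cache, so each template's inheritance chain is resolved at most once per run. A usage sketch with a hypothetical template path:

    # Paths of this shape come from the later glob over units/<civ>/*.xml.
    tmpl = fastParse("units/athen/infantry_spearman_b.xml")  # parsed and cached
    assert tmpl is fastParse("units/athen/infantry_spearman_b.xml")  # cache hit
    ancestors = getParents("units/athen/infantry_spearman_b.xml")  # transitive parent set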
@@ -135,13 +141,14 @@ def getParents(template_name):
def ExtractValue(value):
    return float(value.text) if value is not None else 0.0


# This function checks that a template has the given parent.
def hasParentTemplate(template_name, parentName):
    return any(parentName == parent + ".xml" for parent in getParents(template_name))


def CalcUnit(UnitName, existingUnit=None):
    if existingUnit is not None:
        unit = existingUnit
    else:
        unit = {
@@ -188,23 +195,23 @@ def CalcUnit(UnitName, existingUnit=None):
    for type in list(resource_cost):
        unit["Cost"][type.tag] = ExtractValue(type)

    if Template.find("./Attack/Melee") is not None:
        unit["RepeatRate"]["Melee"] = ExtractValue(Template.find("./Attack/Melee/RepeatTime"))
        unit["PrepRate"]["Melee"] = ExtractValue(Template.find("./Attack/Melee/PrepareTime"))
        for atttype in AttackTypes:
            unit["Attack"]["Melee"][atttype] = ExtractValue(
                Template.find("./Attack/Melee/Damage/" + atttype)
            )
        attack_melee_bonus = Template.find("./Attack/Melee/Bonuses")
        if attack_melee_bonus is not None:
            for Bonus in attack_melee_bonus:
                Against = []
                CivAg = []
                if Bonus.find("Classes") is not None and Bonus.find("Classes").text is not None:
                    Against = Bonus.find("Classes").text.split(" ")
                if Bonus.find("Civ") is not None and Bonus.find("Civ").text is not None:
                    CivAg = Bonus.find("Civ").text.split(" ")
                Val = float(Bonus.find("Multiplier").text)
                unit["AttackBonuses"][Bonus.tag] = {
@@ -223,7 +230,7 @@ def CalcUnit(UnitName, existingUnit=None):
                unit["Restricted"].pop(newClasses.index(elem))
        unit["Restricted"] += newClasses

    elif Template.find("./Attack/Ranged") is not None:
        unit["Ranged"] = True
        unit["Range"] = ExtractValue(Template.find("./Attack/Ranged/MaxRange"))
        unit["Spread"] = ExtractValue(Template.find("./Attack/Ranged/Projectile/Spread"))
@@ -231,16 +238,17 @@ def CalcUnit(UnitName, existingUnit=None):
        unit["PrepRate"]["Ranged"] = ExtractValue(Template.find("./Attack/Ranged/PrepareTime"))
        for atttype in AttackTypes:
            unit["Attack"]["Ranged"][atttype] = ExtractValue(
                Template.find("./Attack/Ranged/Damage/" + atttype)
            )
        if Template.find("./Attack/Ranged/Bonuses") is not None:
            for Bonus in Template.find("./Attack/Ranged/Bonuses"):
                Against = []
                CivAg = []
                if Bonus.find("Classes") is not None and Bonus.find("Classes").text is not None:
                    Against = Bonus.find("Classes").text.split(" ")
                if Bonus.find("Civ") is not None and Bonus.find("Civ").text is not None:
                    CivAg = Bonus.find("Civ").text.split(" ")
                Val = float(Bonus.find("Multiplier").text)
                unit["AttackBonuses"][Bonus.tag] = {
@@ -248,9 +256,8 @@ def CalcUnit(UnitName, existingUnit=None):
                    "Civs": CivAg,
                    "Multiplier": Val,
                }
        if Template.find("./Attack/Melee/RestrictedClasses") is not None:
            newClasses = Template.find("./Attack/Melee/RestrictedClasses").text.split(" ")
            for elem in newClasses:
                if elem.find("-") != -1:
                    newClasses.pop(newClasses.index(elem))
@@ -258,19 +265,17 @@ def CalcUnit(UnitName, existingUnit=None):
                    unit["Restricted"].pop(newClasses.index(elem))
            unit["Restricted"] += newClasses

    if Template.find("Resistance") is not None:
        for atttype in AttackTypes:
            unit["Resistance"][atttype] = ExtractValue(
                Template.find("./Resistance/Entity/Damage/" + atttype)
            )

    if Template.find("./UnitMotion") is not None:
        if Template.find("./UnitMotion/WalkSpeed") is not None:
            unit["WalkSpeed"] = ExtractValue(Template.find("./UnitMotion/WalkSpeed"))

    if Template.find("./Identity/VisibleClasses") is not None:
        newClasses = Template.find("./Identity/VisibleClasses").text.split(" ")
        for elem in newClasses:
            if elem.find("-") != -1:
@@ -279,7 +284,7 @@ def CalcUnit(UnitName, existingUnit=None):
                unit["Classes"].pop(newClasses.index(elem))
        unit["Classes"] += newClasses

    if Template.find("./Identity/Classes") is not None:
        newClasses = Template.find("./Identity/Classes").text.split(" ")
        for elem in newClasses:
            if elem.find("-") != -1:
@@ -308,28 +313,23 @@ def WriteUnit(Name, UnitDict):
        + "%</td>"
    )

    attType = "Ranged" if UnitDict["Ranged"] is True else "Melee"
    if UnitDict["RepeatRate"][attType] != "0":
        for atype in AttackTypes:
            repeatTime = float(UnitDict["RepeatRate"][attType]) / 1000.0
            ret += (
                "<td>"
                + str("%.1f" % (float(UnitDict["Attack"][attType][atype]) / repeatTime))
                + "</td>"
            )

        ret += "<td>" + str("%.1f" % (float(UnitDict["RepeatRate"][attType]) / 1000.0)) + "</td>"
    else:
        for atype in AttackTypes:
            ret += "<td> - </td>"
        ret += "<td> - </td>"

    if UnitDict["Ranged"] is True and UnitDict["Range"] > 0:
        ret += "<td>" + str("%.1f" % float(UnitDict["Range"])) + "</td>"
        spread = float(UnitDict["Spread"])
        ret += "<td>" + str("%.1f" % spread) + "</td>"
@@ -337,11 +337,9 @@ def WriteUnit(Name, UnitDict):
        ret += "<td> - </td><td> - </td>"

    for rtype in Resources:
        ret += "<td>" + str("%.0f" % float(UnitDict["Cost"][rtype])) + "</td>"

    ret += "<td>" + str("%.0f" % float(UnitDict["Cost"]["population"])) + "</td>"

    ret += '<td style="text-align:left;">'
    for Bonus in UnitDict["AttackBonuses"]:
@@ -362,11 +360,11 @@ def SortFn(A):
            sortVal += 1
            if classe in A[1]["Classes"]:
                break
    if ComparativeSortByChamp is True and A[0].find("champion") == -1:
        sortVal -= 20
    if ComparativeSortByCav is True and A[0].find("cavalry") == -1:
        sortVal -= 10
    if A[1]["Civ"] is not None and A[1]["Civ"] in Civs:
        sortVal += 100 * Civs.index(A[1]["Civ"])
    return sortVal
@@ -403,9 +401,7 @@ def WriteColouredDiff(file, diff, isChanged):
    file.write(
        """<td><span style="color:rgb({});">{}</span></td>
""".format(rgb_str, cleverParse(diff))
    )
    return isChanged
@@ -413,10 +409,14 @@ def WriteColouredDiff(file, diff, isChanged):
def computeUnitEfficiencyDiff(TemplatesByParent, Civs):
    efficiency_table = {}
    for parent in TemplatesByParent:
        for template in [
            template for template in TemplatesByParent[parent] if template[1]["Civ"] not in Civs
        ]:
            print(template)
        TemplatesByParent[parent] = [
            template for template in TemplatesByParent[parent] if template[1]["Civ"] in Civs
        ]
        TemplatesByParent[parent].sort(key=lambda x: Civs.index(x[1]["Civ"]))
        for tp in TemplatesByParent[parent]:
@@ -426,15 +426,11 @@ def computeUnitEfficiencyDiff(TemplatesByParent, Civs):
            efficiency_table[(parent, tp[0], "HP")] = diff

            # Build Time
            diff = +1j + (int(tp[1]["BuildTime"]) - int(templates[parent]["BuildTime"]))
            efficiency_table[(parent, tp[0], "BuildTime")] = diff

            # walk speed
            diff = -1j + (float(tp[1]["WalkSpeed"]) - float(templates[parent]["WalkSpeed"]))
            efficiency_table[(parent, tp[0], "WalkSpeed")] = diff

            # Resistance
@@ -446,54 +442,42 @@ def computeUnitEfficiencyDiff(TemplatesByParent, Civs):
                efficiency_table[(parent, tp[0], "Resistance/" + atype)] = diff

            # Attack types (DPS) and rate.
            attType = "Ranged" if tp[1]["Ranged"] is True else "Melee"
            if tp[1]["RepeatRate"][attType] != "0":
                for atype in AttackTypes:
                    myDPS = float(tp[1]["Attack"][attType][atype]) / (
                        float(tp[1]["RepeatRate"][attType]) / 1000.0
                    )
                    parentDPS = float(templates[parent]["Attack"][attType][atype]) / (
                        float(templates[parent]["RepeatRate"][attType]) / 1000.0
                    )
                    diff = -1j + (myDPS - parentDPS)
                    efficiency_table[(parent, tp[0], "Attack/" + attType + "/" + atype)] = diff
                diff = -1j + (
                    float(tp[1]["RepeatRate"][attType]) / 1000.0
                    - float(templates[parent]["RepeatRate"][attType]) / 1000.0
                )
                efficiency_table[
                    (parent, tp[0], "Attack/" + attType + "/" + atype + "/RepeatRate")
                ] = diff

            # range and spread
            if tp[1]["Ranged"] is True:
                diff = -1j + (float(tp[1]["Range"]) - float(templates[parent]["Range"]))
                efficiency_table[(parent, tp[0], "Attack/" + attType + "/Ranged/Range")] = diff
                diff = float(tp[1]["Spread"]) - float(templates[parent]["Spread"])
                efficiency_table[(parent, tp[0], "Attack/" + attType + "/Ranged/Spread")] = (
                    diff
                )

            for rtype in Resources:
                diff = +1j + (
                    float(tp[1]["Cost"][rtype]) - float(templates[parent]["Cost"][rtype])
                )
                efficiency_table[(parent, tp[0], "Resources/" + rtype)] = diff

            diff = +1j + (
                float(tp[1]["Cost"]["population"]) - float(templates[parent]["Cost"]["population"])
            )
            efficiency_table[(parent, tp[0], "Population")] = diff
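The complex numbers stored here follow a small convention: the real part carries the raw stat difference against the parent template, while the imaginary unit appears to flag which direction is desirable, +1j where an increase is a regression (costs, build time, spread) and -1j where an increase is an improvement (speed, DPS, range). A reading sketch with a hypothetical helper:

    def polarity(diff):  # hypothetical helper, only to illustrate the convention
        worse_when_higher = diff.imag > 0  # +1j: a higher raw value reads as worse
        if diff.real == 0:
            return "unchanged"
        good = (diff.real < 0) if worse_when_higher else (diff.real > 0)
        return "better" if good else "worse"

    assert polarity(+1j + 10.0) == "worse"   # e.g. 10 more metal in the cost
    assert polarity(-1j + 0.5) == "better"   # e.g. 0.5 faster walk speed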
@@ -512,7 +496,7 @@ def computeTemplates(LoadTemplatesIfParent):
            if hasParentTemplate(template, possParent):
                found = True
                break
        if found is True:
            templates[template] = CalcUnit(template)
    os.chdir(pwd)
    return templates
@@ -541,7 +525,6 @@ def computeCivTemplates(template: dict, Civs: list):
        civ_list = list(glob.glob("units/" + Civ + "/*.xml"))
        for template in civ_list:
            if os.path.isfile(template):
                # filter based on FilterOut
                breakIt = False
                for filter in FilterOut:
@@ -601,17 +584,14 @@ CivTemplates = computeCivTemplates(templates, Civs)
TemplatesByParent = computeTemplatesByParent(templates, Civs, CivTemplates)

# Not used; use it for your own custom analysis
efficiencyTable = computeUnitEfficiencyDiff(TemplatesByParent, Civs)


############################################################
def writeHTML():
    """Create the HTML file"""
    f = open(
        os.path.realpath(__file__).replace("unitTables.py", "") + "unit_summary_table.html",
        "w",
    )
@@ -699,10 +679,7 @@ differences between the two.
        TemplatesByParent[parent].sort(key=lambda x: Civs.index(x[1]["Civ"]))
        for tp in TemplatesByParent[parent]:
            isChanged = False
            ff = open(os.path.realpath(__file__).replace("unitTables.py", "") + ".cache", "w")

            ff.write("<tr>")
            ff.write(
@@ -711,9 +688,7 @@ differences between the two.
                + "</th>"
            )
            ff.write(
                '<td class="Sub">' + tp[0].replace(".xml", "").replace("units/", "") + "</td>"
            )

            # HP
@@ -721,15 +696,11 @@ differences between the two.
            isChanged = WriteColouredDiff(ff, diff, isChanged)

            # Build Time
            diff = +1j + (int(tp[1]["BuildTime"]) - int(templates[parent]["BuildTime"]))
            isChanged = WriteColouredDiff(ff, diff, isChanged)

            # walk speed
            diff = -1j + (float(tp[1]["WalkSpeed"]) - float(templates[parent]["WalkSpeed"]))
            isChanged = WriteColouredDiff(ff, diff, isChanged)

            # Resistance
@@ -741,19 +712,16 @@ differences between the two.
                isChanged = WriteColouredDiff(ff, diff, isChanged)

            # Attack types (DPS) and rate.
            attType = "Ranged" if tp[1]["Ranged"] is True else "Melee"
            if tp[1]["RepeatRate"][attType] != "0":
                for atype in AttackTypes:
                    myDPS = float(tp[1]["Attack"][attType][atype]) / (
                        float(tp[1]["RepeatRate"][attType]) / 1000.0
                    )
                    parentDPS = float(templates[parent]["Attack"][attType][atype]) / (
                        float(templates[parent]["RepeatRate"][attType]) / 1000.0
                    )
                    isChanged = WriteColouredDiff(ff, -1j + (myDPS - parentDPS), isChanged)
                isChanged = WriteColouredDiff(
                    ff,
                    -1j
@@ -764,32 +732,26 @@ differences between the two.
                    isChanged,
                )

                # range and spread
                if tp[1]["Ranged"] is True:
                    isChanged = WriteColouredDiff(
                        ff,
                        -1j + (float(tp[1]["Range"]) - float(templates[parent]["Range"])),
                        isChanged,
                    )
                    mySpread = float(tp[1]["Spread"])
                    parentSpread = float(templates[parent]["Spread"])
                    isChanged = WriteColouredDiff(ff, +1j + (mySpread - parentSpread), isChanged)
                else:
                    ff.write(
                        "<td><span style='color:rgb(200,200,200);'>-</span></td><td><span style='color:rgb(200,200,200);'>-</span></td>"
                    )
            else:
                ff.write("<td></td><td></td><td></td><td></td><td></td><td></td>")

            for rtype in Resources:
                isChanged = WriteColouredDiff(
                    ff,
                    +1j + (float(tp[1]["Cost"][rtype]) - float(templates[parent]["Cost"][rtype])),
                    isChanged,
                )
@@ -808,8 +770,7 @@ differences between the two.
            ff.close()  # to actually write into the file
            with open(
                os.path.realpath(__file__).replace("unitTables.py", "") + ".cache", "r"
            ) as ff:
                unitStr = ff.read()
View File
@@ -1,13 +1,14 @@
#!/usr/bin/env python3
from argparse import ArgumentParser
from pathlib import Path
from os.path import join, realpath, exists, dirname
from json import load
from re import match
from logging import getLogger, StreamHandler, INFO, WARNING, Filter, Formatter
import lxml.etree
import sys


class SingleLevelFilter(Filter):
    def __init__(self, passlevel, reject):
        self.passlevel = passlevel
@@ -15,15 +16,17 @@ class SingleLevelFilter(Filter):
    def filter(self, record):
        if self.reject:
            return record.levelno != self.passlevel
        else:
            return record.levelno == self.passlevel


class VFS_File:
    def __init__(self, mod_name, vfs_path):
        self.mod_name = mod_name
        self.vfs_path = vfs_path


class RelaxNGValidator:
    def __init__(self, vfs_root, mods=None, verbose=False):
        self.mods = mods if mods is not None else []
@@ -38,17 +41,18 @@ class RelaxNGValidator:
        # create a console handler, seems nicer to Windows and for future uses
        ch = StreamHandler(sys.stdout)
        ch.setLevel(INFO)
        ch.setFormatter(Formatter("%(levelname)s - %(message)s"))
        f1 = SingleLevelFilter(INFO, False)
        ch.addFilter(f1)
        logger.addHandler(ch)
        errorch = StreamHandler(sys.stderr)
        errorch.setLevel(WARNING)
        errorch.setFormatter(Formatter("%(levelname)s - %(message)s"))
        logger.addHandler(errorch)
        self.logger = logger
        self.inError = False

    def run(self):
        self.validate_actors()
        self.validate_variants()
        self.validate_guis()
@@ -59,13 +63,14 @@ class RelaxNGValidator:
        self.validate_soundgroups()
        self.validate_terrains()
        self.validate_textures()
        return self.inError

    def main(self):
        """Program entry point, parses command line arguments and launches the validation"""
        # ordered uniq mods (dict maintains ordered keys from python 3.6)
        self.logger.info(f"Checking {'|'.join(self.mods)}'s integrity.")
        self.logger.info(f"The following mods will be loaded: {'|'.join(self.mods)}.")
        return self.run()

    def find_files(self, vfs_root, mods, vfs_path, *ext_list):
        """
@@ -73,88 +78,115 @@ class RelaxNGValidator:
        - Path relative to the mod base
        - full Path
        """
        full_exts = ["." + ext for ext in ext_list]

        def find_recursive(dp, base):
            """(relative Path, full Path) generator"""
            if dp.is_dir():
                if dp.name != ".svn" and dp.name != ".git" and not dp.name.endswith("~"):
                    for fp in dp.iterdir():
                        yield from find_recursive(fp, base)
            elif dp.suffix in full_exts:
                relative_file_path = dp.relative_to(base)
                yield (relative_file_path, dp.resolve())

        return [
            (rp, fp)
            for mod in mods
            for (rp, fp) in find_recursive(vfs_root / mod / vfs_path, vfs_root / mod)
        ]
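find_files therefore yields, per mod, every matching file as a (path relative to the mod root, resolved absolute path) pair, skipping .svn, .git, and backup directories on the way down. A usage sketch with placeholder arguments:

    # Hypothetical call; vfs_root is a pathlib.Path pointing at binaries/data/mods.
    for relative_path, full_path in validator.find_files(
        vfs_root, ["public", "mod"], "art/actors/", "xml"
    ):
        print(relative_path)  # e.g. art/actors/props/flora/some_bush.xml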
    def validate_actors(self):
        self.logger.info("Validating actors...")
        files = self.find_files(self.vfs_root, self.mods, "art/actors/", "xml")
        self.validate_files("actors", files, "art/actors/actor.rng")

    def validate_variants(self):
        self.logger.info("Validating variants...")
        files = self.find_files(self.vfs_root, self.mods, "art/variants/", "xml")
        self.validate_files("variant", files, "art/variants/variant.rng")

    def validate_guis(self):
        self.logger.info("Validating gui files...")
        pages = [
            file
            for file in self.find_files(self.vfs_root, self.mods, "gui/", "xml")
            if match(r".*[\\\/]page(_[^.\/\\]+)?\.xml$", str(file[0]))
        ]
        self.validate_files("gui page", pages, "gui/gui_page.rng")
        xmls = [
            file
            for file in self.find_files(self.vfs_root, self.mods, "gui/", "xml")
            if not match(r".*[\\\/]page(_[^.\/\\]+)?\.xml$", str(file[0]))
        ]
        self.validate_files("gui xml", xmls, "gui/gui.rng")

    def validate_maps(self):
        self.logger.info("Validating maps...")
        files = self.find_files(self.vfs_root, self.mods, "maps/scenarios/", "xml")
        self.validate_files("map", files, "maps/scenario.rng")
        files = self.find_files(self.vfs_root, self.mods, "maps/skirmishes/", "xml")
        self.validate_files("map", files, "maps/scenario.rng")

    def validate_materials(self):
        self.logger.info("Validating materials...")
        files = self.find_files(self.vfs_root, self.mods, "art/materials/", "xml")
        self.validate_files("material", files, "art/materials/material.rng")

    def validate_particles(self):
        self.logger.info("Validating particles...")
        files = self.find_files(self.vfs_root, self.mods, "art/particles/", "xml")
        self.validate_files("particle", files, "art/particles/particle.rng")

    def validate_simulation(self):
        self.logger.info("Validating simulation...")
        file = self.find_files(self.vfs_root, self.mods, "simulation/data/pathfinder", "xml")
        self.validate_files("pathfinder", file, "simulation/data/pathfinder.rng")
        file = self.find_files(self.vfs_root, self.mods, "simulation/data/territorymanager", "xml")
        self.validate_files("territory manager", file, "simulation/data/territorymanager.rng")

    def validate_soundgroups(self):
        self.logger.info("Validating soundgroups...")
        files = self.find_files(self.vfs_root, self.mods, "audio/", "xml")
        self.validate_files("sound group", files, "audio/sound_group.rng")

    def validate_terrains(self):
        self.logger.info("Validating terrains...")
        terrains = [
            file
            for file in self.find_files(self.vfs_root, self.mods, "art/terrains/", "xml")
            if "terrains.xml" in str(file[0])
        ]
        self.validate_files("terrain", terrains, "art/terrains/terrain.rng")
        terrains_textures = [
            file
            for file in self.find_files(self.vfs_root, self.mods, "art/terrains/", "xml")
            if "terrains.xml" not in str(file[0])
        ]
        self.validate_files(
            "terrain texture", terrains_textures, "art/terrains/terrain_texture.rng"
        )

    def validate_textures(self):
        self.logger.info("Validating textures...")
        files = [
            file
            for file in self.find_files(self.vfs_root, self.mods, "art/textures/", "xml")
            if "textures.xml" in str(file[0])
        ]
        self.validate_files("texture", files, "art/textures/texture.rng")

    def get_physical_path(self, mod_name, vfs_path):
        return realpath(join(self.vfs_root, mod_name, vfs_path))

    def get_relaxng_file(self, schemapath):
        """We look for the highest priority mod relax NG file"""
        for mod in self.mods:
            relax_ng_path = self.get_physical_path(mod, schemapath)
            if exists(relax_ng_path):
                return relax_ng_path

        return ""

    def validate_files(self, name, files, schemapath):
        relax_ng_path = self.get_relaxng_file(schemapath)
@@ -177,31 +209,46 @@ class RelaxNGValidator:
            self.logger.info(f"{error_count} {name} validation errors")
        elif error_count > 0:
            self.logger.error(f"{error_count} {name} validation errors")
            self.inError = True


def get_mod_dependencies(vfs_root, *mods):
    modjsondeps = []
    for mod in mods:
        mod_json_path = Path(vfs_root) / mod / "mod.json"
        if not exists(mod_json_path):
            continue
        with open(mod_json_path, encoding="utf-8") as f:
            modjson = load(f)
        # 0ad's folder isn't named like the mod.
        modjsondeps.extend(
            ["public" if "0ad" in dep else dep for dep in modjson.get("dependencies", [])]
        )
    return modjsondeps
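get_mod_dependencies reads each mod's mod.json and maps any dependency on 0ad onto the public folder, since the shipped data directory is not named after the mod itself. A sketch with a made-up mod.json:

    # binaries/data/mods/mymod/mod.json (hypothetical):
    #   {"name": "mymod", "dependencies": ["0ad>=0.27"]}
    deps = get_mod_dependencies("binaries/data/mods", "mymod")
    assert deps == ["public"]  # the "0ad" entry is rewritten to "public"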
if __name__ == "__main__":
    script_dir = dirname(realpath(__file__))
    default_root = join(script_dir, "..", "..", "..", "binaries", "data", "mods")
    ap = ArgumentParser(description="Validates XML files against their Relax NG schemas")
    ap.add_argument("-r", "--root", action="store", dest="root", default=default_root)
    ap.add_argument(
        "-v", "--verbose", action="store_true", default=True, help="Log validation errors."
    )
    ap.add_argument(
        "-m",
        "--mods",
        metavar="MOD",
        dest="mods",
        nargs="+",
        default=["public"],
        help="specify which mods to check. Default to public and mod.",
    )
    args = ap.parse_args()
    mods = list(
        dict.fromkeys([*args.mods, *get_mod_dependencies(args.root, *args.mods), "mod"]).keys()
    )
    relax_ng_validator = RelaxNGValidator(args.root, mods=mods, verbose=args.verbose)
    # main() returns the recorded error state, so a truthy result means failures.
    if relax_ng_validator.main():
        sys.exit(1)

View File

@@ -6,6 +6,7 @@ import re
import xml.etree.ElementTree
from logging import getLogger, StreamHandler, INFO, WARNING, Formatter, Filter
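

# Logging filter that passes (or rejects) records of exactly one level, which
# lets the Validator route INFO to stdout and warnings/errors to stderr.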
class SingleLevelFilter(Filter):
    def __init__(self, passlevel, reject):
        self.passlevel = passlevel
@@ -13,9 +14,10 @@ class SingleLevelFilter(Filter):

    def filter(self, record):
        if self.reject:
            return record.levelno != self.passlevel
        else:
            return record.levelno == self.passlevel
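

# An actor definition: reads the actor XML and collects its material and the
# names of all referenced textures, including those pulled in via variant
# files. Illustrative shape of the elements this class looks for:
#   <material>player_trans.xml</material>
#   <texture name="baseTex" .../>
#   <variant file="biped/base.xml"/>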
class Actor:
    def __init__(self, mod_name, vfs_path):
@@ -23,7 +25,7 @@ class Actor:
        self.vfs_path = vfs_path
        self.name = os.path.basename(vfs_path)
        self.textures = []
        self.material = ""
        self.logger = getLogger(__name__)

    def read(self, physical_path):
@@ -34,17 +36,17 @@ class Actor:
            return False

        root = tree.getroot()
        # Special case: particles don't need a diffuse texture.
        if len(root.findall(".//particles")) > 0:
            self.textures.append("baseTex")

        for element in root.findall(".//material"):
            self.material = element.text

        for element in root.findall(".//texture"):
            self.textures.append(element.get("name"))

        for element in root.findall(".//variant"):
            file = element.get("file")
            if file:
                self.read_variant(physical_path, os.path.join("art", "variants", file))
        return True

    def read_variant(self, actor_physical_path, relative_path):
@@ -56,12 +58,12 @@ class Actor:
            return False

        root = tree.getroot()
        file = root.get("file")
        if file:
            self.read_variant(actor_physical_path, os.path.join("art", "variants", file))

        for element in root.findall(".//texture"):
            self.textures.append(element.get("name"))
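

# A material definition: collects the texture names the material requires,
# declared as <required_texture name="..."/> elements.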
class Material:
@@ -77,8 +79,8 @@ class Material:
        except xml.etree.ElementTree.ParseError as err:
            self.logger.error('"%s": %s' % (physical_path, err.msg))
            return False
        for element in root.findall(".//required_texture"):
            texture_name = element.get("name")
            self.required_textures.append(texture_name)
        return True

@@ -86,7 +88,7 @@ class Material:
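# Drives the validation: collects materials and actors from all configured
# mods, then checks each actor's texture set against its material.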
class Validator:
    def __init__(self, vfs_root, mods=None):
        if mods is None:
            mods = ["mod", "public"]
        self.vfs_root = vfs_root
        self.mods = mods
@@ -102,15 +104,16 @@ class Validator:
        # create a console handler, seems nicer to Windows and for future uses
        ch = StreamHandler(sys.stdout)
        ch.setLevel(INFO)
        ch.setFormatter(Formatter("%(levelname)s - %(message)s"))
        f1 = SingleLevelFilter(INFO, False)
        ch.addFilter(f1)
        logger.addHandler(ch)
        errorch = StreamHandler(sys.stderr)
        errorch.setLevel(WARNING)
        errorch.setFormatter(Formatter("%(levelname)s - %(message)s"))
        logger.addHandler(errorch)
        self.logger = logger
        self.inError = False

    def get_mod_path(self, mod_name, vfs_path):
        return os.path.join(mod_name, vfs_path)
@@ -124,17 +127,14 @@ class Validator:
        if not os.path.isdir(physical_path):
            return result
        for file_name in os.listdir(physical_path):
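            # Skip version-control metadata directories.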
            if file_name == ".git" or file_name == ".svn":
                continue
            vfs_file_path = os.path.join(vfs_path, file_name)
            physical_file_path = os.path.join(physical_path, file_name)
            if os.path.isdir(physical_file_path):
                result += self.find_mod_files(mod_name, vfs_file_path, pattern)
            elif os.path.isfile(physical_file_path) and pattern.match(file_name):
                result.append({"mod_name": mod_name, "vfs_path": vfs_file_path})
        return result
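
    # Aggregate find_mod_files() results across every configured mod.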
    def find_all_mods_files(self, vfs_path, pattern):
@@ -144,66 +144,100 @@ class Validator:
        return result

    def find_materials(self, vfs_path):
        self.logger.info("Collecting materials...")
        material_files = self.find_all_mods_files(vfs_path, re.compile(r".*\.xml"))
        for material_file in material_files:
            material_name = os.path.basename(material_file["vfs_path"])
            if material_name in self.materials:
                continue
            material = Material(material_file["mod_name"], material_file["vfs_path"])
            if material.read(
                self.get_physical_path(material_file["mod_name"], material_file["vfs_path"])
            ):
                self.materials[material_name] = material
            else:
                self.invalid_materials[material_name] = material

    def find_actors(self, vfs_path):
        self.logger.info("Collecting actors...")
        actor_files = self.find_all_mods_files(vfs_path, re.compile(r".*\.xml"))
        for actor_file in actor_files:
            actor = Actor(actor_file["mod_name"], actor_file["vfs_path"])
            if actor.read(self.get_physical_path(actor_file["mod_name"], actor_file["vfs_path"])):
                self.actors.append(actor)
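
    # Cross-check every actor against its material: unknown materials and
    # missing required textures are errors; unnecessary textures are warnings.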
    def run(self):
        self.find_materials(os.path.join("art", "materials"))
        self.find_actors(os.path.join("art", "actors"))
        self.logger.info("Validating textures...")
        for actor in self.actors:
            if not actor.material:
                continue
            if (
                actor.material not in self.materials
                and actor.material not in self.invalid_materials
            ):
                self.logger.error(
                    '"%s": unknown material "%s"'
                    % (self.get_mod_path(actor.mod_name, actor.vfs_path), actor.material)
                )
                self.inError = True
            if actor.material not in self.materials:
                continue

            material = self.materials[actor.material]
            missing_textures = ", ".join(
                set(
                    [
                        required_texture
                        for required_texture in material.required_textures
                        if required_texture not in actor.textures
                    ]
                )
            )
            if len(missing_textures) > 0:
                self.logger.error(
                    '"%s": actor does not contain required texture(s) "%s" from "%s"'
                    % (
                        self.get_mod_path(actor.mod_name, actor.vfs_path),
                        missing_textures,
                        material.name,
                    )
                )
                self.inError = True

            extra_textures = ", ".join(
                set(
                    [
                        extra_texture
                        for extra_texture in actor.textures
                        if extra_texture not in material.required_textures
                    ]
                )
            )
            if len(extra_textures) > 0:
                self.logger.warning(
                    '"%s": actor contains unnecessary texture(s) "%s" from "%s"'
                    % (
                        self.get_mod_path(actor.mod_name, actor.vfs_path),
                        extra_textures,
                        material.name,
                    )
                )
                self.inError = True

        # True when no problems were found; the CLI exits non-zero otherwise.
        return not self.inError
if __name__ == "__main__":
script_dir = os.path.dirname(os.path.realpath(__file__)) script_dir = os.path.dirname(os.path.realpath(__file__))
default_root = os.path.join(script_dir, '..', '..', '..', 'binaries', 'data', 'mods') default_root = os.path.join(script_dir, "..", "..", "..", "binaries", "data", "mods")
parser = argparse.ArgumentParser(description='Actors/materials validator.') parser = argparse.ArgumentParser(description="Actors/materials validator.")
parser.add_argument('-r', '--root', action='store', dest='root', default=default_root) parser.add_argument("-r", "--root", action="store", dest="root", default=default_root)
parser.add_argument('-m', '--mods', action='store', dest='mods', default='mod,public') parser.add_argument("-m", "--mods", action="store", dest="mods", default="mod,public")
args = parser.parse_args() args = parser.parse_args()
validator = Validator(args.root, args.mods.split(',')) validator = Validator(args.root, args.mods.split(","))
validator.run() if not validator.run():
sys.exit(1)
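
# Example invocation (script name and paths illustrative):
#   python validator.py -r /path/to/binaries/data/mods -m mod,public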