Message extraction and localization
It includes the translation template files (POT) as well as translation files (PO) developed through the Transifex platform by our awesome translators. It also includes tools to generate the translation template files, generate a special translation file with the longest strings of all translations, and a tool to download translations from Transifex into the right game folders automatically. Fixes #67 This was SVN commit r14955.
This commit is contained in:
parent
e05c8263c5
commit
64d204228a
8
binaries/data/l10n/.tx/config
Normal file
8
binaries/data/l10n/.tx/config
Normal file
@ -0,0 +1,8 @@
|
||||
[main]
|
||||
host = https://www.transifex.com
|
||||
|
||||
[0ad.engine]
|
||||
file_filter = <lang>.engine.po
|
||||
source_file = engine.pot
|
||||
source_lang = en
|
||||
|
30
binaries/data/l10n/messages.json
Normal file
30
binaries/data/l10n/messages.json
Normal file
@ -0,0 +1,30 @@
|
||||
[
|
||||
{
|
||||
"output": "engine.pot",
|
||||
"inputRoot": "../../../source",
|
||||
"project": "Pyrogenesis",
|
||||
"copyrightHolder": "Wildfire Games",
|
||||
"rules": [
|
||||
{
|
||||
"extractor": "cpp",
|
||||
"filemasks": {
|
||||
"includeMasks": ["**.cpp"],
|
||||
"excludeMasks": ["third_party/**", "tools/**"]
|
||||
},
|
||||
"options": {
|
||||
"keywords": {
|
||||
"Translate": [1],
|
||||
"TranslatePlural": [1, 2],
|
||||
"TranslateWithContext": [[1], 2],
|
||||
"TranslatePluralWithContext": [[1], 2, 3],
|
||||
"MarkForTranslation": [1],
|
||||
"MarkForTranslationWithContext": [[1], 2]
|
||||
},
|
||||
"commentTags": [
|
||||
"Translation:"
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
8
binaries/data/mods/public/l10n/.tx/config
Normal file
8
binaries/data/mods/public/l10n/.tx/config
Normal file
@ -0,0 +1,8 @@
|
||||
[main]
|
||||
host = https://www.transifex.com
|
||||
|
||||
[0ad.public]
|
||||
file_filter = <lang>.public.po
|
||||
source_file = public.pot
|
||||
source_lang = en
|
||||
|
172
binaries/data/mods/public/l10n/messages.json
Normal file
172
binaries/data/mods/public/l10n/messages.json
Normal file
@ -0,0 +1,172 @@
|
||||
[
|
||||
{
|
||||
"output": "public.pot",
|
||||
"inputRoot": "..",
|
||||
"project": "0 A.D. — Empires Ascendant",
|
||||
"copyrightHolder": "Wildfire Games",
|
||||
"rules": [
|
||||
{
|
||||
"extractor": "javascript",
|
||||
"filemasks": ["**.js"],
|
||||
"options": {
|
||||
"keywords": {
|
||||
"translate": [1],
|
||||
"translatePlural": [1, 2],
|
||||
"translateWithContext": [[1], 2],
|
||||
"translatePluralWithContext": [[1], 2, 3],
|
||||
"markForTranslation": [1],
|
||||
"markForTranslationWithContext": [[1], 2]
|
||||
},
|
||||
"commentTags": [
|
||||
"Translation:"
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"extractor": "xml",
|
||||
"filemasks": ["gui/**.xml"],
|
||||
"options": {
|
||||
"format": "none",
|
||||
"keywords": {
|
||||
"translatableAttribute": {
|
||||
"locationAttributes": ["id"]
|
||||
},
|
||||
"translate": {}
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"extractor": "txt",
|
||||
"filemasks": [
|
||||
"gui/manual/intro.txt",
|
||||
"gui/manual/userreport.txt",
|
||||
"gui/text/quotes.txt",
|
||||
"gui/splashscreen/splashscreen.txt",
|
||||
"gui/text/tips/**.txt"
|
||||
],
|
||||
"options": {
|
||||
"format": "none"
|
||||
}
|
||||
},
|
||||
{
|
||||
"extractor": "json",
|
||||
"filemasks": [
|
||||
"simulation/data/game_speeds.json",
|
||||
"simulation/data/player_defaults.json"
|
||||
],
|
||||
"options": {
|
||||
"format": "none",
|
||||
"keywords": [
|
||||
"Name"
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"extractor": "json",
|
||||
"filemasks": [
|
||||
"simulation/data/map_sizes.json"
|
||||
],
|
||||
"options": {
|
||||
"format": "none",
|
||||
"keywords": [
|
||||
"Name",
|
||||
"LongName"
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"extractor": "json",
|
||||
"filemasks": [
|
||||
"civs/**.json"
|
||||
],
|
||||
"options": {
|
||||
"format": "none",
|
||||
"keywords": [
|
||||
"Name",
|
||||
"Description",
|
||||
"History",
|
||||
"Special",
|
||||
"AINames"
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"extractor": "json",
|
||||
"filemasks": [
|
||||
"maps/random/**.json"
|
||||
],
|
||||
"options": {
|
||||
"format": "none",
|
||||
"keywords": [
|
||||
"Name",
|
||||
"Description"
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"extractor": "json",
|
||||
"filemasks": [
|
||||
"simulation/ai/**.json"
|
||||
],
|
||||
"options": {
|
||||
"format": "none",
|
||||
"keywords": [
|
||||
"name",
|
||||
"description"
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"extractor": "json",
|
||||
"filemasks": [
|
||||
"simulation/data/technologies/**.json"
|
||||
],
|
||||
"options": {
|
||||
"format": "none",
|
||||
"keywords": [
|
||||
"specificName",
|
||||
"genericName",
|
||||
"description",
|
||||
"tooltip",
|
||||
"requirementsTooltip"
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"extractor": "xml",
|
||||
"filemasks": ["simulation/templates/**.xml"],
|
||||
"options": {
|
||||
"format": "none",
|
||||
"keywords": {
|
||||
"GenericName": {},
|
||||
"SpecificName": {},
|
||||
"Tooltip": {},
|
||||
"DisabledTooltip": {},
|
||||
"FormationName": {},
|
||||
"FromClass": {}
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"extractor": "xml",
|
||||
"filemasks": [
|
||||
"maps/scenarios/**.xml",
|
||||
"maps/skirmishes/**.xml"
|
||||
],
|
||||
"options": {
|
||||
"format": "none",
|
||||
"keywords": {
|
||||
"ScriptSettings": {
|
||||
"extractJson": {
|
||||
"keywords": [
|
||||
"Name",
|
||||
"Description"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
@ -46,6 +46,10 @@ in particular, let us know and we can try to clarify it.
|
||||
unspecified (FontLoader.py)
|
||||
IBM CPL (Packer.py)
|
||||
|
||||
i18n
|
||||
GPLv2
|
||||
BSD (potter, fork of babel.messages; see http://babel.edgewall.org/wiki/License)
|
||||
|
||||
jsdebugger
|
||||
GPL version 2 (or later)
|
||||
Other - see js/lib/ace/LICENSE.TXT (js/lib/ace)
|
||||
|
156
source/tools/i18n/generateLongStringTranslations.py
Normal file
156
source/tools/i18n/generateLongStringTranslations.py
Normal file
@ -0,0 +1,156 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding:utf-8 -*-
|
||||
#
|
||||
# Copyright (C) 2013 Wildfire Games.
|
||||
# This file is part of 0 A.D.
|
||||
#
|
||||
# 0 A.D. is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# 0 A.D. is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with 0 A.D. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from __future__ import absolute_import, division, print_function, unicode_literals
|
||||
|
||||
import codecs, json, os, sys, textwrap
|
||||
|
||||
from potter.catalog import Catalog, Message
|
||||
from potter.extract import getExtractorInstance
|
||||
from potter.pofile import read_po, write_po
|
||||
|
||||
|
||||
# Folder that contains this script (source/tools/i18n).
l10nToolsDirectory = os.path.dirname(os.path.realpath(__file__))
# Project root: three levels up from the i18n tools folder.
projectRootDirectory = os.path.abspath(os.path.join(l10nToolsDirectory, os.pardir, os.pardir, os.pardir))
# Name of the folders that hold translation files throughout the project tree.
l10nFolderName = "l10n"
|
||||
|
||||
|
||||
#def getAverageExpansionForEnglishString(string):
|
||||
#"""
|
||||
#Based on http://www.w3.org/International/articles/article-text-size.en
|
||||
#"""
|
||||
#length = len(string)
|
||||
#if len <= 10:
|
||||
#return length*3 # 200–300%
|
||||
#if len <= 20:
|
||||
#return length*2 # 180–200%
|
||||
#if len <= 30:
|
||||
#return length*1.8 # 160–180%
|
||||
#if len <= 50:
|
||||
#return length*1.6 # 140–160%
|
||||
#if len <= 70:
|
||||
#return length*1.7 # 151-170%
|
||||
|
||||
#return length*1.3 # 130%
|
||||
|
||||
|
||||
#def enlarge(string, surroundWithSpaces):
|
||||
#halfExpansion = int(getAverageExpansionForEnglishString(string)/2)
|
||||
#if surroundWithSpaces: halfExpansion -= 1
|
||||
|
||||
#outputString = "x"*halfExpansion
|
||||
#if surroundWithSpaces:
|
||||
#outputString += " "
|
||||
|
||||
#outputString += string
|
||||
|
||||
#if surroundWithSpaces:
|
||||
#outputString += " "
|
||||
#outputString += "x"*halfExpansion
|
||||
|
||||
#return outputString
|
||||
|
||||
|
||||
def generateLongStringTranslationFromPotIntoPo(inputFilePath, outputFilePath):
    """Generate a "longest strings" PO catalog from a POT template.

    For every message in the template at *inputFilePath*, write to
    *outputFilePath* the longest string known for that message, picked from
    the English source string and every existing translation catalog (``*.po``)
    found in the template's folder.  Command-line arguments, if any, restrict
    which locale codes are considered.
    """

    with codecs.open(inputFilePath, 'r', 'utf-8') as fileObject:
        templateCatalog = read_po(fileObject)

    longStringCatalog = Catalog()

    # Fill catalog with English strings.
    for message in templateCatalog:
        if message.pluralizable:
            singularString, pluralString = message.id
            message.string = (singularString, pluralString)
        else:
            message.string = message.id
        longStringCatalog[message.id] = message

    # If language codes were specified on the command line, filter by those.
    filters = sys.argv[1:]

    # Load existing translation catalogs.
    existingTranslationCatalogs = []
    l10nFolderPath = os.path.dirname(inputFilePath)

    # .pot is one letter longer than .po, but the dot that separates the locale
    # code from the rest of the filename in .po files makes up for that.
    charactersToSkip = len(os.path.basename(inputFilePath))

    for filename in os.listdir(l10nFolderPath):
        # Consider "<locale>.<domain>.po" files only, and skip the "long.*"
        # files that this very script produces.
        if len(filename) > 3 and filename[-3:] == ".po" and filename[:4] != "long":
            if not filters or filename[:-charactersToSkip] in filters:
                with codecs.open(os.path.join(l10nFolderPath, filename), 'r', 'utf-8') as fileObject:
                    existingTranslationCatalogs.append(read_po(fileObject))

    # If any existing translation has more characters than the average expansion, use that instead.
    for translationCatalog in existingTranslationCatalogs:
        for longStringCatalogMessage in longStringCatalog:
            translationMessage = translationCatalog.get(longStringCatalogMessage.id, longStringCatalogMessage.context)
            if translationMessage:
                if longStringCatalogMessage.pluralizable:
                    currentSingularString, currentPluralString = longStringCatalogMessage.string
                    longestSingularString = currentSingularString
                    longestPluralString = currentPluralString

                    candidateSingularString = translationMessage.string[0]
                    candidatePluralString = "" # There might be between 0 and infinite plural forms.
                    # Use the longest of the translation's plural forms as the
                    # plural candidate.
                    for candidateString in translationMessage.string[1:]:
                        if len(candidateString) > len(candidatePluralString): candidatePluralString = candidateString

                    changed = False
                    if len(candidateSingularString) > len(currentSingularString):
                        longestSingularString = candidateSingularString
                        changed = True
                    if len(candidatePluralString) > len(currentPluralString):
                        longestPluralString = candidatePluralString
                        changed = True

                    if changed:
                        longStringCatalogMessage.string = (longestSingularString, longestPluralString)
                        longStringCatalog[longStringCatalogMessage.id] = longStringCatalogMessage

                else:
                    if len(translationMessage.string) > len(longStringCatalogMessage.string):
                        longStringCatalogMessage.string = translationMessage.string
                        longStringCatalog[longStringCatalogMessage.id] = longStringCatalogMessage


    with codecs.open(outputFilePath, 'w', 'utf-8') as fileObject:
        write_po(fileObject, longStringCatalog)
|
||||
|
||||
|
||||
def main():
    """Walk the project tree and, for every POT file inside an "l10n" folder,
    generate the matching "long.<name>.po" file with the longest known strings.

    Prints usage guidance when no POT files are found.
    """
    foundPots = 0
    for root, folders, filenames in os.walk(projectRootDirectory):
        root = root.decode("utf-8")
        # Only POT files that live directly inside an l10n folder are templates.
        # (Use the module-level constant instead of a duplicated literal.)
        if os.path.basename(root) != l10nFolderName:
            continue
        for filename in filenames:
            if len(filename) > 4 and filename.endswith(".pot"):
                foundPots += 1
                # "engine.pot" -> "long.engine.po" (drop the trailing "t").
                generateLongStringTranslationFromPotIntoPo(os.path.join(root, filename), os.path.join(root, "long." + filename[:-1]))
    if foundPots == 0:
        print(u"This script did not work because no ‘.pot’ files were found.")
        print(u"Please, run ‘updateTemplates.py’ to generate the ‘.pot’ files, and run ‘pullTranslations.py’ to pull the latest translations from Transifex.")
        print(u"Then you can run this script to generate ‘.po’ files with the longest strings.")


if __name__ == "__main__":
    main()
|
0
source/tools/i18n/potter/__init__.py
Normal file
0
source/tools/i18n/potter/__init__.py
Normal file
575
source/tools/i18n/potter/catalog.py
Normal file
575
source/tools/i18n/potter/catalog.py
Normal file
@ -0,0 +1,575 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (C) 2007-2011 Edgewall Software
|
||||
# Copyright (C) 2013 Wildfire Games
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
|
||||
# following conditions are met:
|
||||
#
|
||||
# Redistributions of source code must retain the above copyright notice, this list of conditions and the following
|
||||
# disclaimer.
|
||||
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
|
||||
# disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
# The name of the author may not be used to endorse or promote products derived from this software without specific
|
||||
# prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
|
||||
# AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
||||
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
|
||||
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
# This software consists of voluntary contributions made by many
|
||||
# individuals. For the exact contribution history, see the revision
|
||||
# history and logs:
|
||||
# • http://babel.edgewall.org/log/trunk/babel/messages
|
||||
# • http://trac.wildfiregames.com/browser/ps/trunk/source/tools/i18n/potter
|
||||
|
||||
"""Data structures for message catalogs."""
|
||||
|
||||
from __future__ import absolute_import, division, print_function, unicode_literals
|
||||
|
||||
from cgi import parse_header
|
||||
from datetime import datetime, time as time_
|
||||
from difflib import get_close_matches
|
||||
from email import message_from_string
|
||||
from copy import copy
|
||||
import re
|
||||
import time
|
||||
|
||||
from collections import OrderedDict
|
||||
|
||||
from potter.util import distinct, LOCALTZ, UTC, FixedOffsetTimezone
|
||||
|
||||
|
||||
__all__ = ['Message', 'Catalog']
|
||||
__docformat__ = 'restructuredtext en'
|
||||
|
||||
|
||||
# Matches Python %-style placeholders, e.g. "%s" or "%(name)d".  Used by
# Message.guessFormatFlag to set the "python-format" flag.
PYTHON_FORMAT = re.compile(r"""(?x)
\%
(?:\(([\w]*)\))?
(
[-#0\ +]?(?:\*|[\d]+)?
(?:\.(?:\*|[\d]+))?
[hlL]?
)
([diouxXeEfFgGcrs%])
""")

# Matches C printf-style placeholders, e.g. "%d" or "%3$s".  Used by
# Message.guessFormatFlag to set the "c-format" flag.
C_FORMAT = re.compile(r"""(?x)
\%
(\d+\$)?
([-+ 0#]+)?
(v|\*(\d+\$)?v)?
0*
(\d+|\*(\d+\$)?)?
(\.(\d*|\*(\d+\$)?))?
[hlqLV]?
([%bcdefginopsuxDFOUX])
""")
|
||||
|
||||
|
||||
class Message(object):
    """Representation of a single message in a catalog."""

    def __init__(self, id, string=u'', locations=(), flags=(), auto_comments=(),
                 user_comments=(), previous_id=(), lineno=None, context=None, formatFlag=None):
        """Create the message object.

        :param id: the message ID, or a ``(singular, plural)`` tuple for
                   pluralizable messages
        :param string: the translated message string, or a
                       ``(singular, plural)`` tuple for pluralizable messages
        :param locations: a sequence of ``(filename, lineno)`` tuples
        :param flags: a set or sequence of flags
        :param auto_comments: a sequence of automatic comments for the message
        :param user_comments: a sequence of user comments for the message
        :param previous_id: the previous message ID, or a ``(singular, plural)``
                            tuple for pluralizable messages
        :param lineno: the line number on which the msgid line was found in the
                       PO file, if any
        :param context: the message context
        :param formatFlag: a format flag (e.g. ``"c-format"``) to add to
                           ``flags``; when `None`, it is guessed from the
                           message ID via `guessFormatFlag`
        """
        self.id = id #: The message ID
        if not string and self.pluralizable:
            string = (u'', u'')
        self.string = string #: The message translation
        # distinct() removes duplicates while preserving order.
        self.locations = list(distinct(locations))
        self.flags = set(flags)
        if id and formatFlag is None:
            formatFlag = self.guessFormatFlag();
        if formatFlag:
            self.flags.add(formatFlag)
        self.auto_comments = list(distinct(auto_comments))
        self.user_comments = list(distinct(user_comments))
        if isinstance(previous_id, str):
            self.previous_id = [previous_id]
        else:
            self.previous_id = list(previous_id)
        self.lineno = lineno
        self.context = context

    def __repr__(self):
        return '<%s %r (flags: %r)>' % (type(self).__name__, self.id,
                                        list(self.flags))

    def __cmp__(self, obj):
        """Compare Messages, taking into account plural ids"""
        # NOTE: relies on the Python 2 built-in cmp(); the rich-comparison
        # methods below all delegate to this.
        def values_to_compare():
            # Pluralizable messages are compared by their singular form.
            if isinstance(obj, Message):
                plural = self.pluralizable
                obj_plural = obj.pluralizable
                if plural and obj_plural:
                    return self.id[0], obj.id[0]
                elif plural:
                    return self.id[0], obj.id
                elif obj_plural:
                    return self.id, obj.id[0]
            return self.id, obj.id
        this, other = values_to_compare()
        return cmp(this, other)

    def __gt__(self, other):
        return self.__cmp__(other) > 0

    def __lt__(self, other):
        return self.__cmp__(other) < 0

    def __ge__(self, other):
        return self.__cmp__(other) >= 0

    def __le__(self, other):
        return self.__cmp__(other) <= 0

    def __eq__(self, other):
        return self.__cmp__(other) == 0

    def __ne__(self, other):
        return self.__cmp__(other) != 0

    def clone(self):
        # Shallow-copy each constructor argument so the clone does not share
        # mutable state (lists, sets) with the original message.
        return Message(*map(copy, (self.id, self.string, self.locations,
                                   self.flags, self.auto_comments,
                                   self.user_comments, self.previous_id,
                                   self.lineno, self.context)))

    @property
    def pluralizable(self):
        """Whether the message is pluralizable.

        >>> Message('foo').pluralizable
        False
        >>> Message(('foo', 'bar')).pluralizable
        True

        :type: `bool`"""
        return isinstance(self.id, (list, tuple))

    def guessFormatFlag(self):
        """ If the message contains parameters, this function returns a string with the flag that represents the format
        of those parameters.

        :type: `string`"""
        ids = self.id
        if not isinstance(ids, (list, tuple)):
            ids = [ids]
        # C-style placeholders are checked first; a match in any id form
        # (singular or plural) decides the flag.
        for id in ids:
            if C_FORMAT.search(id) is not None:
                return "c-format"
        for id in ids:
            if PYTHON_FORMAT.search(id) is not None:
                return "python-format"
        return None
|
||||
|
||||
|
||||
DEFAULT_HEADER = u"""\
|
||||
# Translation template for PROJECT.
|
||||
# Copyright © YEAR ORGANIZATION
|
||||
# This file is distributed under the same license as the PROJECT project.
|
||||
#"""
|
||||
|
||||
|
||||
class Catalog(object):
|
||||
"""Representation of a message catalog."""
|
||||
|
||||
    def __init__(self, locale=None, domain=None, header_comment=DEFAULT_HEADER,
                 project=None, version=None, copyright_holder=None,
                 msgid_bugs_address=None, creation_date=None,
                 revision_date=None, charset='utf-8'):
        """Initialize the catalog object.

        :param domain: the message domain
        :param header_comment: the header comment as string, or `None` for the
                               default header
        :param project: the project's name
        :param version: the project's version
        :param copyright_holder: the copyright holder of the catalog
        :param msgid_bugs_address: the email address or URL to submit bug
                                   reports to
        :param creation_date: the date the catalog was created
        :param revision_date: the date the catalog was revised
        :param charset: the encoding to use in the output
        """
        # NOTE(review): `locale` is accepted but never stored — presumably kept
        # for babel API compatibility; confirm before removing.
        self.domain = domain #: The message domain
        self._header_comment = header_comment
        self._messages = OrderedDict()

        self.project = project or 'PROJECT' #: The project name
        self.version = version #: The project version
        self.copyright_holder = copyright_holder or 'ORGANIZATION'
        self.msgid_bugs_address = msgid_bugs_address or 'EMAIL@ADDRESS'

        self.charset = charset or 'utf-8'

        # Normalize dates: naive datetimes are assumed to be in local time.
        if creation_date is None:
            creation_date = datetime.now(LOCALTZ)
        elif isinstance(creation_date, datetime) and not creation_date.tzinfo:
            creation_date = creation_date.replace(tzinfo=LOCALTZ)
        self.creation_date = creation_date #: Creation date of the template
        if revision_date is None:
            # PO placeholder value used until an actual revision date is set.
            revision_date = 'YEAR-MO-DA HO:MI+ZONE'
        elif isinstance(revision_date, datetime) and not revision_date.tzinfo:
            revision_date = revision_date.replace(tzinfo=LOCALTZ)
        self.revision_date = revision_date #: Last revision date of the catalog

        self.obsolete = OrderedDict() #: Dictionary of obsolete messages
        self._num_plurals = None
        self._plural_expr = None
|
||||
|
||||
    def _get_header_comment(self):
        # Substitute the PROJECT/YEAR/ORGANIZATION placeholders into the raw
        # header template.  The year comes from the revision date when one has
        # been set, otherwise from the current local time.
        comment = self._header_comment
        year = datetime.now(LOCALTZ).strftime('%Y')
        if hasattr(self.revision_date, 'strftime'):
            year = self.revision_date.strftime('%Y')
        comment = comment.replace('PROJECT', self.project) \
                         .replace('YEAR', year) \
                         .replace('ORGANIZATION', self.copyright_holder)
        return comment

    def _set_header_comment(self, string):
        # Store the raw template; placeholders are substituted on read.
        self._header_comment = string

    header_comment = property(_get_header_comment, _set_header_comment, doc="""\
    The header comment for the catalog.

    >>> catalog = Catalog(project='Foobar', version='1.0',
    ...                   copyright_holder='Foo Company')
    >>> print catalog.header_comment #doctest: +ELLIPSIS
    # Translations template for Foobar.
    # Copyright (C) ... Foo Company
    # This file is distributed under the same license as the Foobar project.
    # FIRST AUTHOR <EMAIL@ADDRESS>, ....
    #

    The header can also be set from a string. Any known upper-case variables
    will be replaced when the header is retrieved again:

    >>> catalog = Catalog(project='Foobar', version='1.0',
    ...                   copyright_holder='Foo Company')
    >>> catalog.header_comment = '''\\
    ... # The POT for my really cool PROJECT project.
    ... # Copyright (C) 1990-2003 ORGANIZATION
    ... # This file is distributed under the same license as the PROJECT
    ... # project.
    ... #'''
    >>> print catalog.header_comment
    # The POT for my really cool Foobar project.
    # Copyright (C) 1990-2003 Foo Company
    # This file is distributed under the same license as the Foobar
    # project.
    #

    :type: `unicode`
    """)
|
||||
|
||||
    def _get_mime_headers(self):
        # Build the ordered (name, value) pairs for the special ``msgid ""``
        # header entry of a PO/POT file.
        headers = []
        projectIdVersion = self.project
        if self.version:
            projectIdVersion += " " + self.version
        headers.append(('Project-Id-Version', projectIdVersion))
        headers.append(('Report-Msgid-Bugs-To', self.msgid_bugs_address))
        headers.append(('POT-Creation-Date', self.creation_date.strftime('%Y-%m-%d %H:%M%z')))
        # revision_date may still be the 'YEAR-MO-DA HO:MI+ZONE' placeholder
        # string, which is emitted verbatim.
        if isinstance(self.revision_date, (datetime, time_, int, float)):
            headers.append(('PO-Revision-Date', self.revision_date.strftime('%Y-%m-%d %H:%M%z')))
        else:
            headers.append(('PO-Revision-Date', self.revision_date))
        headers.append(('MIME-Version', '1.0'))
        headers.append(('Content-Type',
                        'text/plain; charset=%s' % self.charset))
        headers.append(('Content-Transfer-Encoding', '8bit'))
        headers.append(('Generated-By', 'Potter 1.0\n'))
        return headers
|
||||
|
||||
def _set_mime_headers(self, headers):
|
||||
for name, value in headers:
|
||||
name = name.lower()
|
||||
if name == 'project-id-version':
|
||||
parts = value.split(' ')
|
||||
self.project = u' '.join(parts[:-1])
|
||||
self.version = parts[-1]
|
||||
elif name == 'report-msgid-bugs-to':
|
||||
self.msgid_bugs_address = value
|
||||
elif name == 'content-type':
|
||||
mimetype, params = parse_header(value)
|
||||
if 'charset' in params:
|
||||
self.charset = params['charset'].lower()
|
||||
elif name == 'plural-forms':
|
||||
_, params = parse_header(' ;' + value)
|
||||
try:
|
||||
self._num_plurals = int(params.get('nplurals', 2))
|
||||
except ValueError:
|
||||
self._num_plurals = 2
|
||||
self._plural_expr = params.get('plural', '(n != 1)')
|
||||
elif name == 'pot-creation-date':
|
||||
# FIXME: this should use dates.parse_datetime as soon as that
|
||||
# is ready
|
||||
value, tzoffset, _ = re.split('([+-]\d{4})$', value, 1)
|
||||
|
||||
tt = time.strptime(value, '%Y-%m-%d %H:%M')
|
||||
ts = time.mktime(tt)
|
||||
|
||||
# Separate the offset into a sign component, hours, and minutes
|
||||
plus_minus_s, rest = tzoffset[0], tzoffset[1:]
|
||||
hours_offset_s, mins_offset_s = rest[:2], rest[2:]
|
||||
|
||||
# Make them all integers
|
||||
plus_minus = int(plus_minus_s + '1')
|
||||
hours_offset = int(hours_offset_s)
|
||||
mins_offset = int(mins_offset_s)
|
||||
|
||||
# Calculate net offset
|
||||
net_mins_offset = hours_offset * 60
|
||||
net_mins_offset += mins_offset
|
||||
net_mins_offset *= plus_minus
|
||||
|
||||
# Create an offset object
|
||||
tzoffset = FixedOffsetTimezone(net_mins_offset)
|
||||
|
||||
# Store the offset in a datetime object
|
||||
dt = datetime.fromtimestamp(ts)
|
||||
self.creation_date = dt.replace(tzinfo=tzoffset)
|
||||
elif name == 'po-revision-date':
|
||||
# Keep the value if it's not the default one
|
||||
if 'YEAR' not in value:
|
||||
# FIXME: this should use dates.parse_datetime as soon as
|
||||
# that is ready
|
||||
value, tzoffset, _ = re.split('([+-]\d{4})$', value, 1)
|
||||
tt = time.strptime(value, '%Y-%m-%d %H:%M')
|
||||
ts = time.mktime(tt)
|
||||
|
||||
# Separate the offset into a sign component, hours, and
|
||||
# minutes
|
||||
plus_minus_s, rest = tzoffset[0], tzoffset[1:]
|
||||
hours_offset_s, mins_offset_s = rest[:2], rest[2:]
|
||||
|
||||
# Make them all integers
|
||||
plus_minus = int(plus_minus_s + '1')
|
||||
hours_offset = int(hours_offset_s)
|
||||
mins_offset = int(mins_offset_s)
|
||||
|
||||
# Calculate net offset
|
||||
net_mins_offset = hours_offset * 60
|
||||
net_mins_offset += mins_offset
|
||||
net_mins_offset *= plus_minus
|
||||
|
||||
# Create an offset object
|
||||
tzoffset = FixedOffsetTimezone(net_mins_offset)
|
||||
|
||||
# Store the offset in a datetime object
|
||||
dt = datetime.fromtimestamp(ts)
|
||||
self.revision_date = dt.replace(tzinfo=tzoffset)
|
||||
|
||||
mime_headers = property(_get_mime_headers, _set_mime_headers, doc="""\
|
||||
The MIME headers of the catalog, used for the special ``msgid ""`` entry.
|
||||
|
||||
Here's an example of the output for such a catalog template:
|
||||
|
||||
>>> created = datetime(1990, 4, 1, 15, 30, tzinfo=UTC)
|
||||
>>> catalog = Catalog(project='Foobar', version='1.0',
|
||||
... creation_date=created)
|
||||
>>> for name, value in catalog.mime_headers:
|
||||
... print '%s: %s' % (name, value)
|
||||
Project-Id-Version: Foobar 1.0
|
||||
Report-Msgid-Bugs-To: EMAIL@ADDRESS
|
||||
POT-Creation-Date: 1990-04-01 15:30+0000
|
||||
PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE
|
||||
MIME-Version: 1.0
|
||||
Content-Type: text/plain; charset=utf-8
|
||||
Content-Transfer-Encoding: 8bit
|
||||
Generated-By: Potter ...
|
||||
|
||||
:type: `list`
|
||||
""")
|
||||
|
||||
    def __contains__(self, id):
        """Return whether the catalog has a message with the specified ID."""
        # _key_for normalizes plural ids so lookups match __setitem__.
        return self._key_for(id) in self._messages
|
||||
|
||||
    def __len__(self):
        """The number of messages in the catalog.

        This does not include the special ``msgid ""`` entry."""
        return len(self._messages)
|
||||
|
||||
    def __iter__(self):
        """Iterates through all the entries in the catalog, in the order they
        were added, yielding a `Message` object for every entry.

        :rtype: ``iterator``"""
        # The first message yielded is the synthetic header entry (msgid ""),
        # whose "translation" is the MIME headers joined with newlines.
        buf = []
        for name, value in self.mime_headers:
            buf.append('%s: %s' % (name, value))
        yield Message(u'', '\n'.join(buf), flags=set())
        for key in self._messages:
            yield self._messages[key]
|
||||
|
||||
    def __repr__(self):
        # e.g. <Catalog u'engine'>
        return '<%s %r>' % (type(self).__name__, self.domain)
|
||||
|
||||
    def __delitem__(self, id):
        """Delete the message with the specified ID."""
        self.delete(id)
|
||||
|
||||
    def __getitem__(self, id):
        """Return the message with the specified ID.

        :param id: the message ID
        :return: the message with the specified ID, or `None` if no such
                 message is in the catalog
        :rtype: `Message`
        """
        # NOTE: unlike dict, a missing key returns None instead of raising.
        return self.get(id)
|
||||
|
||||
    def __setitem__(self, id, message):
        """Add or update the message with the specified ID.

        >>> catalog = Catalog()
        >>> catalog[u'foo'] = Message(u'foo')
        >>> catalog[u'foo']
        <Message u'foo' (flags: [])>

        If a message with that ID is already in the catalog, it is updated
        to include the locations and flags of the new message.

        >>> catalog = Catalog()
        >>> catalog[u'foo'] = Message(u'foo', locations=[('main.py', 1)])
        >>> catalog[u'foo'].locations
        [('main.py', 1)]
        >>> catalog[u'foo'] = Message(u'foo', locations=[('utils.py', 5)])
        >>> catalog[u'foo'].locations
        [('main.py', 1), ('utils.py', 5)]

        :param id: the message ID
        :param message: the `Message` object
        """
        assert isinstance(message, Message), 'expected a Message object'
        key = self._key_for(id, message.context)
        current = self._messages.get(key)
        if current:
            if message.pluralizable and not current.pluralizable:
                # The new message adds pluralization
                current.id = message.id
                current.string = message.string
            # Merge the new message's metadata into the existing entry,
            # deduplicating while preserving order.
            current.locations = list(distinct(current.locations + message.locations))
            current.auto_comments = list(distinct(current.auto_comments + message.auto_comments))
            current.user_comments = list(distinct(current.user_comments + message.user_comments))
            current.flags |= message.flags
            message = current
        elif id == '':
            # special treatment for the header message
            def _parse_header(header_string):
                # message_from_string only works for str, not for unicode
                headers = message_from_string(header_string.encode('utf8'))
                decoded_headers = {}
                for name, value in headers.items():
                    name = name.decode('utf8')
                    value = value.decode('utf8')
                    decoded_headers[name] = value
                return decoded_headers
            # The header's "translation" carries the MIME headers; its user
            # comments become the catalog's header comment.
            self.mime_headers = _parse_header(message.string).items()
            self.header_comment = '\n'.join(['# %s' % comment for comment
                                             in message.user_comments])
        else:
            if isinstance(id, (list, tuple)):
                # Pluralizable ids require a sequence of translation strings.
                assert isinstance(message.string, (list, tuple)), \
                    'Expected sequence but got %s' % type(message.string)
            self._messages[key] = message
|
||||
|
||||
def add(self, id, string=None, locations=(), flags=(), auto_comments=(),
        user_comments=(), previous_id=(), lineno=None, context=None, formatFlag=None):
    """Add or update the message with the specified ID.

    Builds a `Message` from the given arguments and stores it through
    ``self[id]`` (``__setitem__``), so an existing entry with the same key
    is merged rather than blindly replaced.

    :param id: the message ID, or a ``(singular, plural)`` tuple for
               pluralizable messages
    :param string: the translated message string, or a
                   ``(singular, plural)`` tuple for pluralizable messages
    :param locations: a sequence of strings that determine where a message was found
    :param flags: a set or sequence of flags
    :param auto_comments: a sequence of automatic comments
    :param user_comments: a sequence of user comments
    :param previous_id: the previous message ID, or a ``(singular, plural)``
                        tuple for pluralizable messages
    :param lineno: the line number on which the msgid line was found in the
                   PO file, if any
    :param context: the message context
    :param formatFlag: forwarded to `Message` unchanged
    :return: the newly added message
    :rtype: `Message`
    """
    newMessage = Message(id, string, locations, flags, auto_comments,
                         user_comments, previous_id, lineno=lineno,
                         context=context, formatFlag=formatFlag)
    self[id] = newMessage
    return newMessage
|
||||
|
||||
def get(self, id, context=None):
    """Return the message with the specified ID and context.

    :param id: the message ID
    :param context: the message context, or ``None`` for no context
    :return: the matching message, or ``None`` if it is not in the catalog
    :rtype: `Message`
    """
    key = self._key_for(id, context)
    return self._messages.get(key)
|
||||
|
||||
def delete(self, id, context=None):
    """Delete the message with the specified ID and context.

    Does nothing when no such message is present.

    :param id: the message ID
    :param context: the message context, or ``None`` for no context
    """
    # pop() with a default removes the entry if present and never raises.
    self._messages.pop(self._key_for(id, context), None)
|
||||
|
||||
@property
def num_plurals(self):
    """Number of plural forms for the catalog.

    Returns the stored ``self._num_plurals`` when it is not ``None`` and
    falls back to 2 otherwise.
    """
    return 2 if self._num_plurals is None else self._num_plurals
|
||||
|
||||
def _key_for(self, id, context=None):
|
||||
"""The key for a message is just the singular ID even for pluralizable
|
||||
messages, but is a ``(msgid, msgctxt)`` tuple for context-specific
|
||||
messages.
|
||||
"""
|
||||
key = id
|
||||
if isinstance(key, (list, tuple)):
|
||||
key = id[0]
|
||||
if context is not None:
|
||||
key = (key, context)
|
||||
return key
|
48
source/tools/i18n/potter/extract.py
Normal file
48
source/tools/i18n/potter/extract.py
Normal file
@ -0,0 +1,48 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (C) 2007-2011 Edgewall Software
|
||||
# Copyright (C) 2013 Wildfire Games
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
|
||||
# following conditions are met:
|
||||
#
|
||||
# Redistributions of source code must retain the above copyright notice, this list of conditions and the following
|
||||
# disclaimer.
|
||||
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
|
||||
# disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
# The name of the author may not be used to endorse or promote products derived from this software without specific
|
||||
# prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
|
||||
# AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
||||
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
|
||||
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
# This software consists of voluntary contributions made by many
|
||||
# individuals. For the exact contribution history, see the revision
|
||||
# history and logs:
|
||||
# • http://babel.edgewall.org/log/trunk/babel/messages
|
||||
# • http://trac.wildfiregames.com/browser/ps/trunk/source/tools/i18n/potter
|
||||
|
||||
"""Basic infrastructure for extracting localizable messages from source files.
|
||||
|
||||
This module defines an extensible system for collecting localizable message
|
||||
strings from a variety of sources. A native extractor for Python source files
|
||||
is builtin, extractors for other sources can be added using very simple plugins.
|
||||
|
||||
The main entry points into the extraction functionality are the functions
|
||||
`extract_from_dir` and `extract_from_file`.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import, division, print_function, unicode_literals
|
||||
|
||||
__all__ = ['getExtractorInstance']
|
||||
__docformat__ = 'restructuredtext en'
|
||||
|
||||
|
||||
def getExtractorInstance(code, directoryPath, filemasks, options=None):
    """Instantiate the extractor class named *code* from ``potter.extractors``.

    :param code: name of the extractor class to use (e.g. "javascript", "json")
    :param directoryPath: root directory the extractor will scan
    :param filemasks: include/exclude masks (list or dict) forwarded to the extractor
    :param options: extractor-specific options dictionary; defaults to an empty dict
    :return: a new instance of the requested extractor class
    """
    # None sentinel instead of a mutable "options={}" default: a shared dict
    # default would be aliased across every call of this function.
    if options is None:
        options = {}
    extractorClass = getattr(__import__("potter.extractors", {}, {}, [code]), code)
    return extractorClass(directoryPath, filemasks, options)
|
411
source/tools/i18n/potter/extractors.py
Normal file
411
source/tools/i18n/potter/extractors.py
Normal file
@ -0,0 +1,411 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
#
|
||||
# Copyright (C) 2013 Wildfire Games
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
|
||||
# following conditions are met:
|
||||
#
|
||||
# Redistributions of source code must retain the above copyright notice, this list of conditions and the following
|
||||
# disclaimer.
|
||||
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
|
||||
# disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
# The name of the author may not be used to endorse or promote products derived from this software without specific
|
||||
# prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
|
||||
# AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
||||
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
|
||||
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
# This software consists of voluntary contributions made by many
|
||||
# individuals. For the exact contribution history, see the revision
|
||||
# history and logs:
|
||||
# • http://babel.edgewall.org/log/trunk/babel/messages
|
||||
# • http://trac.wildfiregames.com/browser/ps/trunk/source/tools/i18n/potter
|
||||
|
||||
from __future__ import absolute_import, division, print_function, unicode_literals
|
||||
|
||||
import codecs, os, sys
|
||||
import json as jsonParser
|
||||
|
||||
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
|
||||
from textwrap import dedent
|
||||
|
||||
from potter.util import parse_encoding, pathmatch, relpath
|
||||
|
||||
try:
|
||||
stringType = unicode
|
||||
except:
|
||||
stringType = str
|
||||
|
||||
|
||||
class Extractor(object):
    """Base class for message extractors.

    Walks a directory tree, filters filenames through include/exclude masks
    and delegates per-file extraction to :meth:`extractFromFile`, which
    concrete subclasses must implement.
    """

    def __init__(self, directoryPath, filemasks, options):
        """
        :param directoryPath: root directory to scan for source files
        :param filemasks: either a plain list of include masks, or a dict with
                          "includeMasks" and "excludeMasks" lists
        :param options: extractor-specific options dictionary
        """
        self.directoryPath = directoryPath
        self.options = options

        if isinstance(filemasks, dict):
            self.includeMasks = filemasks["includeMasks"]
            self.excludeMasks = filemasks["excludeMasks"]
        else:
            self.includeMasks = filemasks
            self.excludeMasks = []

    def run(self):
        """ Extracts messages.

        :return: An iterator over ``(message, context, location, comment)`` tuples.
        :rtype: ``iterator``
        """
        directoryAbsolutePath = os.path.abspath(self.directoryPath)
        for root, folders, filenames in os.walk(directoryAbsolutePath):
            # Prune hidden/private folders in place so os.walk skips them.
            # The original removed entries from `folders` while iterating over
            # the same list, which skips the element that follows each removed
            # one; rebuilding the list via slice assignment avoids that bug.
            folders[:] = [folder for folder in folders
                          if not folder.startswith('.') and not folder.startswith('_')]
            folders.sort()
            filenames.sort()
            for filename in filenames:
                filename = relpath(os.path.join(root, filename).replace(os.sep, '/'), self.directoryPath)
                for filemask in self.excludeMasks:
                    if pathmatch(filemask, filename):
                        break
                else:
                    # Not excluded: yield messages from the first matching include mask.
                    for filemask in self.includeMasks:
                        if pathmatch(filemask, filename):
                            filepath = os.path.join(directoryAbsolutePath, filename)
                            for message, context, position, comments in self.extractFromFile(filepath):
                                yield message, context, filename + ":" + str(position), comments

    def extractFromFile(self, filepath):
        """ Extracts messages from a specific file. Subclasses must override this.

        :return: An iterator over ``(message, context, position, comments)`` tuples.
        :rtype: ``iterator``
        """
        pass
|
||||
|
||||
|
||||
|
||||
class javascript(Extractor):
    """ Extract messages from JavaScript source code.

    Uses the lexer in ``potter.jslexer`` to find calls to the configured
    keyword functions and collects their string arguments, keyword argument
    specs and preceding translator comments.
    """

    empty_msgid_warning = ('%s: warning: Empty msgid. It is reserved by GNU gettext: gettext("") '
                           'returns the header entry with meta information, not the empty string.')

    def extractJavascriptFromFile(self, fileObject):
        """Tokenize *fileObject* and yield raw keyword-call matches.

        :return: iterator over ``(lineno, funcname, messages, comments)`` tuples,
                 where ``messages`` is a string, a tuple of arguments, or None
        """
        from potter.jslexer import tokenize, unquote_string
        funcname = message_lineno = None
        messages = []
        last_argument = None
        translator_comments = []
        concatenate_next = False
        last_token = None
        # -1 = not inside a keyword call; 0 = at the call's own parentheses;
        # >0 = inside nested parentheses within the call.
        call_stack = -1
        comment_tags = self.options.get('commentTags', [])
        keywords = self.options.get('keywords', {}).keys()

        for token in tokenize(fileObject.read()):
            if token.type == 'operator' and token.value == '(':
                if funcname:
                    message_lineno = token.lineno
                    call_stack += 1

            elif call_stack == -1 and token.type == 'linecomment':
                value = token.value[2:].strip()
                # Continuation of a comment block started on the previous line.
                if translator_comments and \
                   translator_comments[-1][0] == token.lineno - 1:
                    translator_comments.append((token.lineno, value))
                    continue

                for comment_tag in comment_tags:
                    if value.startswith(comment_tag):
                        translator_comments.append((token.lineno, value.strip()))
                        break

            elif token.type == 'multilinecomment':
                # only one multi-line comment may precede a translation
                translator_comments = []
                value = token.value[2:-2].strip()
                for comment_tag in comment_tags:
                    if value.startswith(comment_tag):
                        lines = value.splitlines()
                        if lines:
                            lines[0] = lines[0].strip()
                            lines[1:] = dedent('\n'.join(lines[1:])).splitlines()
                            for offset, line in enumerate(lines):
                                translator_comments.append((token.lineno + offset,
                                                            line))
                        break

            elif funcname and call_stack == 0:
                if token.type == 'operator' and token.value == ')':
                    # End of the keyword call: flush the collected arguments.
                    if last_argument is not None:
                        messages.append(last_argument)
                    if len(messages) > 1:
                        messages = tuple(messages)
                    elif messages:
                        messages = messages[0]
                    else:
                        messages = None

                    # Comments don't apply unless they immediately precede the
                    # message
                    if translator_comments and \
                       translator_comments[-1][0] < message_lineno - 1:
                        translator_comments = []

                    if messages is not None:
                        yield (message_lineno, funcname, messages,
                               [comment[1] for comment in translator_comments])

                    funcname = message_lineno = last_argument = None
                    concatenate_next = False
                    translator_comments = []
                    messages = []
                    call_stack = -1

                elif token.type == 'string':
                    new_value = unquote_string(token.value)
                    if concatenate_next:
                        # 'a' + 'b' concatenation inside a single argument.
                        last_argument = (last_argument or '') + new_value
                        concatenate_next = False
                    else:
                        last_argument = new_value

                elif token.type == 'operator':
                    if token.value == ',':
                        if last_argument is not None:
                            messages.append(last_argument)
                            last_argument = None
                        else:
                            # Non-string argument: keep a placeholder so the
                            # 1-based keyword spec indices stay aligned.
                            messages.append(None)
                        concatenate_next = False
                    elif token.value == '+':
                        concatenate_next = True

            elif call_stack > 0 and token.type == 'operator' \
                 and token.value == ')':
                call_stack -= 1

            elif funcname and call_stack == -1:
                funcname = None

            elif call_stack == -1 and token.type == 'name' and \
                 token.value in keywords and \
                 (last_token is None or last_token.type != 'name' or
                  last_token.value != 'function'):
                funcname = token.value

            last_token = token

    def extractFromFile(self, filepath):
        """Yield ``(message, context, position, comments)`` tuples for all
        keyword calls in *filepath* that satisfy their keyword spec.
        """
        with codecs.open(filepath, 'r', encoding='utf-8-sig') as fileObject:
            for lineno, funcname, messages, comments in self.extractJavascriptFromFile(fileObject):
                if funcname:
                    spec = self.options.get('keywords', {})[funcname] or (1,)
                else:
                    spec = (1,)
                if not isinstance(messages, (list, tuple)):
                    messages = [messages]
                if not messages:
                    continue

                # Validate the messages against the keyword's specification
                context = None
                msgs = []
                invalid = False
                # last_index is 1 based like the keyword spec
                last_index = len(messages)
                for index in spec:
                    if isinstance(index, (list, tuple)):
                        # A nested index marks the context argument.
                        context = messages[index[0] - 1]
                        continue
                    if last_index < index:
                        # Not enough arguments
                        invalid = True
                        break
                    message = messages[index - 1]
                    if message is None:
                        invalid = True
                        break
                    msgs.append(message)
                if invalid:
                    continue

                # keyword spec indexes are 1 based, therefore '-1'
                if isinstance(spec[0], (tuple, list)):
                    # context-aware *gettext method
                    first_msg_index = spec[1] - 1
                else:
                    first_msg_index = spec[0] - 1
                if not messages[first_msg_index]:
                    # An empty string msgid isn't valid, emit a warning.
                    where = '%s:%i' % (hasattr(fileObject, 'name') and
                                       fileObject.name or '(unknown)', lineno)
                    # The original used the Python 2 print statement
                    # ("print >> sys.stderr, ..."), which is a syntax error
                    # under this module's "from __future__ import print_function".
                    print(self.empty_msgid_warning % where, file=sys.stderr)
                    continue

                messages = tuple(msgs)
                if len(messages) == 1:
                    messages = messages[0]

                yield messages, context, lineno, comments
|
||||
|
||||
|
||||
|
||||
class cpp(javascript):
    """Extract messages from C++ source code.

    Defines no members of its own: extraction is delegated entirely to the
    inherited JavaScript tokenizer-based implementation.
    """
|
||||
|
||||
|
||||
|
||||
class txt(Extractor):
    """ Extract messages from plain text files.
    """

    def extractFromFile(self, filepath):
        """Yield each non-empty (stripped) line of the file as a message.

        :return: iterator over ``(message, context, position, comments)``
                 tuples, where position is the 1-based line number as a string
        """
        with codecs.open(filepath, "r", encoding='utf-8-sig') as fileObject:
            for lineNumber, rawLine in enumerate(fileObject.readlines(), start=1):
                strippedLine = rawLine.strip()
                if strippedLine:
                    yield strippedLine, None, str(lineNumber), []
|
||||
|
||||
|
||||
|
||||
class json(Extractor):
    """ Extract messages from JSON files.

    Walks the parsed JSON document; string values found under configured
    keyword keys are yielded as messages, located by a breadcrumb path
    (for example ``SomeKey[0].caption``).
    """

    def __init__(self, directoryPath=None, filemasks=None, options=None):
        """
        :param directoryPath: root directory to scan; may be omitted when the
                              instance is only used through extractFromString()
        :param filemasks: include/exclude masks; defaults to an empty list
        :param options: options dict; its "keywords" entry selects which JSON
                        keys contain translatable strings

        The original signature used mutable defaults (``filemasks=[]``,
        ``options={}``); ``None`` sentinels avoid sharing one mutable object
        across every call.
        """
        if filemasks is None:
            filemasks = []
        if options is None:
            options = {}
        super(json, self).__init__(directoryPath, filemasks, options)
        self.breadcrumbs = []
        self.keywords = self.options.get("keywords", {})

    def setOptions(self, options):
        """Replace the options (and the derived keyword set) after construction."""
        self.options = options
        self.keywords = self.options.get("keywords", {})

    @staticmethod
    def formatBreadcrumbs(breadcrumbs):
        """Render a breadcrumb list as a path string: integer items become
        ``[n]`` indices, string items become dot-separated keys."""
        firstPiece = breadcrumbs[0]
        if isinstance(firstPiece, int): outputString = "[" + str(firstPiece) + "]"
        else: outputString = firstPiece
        for piece in breadcrumbs[1:]:
            if isinstance(piece, int): outputString += "[" + str(piece) + "]"
            else: outputString += "." + piece
        return outputString

    def extractFromFile(self, filepath):
        """Yield ``(message, context, position, comments)`` tuples for *filepath*."""
        with codecs.open(filepath, "r", 'utf-8') as fileObject:
            for message, breadcrumbs in self.extractFromString(fileObject.read()):
                yield message, None, self.formatBreadcrumbs(breadcrumbs), []

    def extractFromString(self, string):
        """Yield ``(message, breadcrumbs)`` pairs from a JSON document string.

        :raises Exception: when the top-level JSON value is neither a list
                           nor a dictionary
        """
        self.breadcrumbs = []
        jsonDocument = jsonParser.loads(string)
        if isinstance(jsonDocument, list):
            for message, breadcrumbs in self.parseList(jsonDocument):
                if message: # Skip empty strings.
                    yield message, breadcrumbs
        elif isinstance(jsonDocument, dict):
            for message, breadcrumbs in self.parseDictionary(jsonDocument):
                if message: # Skip empty strings.
                    yield message, breadcrumbs
        else:
            raise Exception("Unexpected JSON document parent structure (not a list or a dictionary). You must extend the JSON extractor to support it.")

    def parseList(self, itemsList):
        # Recurse into nested containers, looking for keyword keys.
        index = 0
        for listItem in itemsList:
            self.breadcrumbs.append(index)
            if isinstance(listItem, list):
                for message, breadcrumbs in self.parseList(listItem):
                    yield message, breadcrumbs
            elif isinstance(listItem, dict):
                for message, breadcrumbs in self.parseDictionary(listItem):
                    yield message, breadcrumbs
            del self.breadcrumbs[-1]
            index += 1

    def parseDictionary(self, dictionary):
        # Keys matching a configured keyword have their values extracted;
        # other container values are recursed into.
        for keyword in dictionary:
            self.breadcrumbs.append(keyword)
            if keyword in self.keywords:
                if isinstance(dictionary[keyword], stringType):
                    yield dictionary[keyword], self.breadcrumbs
                elif isinstance(dictionary[keyword], list):
                    for message, breadcrumbs in self.extractList(dictionary[keyword]):
                        yield message, breadcrumbs
                elif isinstance(dictionary[keyword], dict):
                    for message, breadcrumbs in self.extractDictionary(dictionary[keyword]):
                        yield message, breadcrumbs
            elif isinstance(dictionary[keyword], list):
                for message, breadcrumbs in self.parseList(dictionary[keyword]):
                    yield message, breadcrumbs
            elif isinstance(dictionary[keyword], dict):
                for message, breadcrumbs in self.parseDictionary(dictionary[keyword]):
                    yield message, breadcrumbs
            del self.breadcrumbs[-1]

    def extractList(self, itemsList):
        # Under a keyword key: only direct string items are messages.
        index = 0
        for listItem in itemsList:
            self.breadcrumbs.append(index)
            if isinstance(listItem, stringType):
                yield listItem, self.breadcrumbs
            del self.breadcrumbs[-1]
            index += 1

    def extractDictionary(self, dictionary):
        # Under a keyword key: only direct string values are messages.
        for keyword in dictionary:
            self.breadcrumbs.append(keyword)
            if isinstance(dictionary[keyword], stringType):
                yield dictionary[keyword], self.breadcrumbs
            del self.breadcrumbs[-1]
|
||||
|
||||
|
||||
|
||||
class xml(Extractor):
    """ Extract messages from XML files.

    For each configured keyword (an element tag name), yields the element's
    text; a keyword may instead request JSON extraction from the element text
    ("extractJson") or extra location attributes ("locationAttributes").
    """

    def __init__(self, directoryPath, filemasks, options):
        super(xml, self).__init__(directoryPath, filemasks, options)
        self.keywords = self.options.get("keywords", {})
        self.jsonExtractor = None

    def getJsonExtractor(self):
        """Lazily create and cache a JSON sub-extractor for embedded JSON text."""
        if not self.jsonExtractor:
            self.jsonExtractor = json()
        return self.jsonExtractor

    def extractFromFile(self, filepath):
        """Yield ``(message, context, position, comments)`` tuples for elements
        whose tag matches a configured keyword; position is the source line.
        """
        from lxml import etree
        with codecs.open(filepath, "r", encoding='utf-8-sig') as fileObject:
            xmlDocument = etree.parse(fileObject)
            for keyword, keywordOptions in self.keywords.items():
                for element in xmlDocument.iter(keyword):
                    position = str(element.sourceline)
                    if "extractJson" in keywordOptions:
                        # Element text holds embedded JSON; delegate to the JSON extractor.
                        jsonExtractor = self.getJsonExtractor()
                        jsonExtractor.setOptions(keywordOptions["extractJson"])
                        for message, breadcrumbs in jsonExtractor.extractFromString(element.text):
                            yield message, None, position + ":" + json.formatBreadcrumbs(breadcrumbs), []
                    elif element.text is not None:
                        if "locationAttributes" in keywordOptions:
                            attributes = [element.get(attributeName)
                                          for attributeName in keywordOptions["locationAttributes"]
                                          if attributeName in element.attrib]
                            position += " ({attributes})".format(attributes=", ".join(attributes))
                        yield element.text, None, position, []
|
192
source/tools/i18n/potter/jslexer.py
Normal file
192
source/tools/i18n/potter/jslexer.py
Normal file
@ -0,0 +1,192 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (C) 2008-2011 Edgewall Software
|
||||
# Copyright (C) 2013 Wildfire Games
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
|
||||
# following conditions are met:
|
||||
#
|
||||
# Redistributions of source code must retain the above copyright notice, this list of conditions and the following
|
||||
# disclaimer.
|
||||
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
|
||||
# disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
# The name of the author may not be used to endorse or promote products derived from this software without specific
|
||||
# prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
|
||||
# AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
||||
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
|
||||
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
# This software consists of voluntary contributions made by many
|
||||
# individuals. For the exact contribution history, see the revision
|
||||
# history and logs:
|
||||
# • http://babel.edgewall.org/log/trunk/babel/messages
|
||||
# • http://trac.wildfiregames.com/browser/ps/trunk/source/tools/i18n/potter
|
||||
|
||||
"""A simple JavaScript 1.5 lexer which is used for the JavaScript
|
||||
extractor.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import, division, print_function, unicode_literals
|
||||
|
||||
from operator import itemgetter
|
||||
import re
|
||||
|
||||
# JavaScript operators, sorted longest-first below so the alternation in the
# 'operator' rule matches e.g. '>>>=' before '>'.
operators = [
    '+', '-', '*', '%', '!=', '==', '<', '>', '<=', '>=', '=',
    '+=', '-=', '*=', '%=', '<<', '>>', '>>>', '<<=', '>>=',
    '>>>=', '&', '&=', '|', '|=', '&&', '||', '^', '^=', '(', ')',
    '[', ']', '{', '}', '!', '--', '++', '~', ',', ';', '.', ':'
]
operators.sort(key=lambda x: -len(x))

# Single-character JavaScript string escapes.
escapes = {'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t'}

# Scanner rules tried in order; a rule with type None is matched but its text
# is discarded (whitespace and HTML comment openers).
# NOTE: inline flags such as (?u)/(?s) are written at the START of each
# pattern: placing global flags elsewhere is deprecated since Python 3.6 and
# an error since Python 3.11 (the compiled behaviour is unchanged).
rules = [
    (None, re.compile(r'(?u)\s+')),
    (None, re.compile(r'<!--.*')),
    ('linecomment', re.compile(r'//.*')),
    ('multilinecomment', re.compile(r'(?us)/\*.*?\*/')),
    ('name', re.compile(r'(?u)(\$+\w*|[^\W\d]\w*)')),
    ('number', re.compile(r'''(?x)(
        (?:0|[1-9]\d*)
        (\.\d+)?
        ([eE][-+]?\d+)? |
        (0x[a-fA-F0-9]+)
    )''')),
    ('operator', re.compile(r'(%s)' % '|'.join(map(re.escape, operators)))),
    ('string', re.compile(r'''(?xs)(
        '(?:[^'\\]*(?:\\.[^'\\]*)*)' |
        "(?:[^"\\]*(?:\\.[^"\\]*)*)"
    )'''))
]

division_re = re.compile(r'/=?')
regex_re = re.compile(r'(?s)/(?:[^/\\]*(?:\\.[^/\\]*)*)/[a-zA-Z]*')
line_re = re.compile(r'(\r\n|\n|\r)')
line_join_re = re.compile(r'\\' + line_re.pattern)
uni_escape_re = re.compile(r'[a-fA-F0-9]{1,4}')
|
||||
|
||||
|
||||
class Token(tuple):
    """Represents a token as returned by `tokenize`.

    An immutable ``(type, value, lineno)`` triple with read-only named
    accessors for each field.
    """
    __slots__ = ()

    def __new__(cls, type, value, lineno):
        return tuple.__new__(cls, (type, value, lineno))

    @property
    def type(self):
        # token category, e.g. 'string', 'operator', 'name'
        return self[0]

    @property
    def value(self):
        # raw matched source text
        return self[1]

    @property
    def lineno(self):
        # 1-based line number where the token starts
        return self[2]
|
||||
|
||||
|
||||
def indicates_division(token):
    """A helper function that helps the tokenizer decide whether the current
    token may be followed by a division operator (as opposed to the start of
    a regular expression literal).
    """
    if token.type != 'operator':
        # Values and identifiers can be divided.
        return token.type in ('name', 'number', 'string', 'regexp')
    # Only closing brackets and increments/decrements end an expression.
    return token.value in (')', ']', '}', '++', '--')
|
||||
|
||||
|
||||
def unquote_string(string):
    """Unquote a string with JavaScript rules. The string has to start with
    string delimiters (``'`` or ``"``.)

    Handles the simple escapes from ``escapes``, ``\\uXXXX`` unicode escapes
    and escaped line continuations, and strips the backslash from any other
    (bogus) escape sequence.

    :return: a string
    """
    assert string and string[0] == string[-1] and string[0] in '"\'', \
        'string provided is not properly delimited'
    # Compatibility shim: Python 2 spells the code-point constructor
    # unichr(); Python 3 renamed it to chr(). The original called unichr()
    # unconditionally, which raises NameError on Python 3.
    try:
        uchr = unichr
    except NameError:
        uchr = chr
    string = line_join_re.sub('\\1', string[1:-1])
    result = []
    add = result.append
    pos = 0

    while 1:
        # scan for the next escape
        escape_pos = string.find('\\', pos)
        if escape_pos < 0:
            break
        add(string[pos:escape_pos])

        # check which character is escaped
        next_char = string[escape_pos + 1]
        if next_char in escapes:
            add(escapes[next_char])

        # unicode escapes. try to consume up to four characters of
        # hexadecimal characters and try to interpret them as unicode
        # character point. If there is no such character point, put
        # all the consumed characters into the string.
        elif next_char in 'uU':
            escaped = uni_escape_re.match(string, escape_pos + 2)
            if escaped is not None:
                escaped_value = escaped.group()
                if len(escaped_value) == 4:
                    try:
                        add(uchr(int(escaped_value, 16)))
                    except ValueError:
                        pass
                    else:
                        pos = escape_pos + 6
                        continue
                # Fewer than four hex digits (or no such code point): keep
                # the consumed characters verbatim.
                add(next_char + escaped_value)
                pos = escaped.end()
                continue
            else:
                add(next_char)

        # bogus escape. Just remove the backslash.
        else:
            add(next_char)
        pos = escape_pos + 2

    if pos < len(string):
        add(string[pos:])

    return u''.join(result)
|
||||
|
||||
|
||||
def tokenize(source):
    """Tokenize a JavaScript source.

    Scans *source* against the module-level ``rules`` table; rules with a
    ``None`` token type (whitespace, HTML comment openers) are consumed but
    not emitted.

    :param source: the JavaScript source text
    :return: generator of `Token`\s
    """
    # Whether the previous significant token allows a '/' here to be a
    # division operator rather than the start of a regexp literal.
    may_divide = False
    pos = 0
    lineno = 1
    end = len(source)

    while pos < end:
        # handle regular rules first
        for token_type, rule in rules:
            match = rule.match(source, pos)
            if match is not None:
                break
        # if we don't have a match we don't give up yet, but check for
        # division operators or regular expression literals, based on
        # the status of `may_divide` which is determined by the last
        # processed non-whitespace token using `indicates_division`.
        else:
            if may_divide:
                match = division_re.match(source, pos)
                token_type = 'operator'
            else:
                match = regex_re.match(source, pos)
                token_type = 'regexp'
            if match is None:
                # woops. invalid syntax. jump one char ahead and try again.
                pos += 1
                continue

        token_value = match.group()
        if token_type is not None:
            token = Token(token_type, token_value, lineno)
            may_divide = indicates_division(token)
            yield token
        # Advance the line counter by any newlines the token spanned.
        lineno += len(line_re.findall(token_value))
        pos = match.end()
|
236
source/tools/i18n/potter/mofile.py
Normal file
236
source/tools/i18n/potter/mofile.py
Normal file
@ -0,0 +1,236 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (C) 2007-2011 Edgewall Software
|
||||
# Copyright (C) 2013 Wildfire Games
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
|
||||
# following conditions are met:
|
||||
#
|
||||
# Redistributions of source code must retain the above copyright notice, this list of conditions and the following
|
||||
# disclaimer.
|
||||
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
|
||||
# disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
# The name of the author may not be used to endorse or promote products derived from this software without specific
|
||||
# prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
|
||||
# AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
||||
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
|
||||
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
# This software consists of voluntary contributions made by many
|
||||
# individuals. For the exact contribution history, see the revision
|
||||
# history and logs:
|
||||
# • http://babel.edgewall.org/log/trunk/babel/messages
|
||||
# • http://trac.wildfiregames.com/browser/ps/trunk/source/tools/i18n/potter
|
||||
|
||||
"""Writing of files in the ``gettext`` MO (machine object) format.
|
||||
|
||||
:since: version 0.9
|
||||
:see: `The Format of MO Files
|
||||
<http://www.gnu.org/software/gettext/manual/gettext.html#MO-Files>`_
|
||||
"""
|
||||
|
||||
import array
|
||||
import struct
|
||||
|
||||
from catalog import Catalog, Message
|
||||
|
||||
__all__ = ['read_mo', 'write_mo']
|
||||
__docformat__ = 'restructuredtext en'
|
||||
|
||||
|
||||
# Magic numbers identifying the byte order of a gettext MO file (same 32-bit
# value read little-endian vs. big-endian). The Python 2 long suffix "L" was
# dropped: it is a syntax error on Python 3 and the value is identical on
# Python 2, where ints promote to long automatically.
LE_MAGIC = 0x950412de
BE_MAGIC = 0xde120495
|
||||
|
||||
def read_mo(fileobj):
    """Read a binary MO file from the given file-like object and return a
    corresponding `Catalog` object.

    :param fileobj: the file-like object to read the MO file from
    :return: a catalog object representing the parsed MO file
    :rtype: `Catalog`

    :note: The implementation of this function is heavily based on the
           ``GNUTranslations._parse`` method of the ``gettext`` module in the
           standard library.
    """
    # NOTE(review): this code assumes Python 2 byte strings — `buf` slices are
    # `str` and are compared/split against '\x00'/'\x04' directly.
    catalog = Catalog()
    headers = {}

    # Used only to give error messages a file name, when one is available.
    filename = getattr(fileobj, 'name', '')

    buf = fileobj.read()
    buflen = len(buf)
    unpack = struct.unpack

    # Parse the .mo file header, which consists of 5 little endian 32
    # bit words.
    magic = unpack('<I', buf[:4])[0] # Are we big endian or little endian?
    if magic == LE_MAGIC:
        version, msgcount, origidx, transidx = unpack('<4I', buf[4:20])
        ii = '<II'
    elif magic == BE_MAGIC:
        version, msgcount, origidx, transidx = unpack('>4I', buf[4:20])
        ii = '>II'
    else:
        raise IOError(0, 'Bad magic number', filename)

    # Now put all messages from the .mo file buffer into the catalog
    # dictionary
    for i in xrange(0, msgcount):
        # Each index-table entry is (length, offset) of the string.
        mlen, moff = unpack(ii, buf[origidx:origidx + 8])
        mend = moff + mlen
        tlen, toff = unpack(ii, buf[transidx:transidx + 8])
        tend = toff + tlen
        # Reject entries whose string data would run past the end of the file.
        if mend < buflen and tend < buflen:
            msg = buf[moff:mend]
            tmsg = buf[toff:tend]
        else:
            raise IOError(0, 'File is corrupt', filename)

        # See if we're looking at GNU .mo conventions for metadata
        if mlen == 0:
            # Catalog description
            # The empty msgid holds the MIME-style headers, one "Key: value"
            # per line; continuation lines are appended to the previous key.
            lastkey = key = None
            for item in tmsg.splitlines():
                item = item.strip()
                if not item:
                    continue
                if ':' in item:
                    key, value = item.split(':', 1)
                    lastkey = key = key.strip().lower()
                    headers[key] = value.strip()
                elif lastkey:
                    headers[lastkey] += '\n' + item

        # EOT (\x04) separates an optional msgctxt from the msgid.
        if '\x04' in msg: # context
            ctxt, msg = msg.split('\x04')
        else:
            ctxt = None

        # NUL (\x00) separates singular/plural msgids and the msgstr forms.
        if '\x00' in msg: # plural forms
            msg = msg.split('\x00')
            tmsg = tmsg.split('\x00')
            if catalog.charset:
                msg = [x.decode(catalog.charset) for x in msg]
                tmsg = [x.decode(catalog.charset) for x in tmsg]
        else:
            if catalog.charset:
                msg = msg.decode(catalog.charset)
                tmsg = tmsg.decode(catalog.charset)
        catalog[msg] = Message(msg, tmsg, context=ctxt)

        # advance to next entry in the seek tables
        origidx += 8
        transidx += 8

    catalog.mime_headers = headers.items()
    return catalog
|
||||
|
||||
def write_mo(fileobj, catalog, use_fuzzy=False):
    """Write a catalog to the specified file-like object using the GNU MO file
    format.

    >>> from babel.messages import Catalog
    >>> from gettext import GNUTranslations
    >>> from StringIO import StringIO

    >>> catalog = Catalog(locale='en_US')
    >>> catalog.add('foo', 'Voh')
    <Message ...>
    >>> catalog.add((u'bar', u'baz'), (u'Bahr', u'Batz'))
    <Message ...>
    >>> catalog.add('fuz', 'Futz', flags=['fuzzy'])
    <Message ...>
    >>> catalog.add('Fizz', '')
    <Message ...>
    >>> catalog.add(('Fuzz', 'Fuzzes'), ('', ''))
    <Message ...>
    >>> buf = StringIO()

    >>> write_mo(buf, catalog)
    >>> buf.seek(0)
    >>> translations = GNUTranslations(fp=buf)
    >>> translations.ugettext('foo')
    u'Voh'
    >>> translations.ungettext('bar', 'baz', 1)
    u'Bahr'
    >>> translations.ungettext('bar', 'baz', 2)
    u'Batz'
    >>> translations.ugettext('fuz')
    u'fuz'
    >>> translations.ugettext('Fizz')
    u'Fizz'
    >>> translations.ugettext('Fuzz')
    u'Fuzz'
    >>> translations.ugettext('Fuzzes')
    u'Fuzzes'

    :param fileobj: the file-like object to write to
    :param catalog: the `Catalog` instance
    :param use_fuzzy: whether translations marked as "fuzzy" should be included
                      in the output
    """
    messages = list(catalog)
    if not use_fuzzy:
        # Element 0 is the header "message" (empty msgid); it is kept even
        # when fuzzy entries are being dropped.
        messages[1:] = [m for m in messages[1:] if not m.fuzzy]
    messages.sort()

    # `ids` and `strs` accumulate the concatenated key and value string
    # tables; `offsets` records (id offset, id len, str offset, str len).
    ids = strs = ''
    offsets = []

    for message in messages:
        # For each string, we need size and file offset. Each string is NUL
        # terminated; the NUL does not count into the size.
        if message.pluralizable:
            # Plural entries: the singular and plural msgids (and the msgstr
            # forms) are joined with NUL bytes, as the MO format prescribes.
            msgid = '\x00'.join([
                msgid.encode(catalog.charset) for msgid in message.id
            ])
            msgstrs = []
            for idx, string in enumerate(message.string):
                if not string:
                    # Untranslated form: fall back to the msgid itself
                    # (singular for index 0, plural for every other index).
                    msgstrs.append(message.id[min(int(idx), 1)])
                else:
                    msgstrs.append(string)
            msgstr = '\x00'.join([
                msgstr.encode(catalog.charset) for msgstr in msgstrs
            ])
        else:
            msgid = message.id.encode(catalog.charset)
            if not message.string:
                # Untranslated message: write the msgid as its own translation.
                msgstr = message.id.encode(catalog.charset)
            else:
                msgstr = message.string.encode(catalog.charset)
        if message.context:
            # An optional context is prepended to the msgid, separated by EOT.
            msgid = '\x04'.join([message.context.encode(catalog.charset),
                                 msgid])
        offsets.append((len(ids), len(msgid), len(strs), len(msgstr)))
        ids += msgid + '\x00'
        strs += msgstr + '\x00'

    # The header is 7 32-bit unsigned integers. We don't use hash tables, so
    # the keys start right after the index tables.
    keystart = 7 * 4 + 16 * len(messages)
    valuestart = keystart + len(ids)

    # The string table first has the list of keys, then the list of values.
    # Each entry has first the size of the string, then the file offset.
    koffsets = []
    voffsets = []
    for o1, l1, o2, l2 in offsets:
        koffsets += [l1, o1 + keystart]
        voffsets += [l2, o2 + valuestart]
    offsets = koffsets + voffsets

    # NOTE(review): array.tostring() is the Python 2 spelling; Python 3.9+
    # renamed it to tobytes().
    fileobj.write(struct.pack('Iiiiiii',
                              LE_MAGIC,                   # magic
                              0,                          # version
                              len(messages),              # number of entries
                              7 * 4,                      # start of key index
                              7 * 4 + len(messages) * 8,  # start of value index
                              0, 0                        # size and offset of hash table
                             ) + array.array("i", offsets).tostring() + ids + strs)
|
506
source/tools/i18n/potter/pofile.py
Normal file
506
source/tools/i18n/potter/pofile.py
Normal file
@ -0,0 +1,506 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (C) 2007-2011 Edgewall Software
|
||||
# Copyright (C) 2013 Wildfire Games
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
|
||||
# following conditions are met:
|
||||
#
|
||||
# Redistributions of source code must retain the above copyright notice, this list of conditions and the following
|
||||
# disclaimer.
|
||||
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
|
||||
# disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
# The name of the author may not be used to endorse or promote products derived from this software without specific
|
||||
# prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
|
||||
# AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
||||
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
|
||||
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
# This software consists of voluntary contributions made by many
|
||||
# individuals. For the exact contribution history, see the revision
|
||||
# history and logs:
|
||||
# • http://babel.edgewall.org/log/trunk/babel/messages
|
||||
# • http://trac.wildfiregames.com/browser/ps/trunk/source/tools/i18n/potter
|
||||
|
||||
"""Reading and writing of files in the ``gettext`` PO (portable object)
|
||||
format.
|
||||
|
||||
:see: `The Format of PO Files
|
||||
<http://www.gnu.org/software/gettext/manual/gettext.html#PO-Files>`_
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import, division, print_function, unicode_literals
|
||||
|
||||
from datetime import datetime
|
||||
import os
|
||||
import re
|
||||
|
||||
from potter.util import wraptext
|
||||
from potter.catalog import Catalog, Message
|
||||
|
||||
__all__ = ['read_po', 'write_po']
|
||||
__docformat__ = 'restructuredtext en'
|
||||
|
||||
|
||||
|
||||
|
||||
def unescape(string):
    r"""Reverse `escape` the given string.

    Strips the surrounding double quotes and resolves the backslash escapes
    (``\\n``, ``\\t``, ``\\r``, ``\\\\`` and ``\\"``) used in PO files.

    :param string: the string to unescape
    """
    # The escape characters with a meaning of their own; backslash and the
    # double quote simply stand for themselves and fall through unchanged.
    special = {'n': '\n', 't': '\t', 'r': '\r'}

    def _resolve(match):
        escaped = match.group(1)
        return special.get(escaped, escaped)

    # string[1:-1] drops the enclosing double quotes before substitution.
    return re.compile(r'\\([\\trn"])').sub(_resolve, string[1:-1])
|
||||
|
||||
|
||||
def denormalize(string):
    r"""Reverse the normalization done by the `normalize` function.

    Joins the quoted, escaped lines of a multi-line PO string value back into
    a single unescaped string; a one-line value is simply unescaped.

    :param string: the string to denormalize
    """
    if '\n' not in string:
        # Single-line representation: just undo the escaping.
        return unescape(string)
    escaped_lines = string.splitlines()
    # A leading empty '""' line is purely cosmetic and carries no content.
    if string.startswith('""'):
        escaped_lines = escaped_lines[1:]
    return ''.join([unescape(line) for line in escaped_lines])
|
||||
|
||||
def read_po(fileobj, locale=None, domain=None, ignore_obsolete=False, charset="utf-8"):
    """Read messages from a ``gettext`` PO (portable object) file from the given
    file-like object and return a `Catalog`.

    >>> from datetime import datetime
    >>> from StringIO import StringIO
    >>> buf = StringIO('''
    ... #: main.py:1
    ... #, fuzzy, python-format
    ... msgid "foo %(name)s"
    ... msgstr "quux %(name)s"
    ...
    ... # A user comment
    ... #. An auto comment
    ... #: main.py:3
    ... msgid "bar"
    ... msgid_plural "baz"
    ... msgstr[0] "bar"
    ... msgstr[1] "baaz"
    ... ''')
    >>> catalog = read_po(buf)
    >>> catalog.revision_date = datetime(2007, 04, 01)

    >>> for message in catalog:
    ...     if message.id:
    ...         print (message.id, message.string)
    ...         print ' ', (message.locations, message.flags)
    ...         print ' ', (message.user_comments, message.auto_comments)
    (u'foo %(name)s', u'quux %(name)s')
    ([(u'main.py', 1)], set([u'fuzzy', u'python-format']))
    ([], [])
    ((u'bar', u'baz'), (u'bar', u'baaz'))
    ([(u'main.py', 3)], set([]))
    ([u'A user comment'], [u'An auto comment'])

    .. versionadded:: 1.0
       Added support for explicit charset argument.

    :param fileobj: the file-like object to read the PO file from
    :param locale: the locale identifier or `Locale` object, or `None`
                   if the catalog is not bound to a locale (which basically
                   means it's a template)
    :param domain: the message domain
    :param ignore_obsolete: whether to ignore obsolete messages in the input
    :param charset: the character set of the catalog.
    """
    catalog = Catalog(locale=locale, domain=domain, charset=charset)

    # Parser state.  Scalars are wrapped in one-element lists so the nested
    # helper functions below can rebind them (Python 2 has no ``nonlocal``).
    counter = [0]          # number of messages added to the catalog so far
    offset = [0]           # line number where the current msgid started
    messages = []          # accumulated msgid (and msgid_plural) strings
    translations = []      # accumulated [index, msgstr] pairs
    locations = []
    flags = []
    user_comments = []
    auto_comments = []
    obsolete = [False]     # whether the current entry was marked '#~'
    context = []           # accumulated msgctxt lines
    in_msgid = [False]
    in_msgstr = [False]
    in_msgctxt = [False]

    def _add_message():
        # Flush the accumulated state into a `Message` and reset it.
        translations.sort()
        if len(messages) > 1:
            # A plural entry: (msgid, msgid_plural) tuple.
            msgid = tuple([denormalize(m) for m in messages])
        else:
            msgid = denormalize(messages[0])
        if isinstance(msgid, (list, tuple)):
            # Pad missing plural forms with empty strings up to num_plurals.
            string = []
            for idx in range(catalog.num_plurals):
                try:
                    string.append(translations[idx])
                except IndexError:
                    string.append((idx, ''))
            string = tuple([denormalize(t[1]) for t in string])
        else:
            string = denormalize(translations[0][1])
        if context:
            msgctxt = denormalize('\n'.join(context))
        else:
            msgctxt = None
        message = Message(msgid, string, list(locations), set(flags),
                          auto_comments, user_comments, lineno=offset[0] + 1,
                          context=msgctxt)
        if obsolete[0]:
            if not ignore_obsolete:
                catalog.obsolete[msgid] = message
        else:
            catalog[msgid] = message
        del messages[:]; del translations[:]; del context[:]; del locations[:];
        del flags[:]; del auto_comments[:]; del user_comments[:];
        obsolete[0] = False
        counter[0] += 1

    def _process_message_line(lineno, line):
        # Dispatch on the PO keyword that starts the line.  'msgid_plural'
        # must be tested before 'msgid' because of the shared prefix.
        if line.startswith('msgid_plural'):
            in_msgid[0] = True
            msg = line[12:].lstrip()
            messages.append(msg)
        elif line.startswith('msgid'):
            in_msgid[0] = True
            offset[0] = lineno
            txt = line[5:].lstrip()
            if messages:
                # A new msgid terminates the previous entry.
                _add_message()
            messages.append(txt)
        elif line.startswith('msgstr'):
            in_msgid[0] = False
            in_msgstr[0] = True
            msg = line[6:].lstrip()
            if msg.startswith('['):
                # Plural form: 'msgstr[N] "..."'.
                idx, msg = msg[1:].split(']', 1)
                translations.append([int(idx), msg.lstrip()])
            else:
                translations.append([0, msg])
        elif line.startswith('msgctxt'):
            if messages:
                _add_message()
            in_msgid[0] = in_msgstr[0] = False
            context.append(line[7:].lstrip())
        elif line.startswith('"'):
            # Bare quoted string: continuation of whatever came before it.
            if in_msgid[0]:
                messages[-1] += u'\n' + line.rstrip()
            elif in_msgstr[0]:
                translations[-1][1] += u'\n' + line.rstrip()
            elif in_msgctxt[0]:
                context.append(line.rstrip())

    # NOTE(review): `unicode` makes this Python 2 only.
    for lineno, line in enumerate(fileobj.readlines()):
        line = line.strip()
        if not isinstance(line, unicode):
            line = line.decode(catalog.charset)
        if line.startswith('#'):
            in_msgid[0] = in_msgstr[0] = False
            if messages and translations:
                _add_message()
            if line[1:].startswith(':'):
                # '#:' reference comment: whitespace-separated file:line pairs.
                for location in line[2:].lstrip().split():
                    pos = location.rfind(':')
                    if pos >= 0:
                        try:
                            # NOTE(review): rebinds the enumeration variable
                            # `lineno` for the rest of this iteration.
                            lineno = int(location[pos + 1:])
                        except ValueError:
                            continue
                        locations.append((location[:pos], lineno))
            elif line[1:].startswith(','):
                # '#,' flag comment: comma-separated flags such as 'fuzzy'.
                for flag in line[2:].lstrip().split(','):
                    flags.append(flag.strip())
            elif line[1:].startswith('~'):
                # '#~' marks an obsolete entry; parse the rest as normal.
                obsolete[0] = True
                _process_message_line(lineno, line[2:].lstrip())
            elif line[1:].startswith('.'):
                # These are called auto-comments
                comment = line[2:].strip()
                if comment: # Just check that we're not adding empty comments
                    auto_comments.append(comment)
            else:
                # These are called user comments
                user_comments.append(line[1:].strip())
        else:
            _process_message_line(lineno, line)

    if messages:
        _add_message()

    # No actual messages found, but there was some info in comments, from which
    # we'll construct an empty header message
    elif not counter[0] and (flags or user_comments or auto_comments):
        messages.append(u'')
        translations.append([0, u''])
        _add_message()

    return catalog
|
||||
|
||||
# Pattern used by `normalize` to find positions where a long line may be
# wrapped: runs of whitespace, the trailing hyphen of a hyphenated word, or an
# em-dash written as a run of two or more '-' characters.
WORD_SEP = re.compile('('
    r'\s+|'                                 # any whitespace
    r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words
    r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w)'   # em-dash
')')
|
||||
|
||||
def escape(string):
    r"""Escape the given string so that it can be included in double-quoted
    strings in ``PO`` files.

    The result is the input wrapped in double quotes, with backslashes, tabs,
    carriage returns, newlines and double quotes backslash-escaped.

    :param string: the string to escape
    :return: the escaped string
    :rtype: `str` or `unicode`
    """
    # Backslash must be handled first so the escape sequences introduced
    # below are not themselves escaped a second time.
    escaped = string.replace('\\', '\\\\')
    for raw, encoded in (('\t', '\\t'), ('\r', '\\r'), ('\n', '\\n'), ('\"', '\\"')):
        escaped = escaped.replace(raw, encoded)
    return '"%s"' % escaped
|
||||
|
||||
def normalize(string, prefix='', width=80):
    r"""Convert a string into a format that is appropriate for .po files.

    >>> print normalize('''Say:
    ...   "hello, world!"
    ... ''', width=None)
    ""
    "Say:\n"
    "  \"hello, world!\"\n"

    >>> print normalize('''Say:
    ...   "Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
    ... ''', width=32)
    ""
    "Say:\n"
    "  \"Lorem ipsum dolor sit "
    "amet, consectetur adipisicing"
    "  elit, \"\n"

    :param string: the string to normalize
    :param prefix: a string that should be prepended to every line
    :param width: the maximum line width; use `None`, 0, or a negative number
                  to completely disable line wrapping
    :return: the normalized string
    :rtype: `unicode`
    """
    if width and width > 0:
        prefixlen = len(prefix)
        lines = []
        for line in string.splitlines(True):
            # Measure the escaped form, since that is what gets written out.
            if len(escape(line)) + prefixlen > width:
                # Too long: split into wrappable chunks and re-pack them
                # greedily into lines that fit within `width`.
                chunks = WORD_SEP.split(line)
                chunks.reverse()
                while chunks:
                    buf = []
                    size = 2  # account for the enclosing double quotes
                    while chunks:
                        # escape() adds the two quotes, hence the -2 here.
                        l = len(escape(chunks[-1])) - 2 + prefixlen
                        if size + l < width:
                            buf.append(chunks.pop())
                            size += l
                        else:
                            if not buf:
                                # handle long chunks by putting them on a
                                # separate line
                                buf.append(chunks.pop())
                            break
                    lines.append(u''.join(buf))
            else:
                lines.append(line)
    else:
        lines = string.splitlines(True)

    # A value that fits on one line is written inline after the keyword.
    if len(lines) <= 1:
        return escape(string)

    # Remove empty trailing line
    if lines and not lines[-1]:
        del lines[-1]
    lines[-1] += '\n'
    # Multi-line values start with an empty '""' marker line.
    return u'""\n' + u'\n'.join([(prefix + escape(l)) for l in lines])
|
||||
|
||||
def write_po(fileobj, catalog, width=80, no_location=False, omit_header=False,
             sort_output=False, sort_by_file=False, ignore_obsolete=False,
             include_previous=False):
    r"""Write a ``gettext`` PO (portable object) template file for a given
    message catalog to the provided file-like object.

    >>> catalog = Catalog()
    >>> catalog.add(u'foo %(name)s', locations=['main.py:1',],
    ...             flags=('fuzzy',))
    <Message...>
    >>> catalog.add((u'bar', u'baz'), locations=['main.py:3',])
    <Message...>
    >>> from StringIO import StringIO
    >>> buf = StringIO()
    >>> write_po(buf, catalog, omit_header=True)
    >>> print buf.getvalue()
    #: main.py:1
    #, fuzzy, python-format
    msgid "foo %(name)s"
    msgstr ""
    <BLANKLINE>
    #: main.py:3
    msgid "bar"
    msgid_plural "baz"
    msgstr[0] ""
    msgstr[1] ""
    <BLANKLINE>
    <BLANKLINE>

    :param fileobj: the file-like object to write to
    :param catalog: the `Catalog` instance
    :param width: the maximum line width for the generated output; use `None`,
                  0, or a negative number to completely disable line wrapping
    :param no_location: do not emit a location comment for every message
    :param omit_header: do not include the ``msgid ""`` entry at the top of the
                        output
    :param sort_output: whether to sort the messages in the output by msgid
    :param sort_by_file: whether to sort the messages in the output by their
                         locations
    :param ignore_obsolete: whether to ignore obsolete messages and not include
                            them in the output; by default they are included as
                            comments
    :param include_previous: include the old msgid as a comment when
                             updating the catalog
    """
    def _normalize(key, prefix=''):
        # Shortcut binding the module-level `normalize` to this call's width.
        return normalize(key, prefix=prefix, width=width)

    def _write(text):
        fileobj.write(text)

    def _write_comment(comment, prefix=''):
        # xgettext always wraps comments even if --no-wrap is passed;
        # provide the same behaviour
        if width and width > 0:
            _width = width
        else:
            _width = 80
        if isinstance(comment, (tuple, list)):
            # Location tuples (file, line) are joined with ':' before writing.
            commentText = str(comment[0])
            for piece in comment[1:]:
                commentText += ":" + str(piece)
            comment = commentText
        for line in wraptext(comment, _width):
            _write('#%s %s\n' % (prefix, line.strip()))

    def _write_message(message, prefix=''):
        # A tuple/list id means a plural entry (msgid + msgid_plural).
        if isinstance(message.id, (list, tuple)):
            if message.context:
                _write('%smsgctxt %s\n' % (prefix,
                                           _normalize(message.context, prefix)))
            _write('%smsgid %s\n' % (prefix, _normalize(message.id[0], prefix)))
            _write('%smsgid_plural %s\n' % (
                prefix, _normalize(message.id[1], prefix)
            ))

            # NOTE(review): always writes exactly two msgstr[] forms,
            # regardless of the catalog's plural-form count.
            for idx in range(2):
                try:
                    string = message.string[idx]
                except IndexError:
                    string = ''
                _write('%smsgstr[%d] %s\n' % (
                    prefix, idx, _normalize(string, prefix)
                ))
        else:
            if message.context:
                _write('%smsgctxt %s\n' % (prefix,
                                           _normalize(message.context, prefix)))
            _write('%smsgid %s\n' % (prefix, _normalize(message.id, prefix)))
            _write('%smsgstr %s\n' % (
                prefix, _normalize(message.string or '', prefix)
            ))

    messages = list(catalog)
    if sort_output:
        messages.sort()
    elif sort_by_file:
        # NOTE(review): cmp-based sort is Python 2 only.
        messages.sort(lambda x,y: cmp(x.locations, y.locations))

    for message in messages:
        if not message.id: # This is the header "message"
            if omit_header:
                continue
            comment_header = catalog.header_comment
            if width and width > 0:
                # Re-wrap the header comment to the requested width.
                lines = []
                for line in comment_header.splitlines():
                    lines += wraptext(line, width=width,
                                      subsequent_indent='# ')
                comment_header = u'\n'.join(lines)
            _write(comment_header + u'\n')

        for comment in message.user_comments:
            _write_comment(comment)
        for comment in message.auto_comments:
            _write_comment(comment, prefix='.')

        if not no_location:
            for location in message.locations:
                _write_comment(location, prefix=':')
        if message.flags:
            # The leading '' produces the '#, flag1, flag2' comment shape.
            _write('#%s\n' % ', '.join([''] + list(message.flags)))

        if message.previous_id and include_previous:
            _write_comment('msgid %s' % _normalize(message.previous_id[0]),
                           prefix='|')
            if len(message.previous_id) > 1:
                _write_comment('msgid_plural %s' % _normalize(
                    message.previous_id[1]
                ), prefix='|')

        _write_message(message)
        _write('\n')

    # Obsolete entries go last, written as '#~ ' comments.
    if not ignore_obsolete:
        for message in catalog.obsolete.values():
            for comment in message.user_comments:
                _write_comment(comment)
            _write_message(message, prefix='#~ ')
            _write('\n')
|
300
source/tools/i18n/potter/util.py
Normal file
300
source/tools/i18n/potter/util.py
Normal file
@ -0,0 +1,300 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (C) 2007-2011 Edgewall Software
|
||||
# Copyright (C) 2013 Wildfire Games
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
|
||||
# following conditions are met:
|
||||
#
|
||||
# Redistributions of source code must retain the above copyright notice, this list of conditions and the following
|
||||
# disclaimer.
|
||||
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
|
||||
# disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
# The name of the author may not be used to endorse or promote products derived from this software without specific
|
||||
# prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
|
||||
# AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
||||
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
|
||||
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
# This software consists of voluntary contributions made by many
|
||||
# individuals. For the exact contribution history, see the revision
|
||||
# history and logs:
|
||||
# • http://babel.edgewall.org/log/trunk/babel/messages
|
||||
# • http://trac.wildfiregames.com/browser/ps/trunk/source/tools/i18n/potter
|
||||
|
||||
"""Various utility classes and functions."""
|
||||
|
||||
from __future__ import absolute_import, division, print_function, unicode_literals
|
||||
|
||||
import codecs
|
||||
from datetime import timedelta, tzinfo
|
||||
import os
|
||||
import re
|
||||
import textwrap
|
||||
import time
|
||||
|
||||
try:
|
||||
from itertools import izip as zip
|
||||
from itertools import imap as map
|
||||
except ImportError:
|
||||
pass # Python 3
|
||||
|
||||
missing = object()
|
||||
|
||||
__all__ = ['distinct', 'pathmatch', 'relpath', 'wraptext', 'UTC',
|
||||
'LOCALTZ']
|
||||
__docformat__ = 'restructuredtext en'
|
||||
|
||||
|
||||
def distinct(iterable):
    """Yield all items in an iterable collection that are distinct.

    Unlike when using sets for a similar effect, the original ordering of the
    items in the collection is preserved by this function.

    >>> list(distinct([1, 2, 1, 3, 4, 4]))
    [1, 2, 3, 4]
    >>> list(distinct('foobar'))
    ['f', 'o', 'b', 'a', 'r']

    :param iterable: the iterable collection providing the data
    :return: the distinct items in the collection
    :rtype: ``iterator``
    """
    emitted = set()
    for element in iterable:
        if element in emitted:
            continue
        yield element
        emitted.add(element)
|
||||
|
||||
# Regexp to match python magic encoding line
PYTHON_MAGIC_COMMENT_re = re.compile(
    r'[ \t\f]* \# .* coding[=:][ \t]*([-\w.]+)', re.VERBOSE)
def parse_encoding(fp):
    """Deduce the encoding of a source file from magic comment.

    It does this in the same way as the `Python interpreter`__

    .. __: http://docs.python.org/ref/encodings.html

    The ``fp`` argument should be a seekable file object.

    (From Jeff Dairiki)
    """
    # Remember and later restore the caller's file position.
    pos = fp.tell()
    fp.seek(0)
    try:
        line1 = fp.readline()
        # NOTE(review): comparing against codecs.BOM_UTF8 (bytes) assumes the
        # file was opened in binary mode on Python 2.
        has_bom = line1.startswith(codecs.BOM_UTF8)
        if has_bom:
            line1 = line1[len(codecs.BOM_UTF8):]

        m = PYTHON_MAGIC_COMMENT_re.match(line1)
        if not m:
            # The magic comment may also appear on the second line, but only
            # if the first line is itself complete, valid Python.
            try:
                import parser
                parser.suite(line1)
            except (ImportError, SyntaxError):
                # Either it's a real syntax error, in which case the source is
                # not valid python source, or line2 is a continuation of line1,
                # in which case we don't want to scan line2 for a magic
                # comment.
                pass
            else:
                line2 = fp.readline()
                m = PYTHON_MAGIC_COMMENT_re.match(line2)

        if has_bom:
            if m:
                raise SyntaxError(
                    "python refuses to compile code with both a UTF8 "
                    "byte-order-mark and a magic encoding comment")
            return 'utf_8'
        elif m:
            return m.group(1)
        else:
            return None
    finally:
        fp.seek(pos)
|
||||
|
||||
def pathmatch(pattern, filename):
    """Extended pathname pattern matching.

    This function is similar to what is provided by the ``fnmatch`` module in
    the Python standard library, but:

     * can match complete (relative or absolute) path names, and not just file
       names, and
     * also supports a convenience pattern ("**") to match files at any
       directory level.

    >>> pathmatch('**.py', 'bar.py')
    True
    >>> pathmatch('**.py', 'foo/bar/baz.py')
    True
    >>> pathmatch('**.py', 'templates/index.html')
    False
    >>> pathmatch('**/templates/*.html', 'templates/index.html')
    True
    >>> pathmatch('**/templates/*.html', 'templates/foo/bar.html')
    False

    :param pattern: the glob pattern
    :param filename: the path name of the file to match against
    :return: `True` if the path name matches the pattern, `False` otherwise
    :rtype: `bool`
    """
    # Translation table from glob wildcard runs to regular-expression snippets.
    wildcard_res = {
        '?': '[^/]',
        '?/': '[^/]/',
        '*': '[^/]+',
        '*/': '[^/]+/',
        '**/': '(?:.+/)*?',
        '**': '(?:.+/)*?[^/]+',
    }
    # re.split with a capturing group alternates literal text (even indices)
    # and wildcard runs (odd indices).
    translated = []
    for position, fragment in enumerate(re.split('([?*]+/?)', pattern)):
        if position % 2:
            translated.append(wildcard_res[fragment])
        elif fragment:
            translated.append(re.escape(fragment))
    # Normalize the platform path separator to '/' before matching.
    normalized = filename.replace(os.sep, '/')
    return re.match(''.join(translated) + '$', normalized) is not None
|
||||
|
||||
|
||||
class TextWrapper(textwrap.TextWrapper):
    # Custom word-separator pattern: unlike the stock TextWrapper, this one
    # only breaks on whitespace and em-dashes, never on hyphens inside words.
    wordsep_re = re.compile(
        r'(\s+|'                                  # any whitespace
        r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))'    # em-dash
    )


def wraptext(text, width=70, initial_indent='', subsequent_indent=''):
    """Simple wrapper around the ``textwrap.wrap`` function in the standard
    library. This version does not wrap lines on hyphens in words.

    :param text: the text to wrap
    :param width: the maximum line width
    :param initial_indent: string that will be prepended to the first line of
                           wrapped output
    :param subsequent_indent: string that will be prepended to all lines save
                              the first of wrapped output
    :return: a list of lines
    :rtype: `list`
    """
    return TextWrapper(width=width,
                       initial_indent=initial_indent,
                       subsequent_indent=subsequent_indent,
                       break_long_words=False).wrap(text)
|
||||
|
||||
|
||||
# os.path.relpath exists since Python 2.6; keep a pure-Python fallback for
# older interpreters.
try:
    relpath = os.path.relpath
except AttributeError:
    def relpath(path, start='.'):
        """Compute the relative path to one path from another.

        >>> relpath('foo/bar.txt', '').replace(os.sep, '/')
        'foo/bar.txt'
        >>> relpath('foo/bar.txt', 'foo').replace(os.sep, '/')
        'bar.txt'
        >>> relpath('foo/bar.txt', 'baz').replace(os.sep, '/')
        '../foo/bar.txt'

        :return: the relative path
        :rtype: `basestring`
        """
        start_parts = os.path.abspath(start).split(os.sep)
        path_parts = os.path.abspath(path).split(os.sep)

        # Number of leading path components shared by both absolute paths.
        common = len(os.path.commonprefix([start_parts, path_parts]))

        # Climb out of `start` far enough, then descend into `path`.
        segments = [os.path.pardir] * (len(start_parts) - common)
        segments.extend(path_parts[common:])
        return os.path.join(*segments)
|
||||
|
||||
# A zero-length timedelta, shared by the tzinfo implementations below.
ZERO = timedelta(0)


class FixedOffsetTimezone(tzinfo):
    """Fixed offset in minutes east from UTC."""

    def __init__(self, offset, name=None):
        # Offset east of UTC, expressed in minutes.
        self._offset = timedelta(minutes=offset)
        # Fall back to an Etc/GMT-style label when no name is supplied.
        self.zone = name if name is not None else 'Etc/GMT+%d' % offset

    def __str__(self):
        return self.zone

    def __repr__(self):
        return '<FixedOffset "{0}" {1}>'.format(self.zone, self._offset)

    def utcoffset(self, dt):
        return self._offset

    def tzname(self, dt):
        return self.zone

    def dst(self, dt):
        # Fixed-offset zones never observe daylight saving time.
        return ZERO
|
||||
|
||||
|
||||
try:
    from pytz import UTC
except ImportError:
    # pytz is optional; fall back to the local fixed-offset implementation.
    UTC = FixedOffsetTimezone(0, 'UTC')
    """`tzinfo` object for UTC (Universal Time).

    :type: `tzinfo`
    """
|
||||
|
||||
# Local wall-clock offsets from UTC, taken from the C library's zone data:
# STDOFFSET is the standard (non-DST) offset, DSTOFFSET the offset while
# daylight saving time is in effect (identical when the zone has no DST).
STDOFFSET = timedelta(seconds=-time.timezone)
DSTOFFSET = timedelta(seconds=-time.altzone) if time.daylight else STDOFFSET

# Extra shift applied while DST is active; zero for DST-less zones.
DSTDIFF = DSTOFFSET - STDOFFSET
|
||||
|
||||
|
||||
class LocalTimezone(tzinfo):
    """`tzinfo` implementation for the machine's local time zone, built
    on the offsets exposed by the ``time`` module.
    """

    def utcoffset(self, dt):
        return DSTOFFSET if self._isdst(dt) else STDOFFSET

    def dst(self, dt):
        return DSTDIFF if self._isdst(dt) else ZERO

    def tzname(self, dt):
        # time.tzname holds (standard name, DST name); bool indexes it.
        return time.tzname[self._isdst(dt)]

    def _isdst(self, dt):
        # Round-trip the naive datetime through a POSIX timestamp so the
        # C library decides whether DST applies at that local moment
        # (tm_isdst = -1 asks mktime to work it out).
        timetuple = (dt.year, dt.month, dt.day,
                     dt.hour, dt.minute, dt.second,
                     dt.weekday(), 0, -1)
        local = time.localtime(time.mktime(timetuple))
        return local.tm_isdst > 0
|
||||
|
||||
|
||||
LOCALTZ = LocalTimezone()
"""`tzinfo` object for the local time zone.

:type: `tzinfo`
"""
|
63
source/tools/i18n/pullTranslations.py
Normal file
63
source/tools/i18n/pullTranslations.py
Normal file
@ -0,0 +1,63 @@
|
||||
#!/usr/bin/env python2
|
||||
# -*- coding:utf-8 -*-
|
||||
#
|
||||
# Copyright (C) 2013 Wildfire Games.
|
||||
# This file is part of 0 A.D.
|
||||
#
|
||||
# 0 A.D. is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# 0 A.D. is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with 0 A.D. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
"""
|
||||
Although this script itself should work with both Python 2 and Python 3, it relies on the Transifex Client, which at
|
||||
this moment (2013-10-12) does not support Python 3.
|
||||
|
||||
As soon as Transifex Client supports Python 3, simply updating its folder should be enough to make this script work
|
||||
with Python 3 as well.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import, division, print_function, unicode_literals
|
||||
|
||||
import os, sys
|
||||
|
||||
# Python version check.
# The bundled Transifex Client is Python 2 only (see the module docstring,
# printed here to explain why); refuse to run under any other version.
if sys.version_info[0] != 2:
    print(__doc__)
    sys.exit()
|
||||
|
||||
from txclib.project import Project
|
||||
|
||||
|
||||
def main():
    """Walk the whole project tree and pull translations from Transifex
    for every ``l10n`` folder that contains a Transifex client setup
    (a ``.tx`` subfolder).
    """
    toolsDirectory = os.path.dirname(os.path.realpath(__file__))
    projectRoot = os.path.abspath(
        os.path.join(toolsDirectory, os.pardir, os.pardir, os.pardir))
    l10nFolderName = "l10n"
    transifexClientFolder = ".tx"

    for root, folders, filenames in os.walk(projectRoot):
        root = root.decode('utf-8')
        for folder in folders:
            if folder != l10nFolderName:
                continue
            candidate = os.path.join(root, folder)
            if not os.path.exists(os.path.join(candidate, transifexClientFolder)):
                continue
            # The Transifex client operates on the current directory.
            os.chdir(candidate)
            project = Project(candidate)
            project.pull(fetchall=True, force=True)
            # Use this to pull only the main languages (those that will
            # most likely be included in A16):
            #project.pull(languages=['en', 'de', 'it', 'pt_PT', 'nl', 'es', 'fr'])
|
||||
|
||||
|
||||
|
||||
# Allow use both as a script and as an importable module.
if __name__ == "__main__":
    main()
|
119
source/tools/i18n/tx
Normal file
119
source/tools/i18n/tx
Normal file
@ -0,0 +1,119 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from optparse import OptionParser, OptionValueError
|
||||
import os
|
||||
import sys
|
||||
import ssl
|
||||
import errno
|
||||
from txclib import utils
|
||||
from txclib import get_version
|
||||
from txclib.log import set_log_level, logger
|
||||
|
||||
reload(sys)  # Python 2 only: re-import sys so setdefaultencoding (removed from the module by site.py) is available again below.
||||
|
||||
# This block ensures that ^C interrupts are handled quietly.
try:
    import signal

    def exithandler(signum, frame):
        # Ignore any further termination signals while shutting down,
        # then exit with a failure status.
        for sig in (signal.SIGINT, signal.SIGTERM):
            signal.signal(sig, signal.SIG_IGN)
        sys.exit(1)

    signal.signal(signal.SIGINT, exithandler)
    signal.signal(signal.SIGTERM, exithandler)
    # SIGPIPE does not exist on Windows; restore the default disposition
    # where it does, so broken pipes kill the process silently.
    if hasattr(signal, 'SIGPIPE'):
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)

except KeyboardInterrupt:
    sys.exit(1)
|
||||
|
||||
# codecs.open() only specifies FROM what encoding a file is read; this
# sets the default encoding for the unicode strings created by f.read()
# and for implicit str<->unicode conversions (Python 2 only).
sys.setdefaultencoding('utf-8')
|
||||
|
||||
|
||||
def main(argv):
    """Parse the command-line flags (short and long) and dispatch to the
    requested transifex-client command.

    :param argv: command-line arguments, excluding the program name
                 (i.e. ``sys.argv[1:]``).
    """
    usage = "usage: %prog [options] command [cmd_options]"
    description = "This is the Transifex command line client which"\
        " allows you to manage your translations locally and sync"\
        " them with the master Transifex server.\nIf you'd like to"\
        " check the available commands issue `%prog help` or if you"\
        " just want help with a specific command issue `%prog help"\
        " command`"

    parser = OptionParser(
        usage=usage, version=get_version(), description=description
    )
    parser.disable_interspersed_args()
    parser.add_option(
        "-d", "--debug", action="store_true", dest="debug",
        default=False, help=("enable debug messages")
    )
    parser.add_option(
        "-q", "--quiet", action="store_true", dest="quiet",
        default=False, help="don't print status messages to stdout"
    )
    parser.add_option(
        "-r", "--root", action="store", dest="root_dir", type="string",
        default=None, help="change root directory (default is cwd)"
    )
    parser.add_option(
        "--traceback", action="store_true", dest="trace", default=False,
        help="print full traceback on exceptions"
    )
    parser.add_option(
        "--disable-colors", action="store_true", dest="color_disable",
        default=(os.name == 'nt' or not sys.stdout.isatty()),
        help="disable colors in the output of commands"
    )
    # FIX: parse the argv passed in by the caller instead of silently
    # re-reading sys.argv (the *argv* parameter used to be dead code).
    (options, args) = parser.parse_args(argv)

    if len(args) < 1:
        parser.error("No command was given")

    utils.DISABLE_COLORS = options.color_disable

    # set log level
    if options.quiet:
        set_log_level('WARNING')
    elif options.debug:
        set_log_level('DEBUG')

    # find .tx; may be None when neither --root nor a .tx folder exists.
    path_to_tx = options.root_dir or utils.find_dot_tx()

    cmd = args[0]
    try:
        # FIX: a leftover debug print statement used to live here; besides
        # spamming stdout it crashed with a TypeError (None concatenated
        # into a str) whenever no .tx folder was found.
        utils.exec_command(cmd, args[1:], path_to_tx)
    except ssl.SSLError as e:
        if 'certificate verify failed' in e.strerror:
            logger.error(
                'Error: Could not verify the SSL certificate of the remote host'
            )
        else:
            logger.error(errno.errorcode[e.errno])
        sys.exit(1)
    except utils.UnknownCommandError:
        logger.error("tx: Command %s not found" % cmd)
    except SystemExit:
        sys.exit()
    except:
        # Deliberate catch-all boundary: show a short error (or the full
        # traceback with --traceback) and exit non-zero.
        import traceback
        if options.trace:
            traceback.print_exc()
        else:
            formatted_lines = traceback.format_exc().splitlines()
            logger.error(formatted_lines[-1])
        sys.exit(1)
||||
|
||||
# Script entry point.
if __name__ == "__main__":
    # sys.argv[0] is the name of the script that we're running; pass
    # only the real arguments on to main().
    main(sys.argv[1:])
|
344
source/tools/i18n/txclib/LICENSE
Normal file
344
source/tools/i18n/txclib/LICENSE
Normal file
@ -0,0 +1,344 @@
|
||||
GNU GENERAL PUBLIC LICENSE
|
||||
Version 2, June 1991
|
||||
|
||||
Copyright (C) 1989, 1991 Free Software Foundation, Inc.
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The licenses for most software are designed to take away your
|
||||
freedom to share and change it. By contrast, the GNU General Public
|
||||
License is intended to guarantee your freedom to share and change free
|
||||
software--to make sure the software is free for all its users. This
|
||||
General Public License applies to most of the Free Software
|
||||
Foundation's software and to any other program whose authors commit to
|
||||
using it. (Some other Free Software Foundation software is covered by
|
||||
the GNU Library General Public License instead.) You can apply it to
|
||||
your programs, too.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
this service if you wish), that you receive source code or can get it
|
||||
if you want it, that you can change the software or use pieces of it
|
||||
in new free programs; and that you know you can do these things.
|
||||
|
||||
To protect your rights, we need to make restrictions that forbid
|
||||
anyone to deny you these rights or to ask you to surrender the rights.
|
||||
These restrictions translate to certain responsibilities for you if you
|
||||
distribute copies of the software, or if you modify it.
|
||||
|
||||
For example, if you distribute copies of such a program, whether
|
||||
gratis or for a fee, you must give the recipients all the rights that
|
||||
you have. You must make sure that they, too, receive or can get the
|
||||
source code. And you must show them these terms so they know their
|
||||
rights.
|
||||
|
||||
We protect your rights with two steps: (1) copyright the software, and
|
||||
(2) offer you this license which gives you legal permission to copy,
|
||||
distribute and/or modify the software.
|
||||
|
||||
Also, for each author's protection and ours, we want to make certain
|
||||
that everyone understands that there is no warranty for this free
|
||||
software. If the software is modified by someone else and passed on, we
|
||||
want its recipients to know that what they have is not the original, so
|
||||
that any problems introduced by others will not reflect on the original
|
||||
authors' reputations.
|
||||
|
||||
Finally, any free program is threatened constantly by software
|
||||
patents. We wish to avoid the danger that redistributors of a free
|
||||
program will individually obtain patent licenses, in effect making the
|
||||
program proprietary. To prevent this, we have made it clear that any
|
||||
patent must be licensed for everyone's free use or not licensed at all.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
GNU GENERAL PUBLIC LICENSE
|
||||
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
|
||||
|
||||
0. This License applies to any program or other work which contains
|
||||
a notice placed by the copyright holder saying it may be distributed
|
||||
under the terms of this General Public License. The "Program", below,
|
||||
refers to any such program or work, and a "work based on the Program"
|
||||
means either the Program or any derivative work under copyright law:
|
||||
that is to say, a work containing the Program or a portion of it,
|
||||
either verbatim or with modifications and/or translated into another
|
||||
language. (Hereinafter, translation is included without limitation in
|
||||
the term "modification".) Each licensee is addressed as "you".
|
||||
|
||||
Activities other than copying, distribution and modification are not
|
||||
covered by this License; they are outside its scope. The act of
|
||||
running the Program is not restricted, and the output from the Program
|
||||
is covered only if its contents constitute a work based on the
|
||||
Program (independent of having been made by running the Program).
|
||||
Whether that is true depends on what the Program does.
|
||||
|
||||
1. You may copy and distribute verbatim copies of the Program's
|
||||
source code as you receive it, in any medium, provided that you
|
||||
conspicuously and appropriately publish on each copy an appropriate
|
||||
copyright notice and disclaimer of warranty; keep intact all the
|
||||
notices that refer to this License and to the absence of any warranty;
|
||||
and give any other recipients of the Program a copy of this License
|
||||
along with the Program.
|
||||
|
||||
You may charge a fee for the physical act of transferring a copy, and
|
||||
you may at your option offer warranty protection in exchange for a fee.
|
||||
|
||||
2. You may modify your copy or copies of the Program or any portion
|
||||
of it, thus forming a work based on the Program, and copy and
|
||||
distribute such modifications or work under the terms of Section 1
|
||||
above, provided that you also meet all of these conditions:
|
||||
|
||||
a) You must cause the modified files to carry prominent notices
|
||||
stating that you changed the files and the date of any change.
|
||||
|
||||
b) You must cause any work that you distribute or publish, that in
|
||||
whole or in part contains or is derived from the Program or any
|
||||
part thereof, to be licensed as a whole at no charge to all third
|
||||
parties under the terms of this License.
|
||||
|
||||
c) If the modified program normally reads commands interactively
|
||||
when run, you must cause it, when started running for such
|
||||
interactive use in the most ordinary way, to print or display an
|
||||
announcement including an appropriate copyright notice and a
|
||||
notice that there is no warranty (or else, saying that you provide
|
||||
a warranty) and that users may redistribute the program under
|
||||
these conditions, and telling the user how to view a copy of this
|
||||
License. (Exception: if the Program itself is interactive but
|
||||
does not normally print such an announcement, your work based on
|
||||
the Program is not required to print an announcement.)
|
||||
|
||||
These requirements apply to the modified work as a whole. If
|
||||
identifiable sections of that work are not derived from the Program,
|
||||
and can be reasonably considered independent and separate works in
|
||||
themselves, then this License, and its terms, do not apply to those
|
||||
sections when you distribute them as separate works. But when you
|
||||
distribute the same sections as part of a whole which is a work based
|
||||
on the Program, the distribution of the whole must be on the terms of
|
||||
this License, whose permissions for other licensees extend to the
|
||||
entire whole, and thus to each and every part regardless of who wrote it.
|
||||
|
||||
Thus, it is not the intent of this section to claim rights or contest
|
||||
your rights to work written entirely by you; rather, the intent is to
|
||||
exercise the right to control the distribution of derivative or
|
||||
collective works based on the Program.
|
||||
|
||||
In addition, mere aggregation of another work not based on the Program
|
||||
with the Program (or with a work based on the Program) on a volume of
|
||||
a storage or distribution medium does not bring the other work under
|
||||
the scope of this License.
|
||||
|
||||
3. You may copy and distribute the Program (or a work based on it,
|
||||
under Section 2) in object code or executable form under the terms of
|
||||
Sections 1 and 2 above provided that you also do one of the following:
|
||||
|
||||
a) Accompany it with the complete corresponding machine-readable
|
||||
source code, which must be distributed under the terms of Sections
|
||||
1 and 2 above on a medium customarily used for software
|
||||
interchange; or,
|
||||
|
||||
b) Accompany it with a written offer, valid for at least three
|
||||
years, to give any third party, for a charge no more than your
|
||||
cost of physically performing source distribution, a complete
|
||||
machine-readable copy of the corresponding source code, to be
|
||||
distributed under the terms of Sections 1 and 2 above on a medium
|
||||
customarily used for software interchange; or,
|
||||
|
||||
c) Accompany it with the information you received as to the offer
|
||||
to distribute corresponding source code. (This alternative is
|
||||
allowed only for noncommercial distribution and only if you
|
||||
received the program in object code or executable form with such
|
||||
an offer, in accord with Subsection b above.)
|
||||
|
||||
The source code for a work means the preferred form of the work for
|
||||
making modifications to it. For an executable work, complete source
|
||||
code means all the source code for all modules it contains, plus any
|
||||
associated interface definition files, plus the scripts used to
|
||||
control compilation and installation of the executable. However, as a
|
||||
special exception, the source code distributed need not include
|
||||
anything that is normally distributed (in either source or binary
|
||||
form) with the major components (compiler, kernel, and so on) of the
|
||||
operating system on which the executable runs, unless that component
|
||||
itself accompanies the executable.
|
||||
|
||||
If distribution of executable or object code is made by offering
|
||||
access to copy from a designated place, then offering equivalent
|
||||
access to copy the source code from the same place counts as
|
||||
distribution of the source code, even though third parties are not
|
||||
compelled to copy the source along with the object code.
|
||||
|
||||
4. You may not copy, modify, sublicense, or distribute the Program
|
||||
except as expressly provided under this License. Any attempt
|
||||
otherwise to copy, modify, sublicense or distribute the Program is
|
||||
void, and will automatically terminate your rights under this License.
|
||||
However, parties who have received copies, or rights, from you under
|
||||
this License will not have their licenses terminated so long as such
|
||||
parties remain in full compliance.
|
||||
|
||||
5. You are not required to accept this License, since you have not
|
||||
signed it. However, nothing else grants you permission to modify or
|
||||
distribute the Program or its derivative works. These actions are
|
||||
prohibited by law if you do not accept this License. Therefore, by
|
||||
modifying or distributing the Program (or any work based on the
|
||||
Program), you indicate your acceptance of this License to do so, and
|
||||
all its terms and conditions for copying, distributing or modifying
|
||||
the Program or works based on it.
|
||||
|
||||
6. Each time you redistribute the Program (or any work based on the
|
||||
Program), the recipient automatically receives a license from the
|
||||
original licensor to copy, distribute or modify the Program subject to
|
||||
these terms and conditions. You may not impose any further
|
||||
restrictions on the recipients' exercise of the rights granted herein.
|
||||
You are not responsible for enforcing compliance by third parties to
|
||||
this License.
|
||||
|
||||
7. If, as a consequence of a court judgment or allegation of patent
|
||||
infringement or for any other reason (not limited to patent issues),
|
||||
conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot
|
||||
distribute so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you
|
||||
may not distribute the Program at all. For example, if a patent
|
||||
license would not permit royalty-free redistribution of the Program by
|
||||
all those who receive copies directly or indirectly through you, then
|
||||
the only way you could satisfy both it and this License would be to
|
||||
refrain entirely from distribution of the Program.
|
||||
|
||||
If any portion of this section is held invalid or unenforceable under
|
||||
any particular circumstance, the balance of the section is intended to
|
||||
apply and the section as a whole is intended to apply in other
|
||||
circumstances.
|
||||
|
||||
It is not the purpose of this section to induce you to infringe any
|
||||
patents or other property right claims or to contest validity of any
|
||||
such claims; this section has the sole purpose of protecting the
|
||||
integrity of the free software distribution system, which is
|
||||
implemented by public license practices. Many people have made
|
||||
generous contributions to the wide range of software distributed
|
||||
through that system in reliance on consistent application of that
|
||||
system; it is up to the author/donor to decide if he or she is willing
|
||||
to distribute software through any other system and a licensee cannot
|
||||
impose that choice.
|
||||
|
||||
This section is intended to make thoroughly clear what is believed to
|
||||
be a consequence of the rest of this License.
|
||||
|
||||
8. If the distribution and/or use of the Program is restricted in
|
||||
certain countries either by patents or by copyrighted interfaces, the
|
||||
original copyright holder who places the Program under this License
|
||||
may add an explicit geographical distribution limitation excluding
|
||||
those countries, so that distribution is permitted only in or among
|
||||
countries not thus excluded. In such case, this License incorporates
|
||||
the limitation as if written in the body of this License.
|
||||
|
||||
9. The Free Software Foundation may publish revised and/or new versions
|
||||
of the General Public License from time to time. Such new versions will
|
||||
be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the Program
|
||||
specifies a version number of this License which applies to it and
|
||||
"any later version", you have the option of following the terms and
|
||||
conditions either of that version or of any later version published by
|
||||
the Free Software Foundation. If the Program does not specify a
|
||||
version number of this License, you may choose any version ever
|
||||
published by the Free Software Foundation.
|
||||
|
||||
10. If you wish to incorporate parts of the Program into other free
|
||||
programs whose distribution conditions are different, write to the author
|
||||
to ask for permission. For software which is copyrighted by the Free
|
||||
Software Foundation, write to the Free Software Foundation; we sometimes
|
||||
make exceptions for this. Our decision will be guided by the two goals
|
||||
of preserving the free status of all derivatives of our free software and
|
||||
of promoting the sharing and reuse of software generally.
|
||||
|
||||
NO WARRANTY
|
||||
|
||||
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO
|
||||
WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
|
||||
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
|
||||
OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY
|
||||
KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
|
||||
PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
|
||||
THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
|
||||
WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
|
||||
AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU
|
||||
FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
|
||||
CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
|
||||
PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
|
||||
RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
|
||||
FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF
|
||||
SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
|
||||
DAMAGES.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these
|
||||
terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
convey the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
|
||||
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If the program is interactive, make it output a short notice like this
|
||||
when it starts in an interactive mode:
|
||||
|
||||
Gnomovision version 69, Copyright (C) year name of author
|
||||
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||
This is free software, and you are welcome to redistribute it
|
||||
under certain conditions; type `show c' for details.
|
||||
|
||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
||||
parts of the General Public License. Of course, the commands you use may
|
||||
be called something other than `show w' and `show c'; they could even be
|
||||
mouse-clicks or menu items--whatever suits your program.
|
||||
|
||||
You should also get your employer (if you work as a programmer) or your
|
||||
school, if any, to sign a "copyright disclaimer" for the program, if
|
||||
necessary. Here is a sample; alter the names:
|
||||
|
||||
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
|
||||
`Gnomovision' (which makes passes at compilers) written by James Hacker.
|
||||
|
||||
<signature of Ty Coon>, 1 April 1989
|
||||
Ty Coon, President of Vice
|
||||
|
||||
This General Public License does not permit incorporating your program into
|
||||
proprietary programs. If your program is a subroutine library, you may
|
||||
consider it more useful to permit linking proprietary applications with the
|
||||
library. If this is what you want to do, use the GNU Library General
|
||||
Public License instead of this License.
|
||||
|
12
source/tools/i18n/txclib/__init__.py
Normal file
12
source/tools/i18n/txclib/__init__.py
Normal file
@ -0,0 +1,12 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# (major, minor, micro, release tag)
VERSION = (0, 9, 0, 'final')


def get_version():
    """Return the client version as a human-readable string.

    The micro number is appended only when it is non-zero, and the
    release tag only when it is not ``'final'``.
    """
    major, minor, micro, tag = VERSION
    version = '%s.%s' % (major, minor)
    if micro:
        version += '.%s' % micro
    if tag != 'final':
        version += ' %s' % tag
    return version
|
3895
source/tools/i18n/txclib/cacert.pem
Normal file
3895
source/tools/i18n/txclib/cacert.pem
Normal file
File diff suppressed because it is too large
Load Diff
576
source/tools/i18n/txclib/commands.py
Normal file
576
source/tools/i18n/txclib/commands.py
Normal file
@ -0,0 +1,576 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
In this file we have all the top level commands for the transifex client.
|
||||
Since we're using a way to automatically list them and execute them, when
|
||||
adding code to this file you must take care of the following:
|
||||
* Added functions must begin with 'cmd_' followed by the actual name of the
|
||||
command being used in the command line (eg cmd_init)
|
||||
* The description for each function that we display to the user is read from
|
||||
the func_doc attribute which reads the doc string. So, when adding
|
||||
docstring to a new function make sure you add a one-liner which is
|
||||
descriptive and is meant to be seen by the user.
|
||||
* When including libraries, it's best if you include modules instead of
|
||||
functions because that way our function resolution will work faster and the
|
||||
chances of overlapping are minimal
|
||||
* All functions should use the OptionParser and should have a usage and
|
||||
description field.
|
||||
"""
|
||||
import os
|
||||
import re, shutil
|
||||
import sys
|
||||
from optparse import OptionParser, OptionGroup
|
||||
import ConfigParser
|
||||
|
||||
|
||||
from txclib import utils, project
|
||||
from txclib.utils import parse_json, compile_json, files_in_project
|
||||
from txclib.config import OrderedRawConfigParser
|
||||
from txclib.exceptions import UnInitializedError
|
||||
from txclib.parsers import delete_parser, help_parser, parse_csv_option, \
|
||||
status_parser, pull_parser, set_parser, push_parser, init_parser
|
||||
from txclib.paths import posix_path
|
||||
from txclib.log import logger
|
||||
|
||||
|
||||
def cmd_init(argv, path_to_tx):
    "Initialize a new transifex project."
    parser = init_parser()
    (options, args) = parser.parse_args(argv)
    if len(args) > 1:
        parser.error("Too many arguments were provided. Aborting...")
    # Use an explicit path argument when given, else the current directory.
    if args:
        path_to_tx = args[0]
    else:
        path_to_tx = os.getcwd()

    if os.path.isdir(os.path.join(path_to_tx, ".tx")):
        logger.info("tx: There is already a tx folder!")
        # Keep asking until one of y/Y/n/N/<empty> is entered; an empty
        # answer defaults to "no".  (The original also compared against
        # 'NO'/'no'/'No', but those could never get past this loop.)
        reinit = raw_input("Do you want to delete it and reinit the project? [y/N]: ")
        while reinit not in ('y', 'Y', 'n', 'N', ''):
            reinit = raw_input("Do you want to delete it and reinit the project? [y/N]: ")
        if not reinit or reinit in ('N', 'n'):
            return
        # Clean the old settings
        # FIXME: take a backup
        shutil.rmtree(os.path.join(path_to_tx, ".tx"))

    logger.info("Creating .tx folder...")
    os.mkdir(os.path.join(path_to_tx, ".tx"))

    # Decide which Transifex host to talk to: --host wins, then the
    # interactive prompt, then the public default.
    default_transifex = "https://www.transifex.com"
    transifex_host = options.host or raw_input("Transifex instance [%s]: " % default_transifex)

    if not transifex_host:
        transifex_host = default_transifex
    if not transifex_host.startswith(('http://', 'https://')):
        transifex_host = 'https://' + transifex_host

    config_file = os.path.join(path_to_tx, ".tx", "config")
    if not os.path.exists(config_file):
        # The path to the config file (.tx/config)
        logger.info("Creating skeleton...")
        config = OrderedRawConfigParser()
        config.add_section('main')
        config.set('main', 'host', transifex_host)
        logger.info("Creating config file...")
        # 'with' guarantees the handle is closed even if write() raises
        # (the original left the file open on error).
        with open(config_file, 'w') as fh:
            config.write(fh)

    # Credentials are stored in ~/.transifexrc by the Project helper.
    prj = project.Project(path_to_tx)
    prj.getset_host_credentials(transifex_host, user=options.user,
                                password=options.password)
    prj.save()
    logger.info("Done.")
|
||||
|
||||
|
||||
def cmd_set(argv, path_to_tx):
    "Add local or remote files under transifex"
    parser = set_parser()
    (options, args) = parser.parse_args(argv)

    # Implement options/args checks
    # TODO !!!!!!!
    if options.local:
        # --auto-local: configure a whole resource from a <lang> expression.
        try:
            expression = args[0]
        except IndexError:
            parser.error("Please specify an expression.")
        if not options.resource:
            parser.error("Please specify a resource")
        if not options.source_language:
            parser.error("Please specify a source language.")
        if '<lang>' not in expression:
            parser.error("The expression you have provided is not valid.")
        if not utils.valid_slug(options.resource):
            parser.error("Invalid resource slug. The format is <project_slug>"
                         ".<resource_slug> and the valid characters include [_-\w].")
        _auto_local(path_to_tx, options.resource,
                    source_language=options.source_language,
                    expression=expression, source_file=options.source_file,
                    execute=options.execute, regex=False)
        if options.execute:
            _set_minimum_perc(options.resource, options.minimum_perc, path_to_tx)
            _set_mode(options.resource, options.mode, path_to_tx)
            _set_type(options.resource, options.i18n_type, path_to_tx)
        return

    if options.remote:
        # --auto-remote: configure from a Transifex URL.
        try:
            url = args[0]
        except IndexError:
            # Grammar fix: the original said "an remote url".
            parser.error("Please specify a remote url")
        _auto_remote(path_to_tx, url)
        _set_minimum_perc(options.resource, options.minimum_perc, path_to_tx)
        _set_mode(options.resource, options.mode, path_to_tx)
        return

    if options.is_source:
        # --source: register a single file as the resource's source file.
        resource = options.resource
        if not resource:
            parser.error("You must specify a resource name with the"
                         " -r|--resource flag.")

        lang = options.language
        if not lang:
            parser.error("Please specify a source language.")

        if len(args) != 1:
            parser.error("Please specify a file.")

        if not utils.valid_slug(resource):
            parser.error("Invalid resource slug. The format is <project_slug>"
                         ".<resource_slug> and the valid characters include [_-\w].")

        # Renamed from 'file' to avoid shadowing the builtin.
        source_path = args[0]
        # Calculate relative path
        path_to_file = os.path.relpath(source_path, path_to_tx)
        _set_source_file(path_to_tx, resource, options.language, path_to_file)
    elif options.resource or options.language:
        # Register a single translation file for a resource/language pair.
        resource = options.resource
        lang = options.language

        if len(args) != 1:
            parser.error("Please specify a file")

        # Calculate relative path
        path_to_file = os.path.relpath(args[0], path_to_tx)

        try:
            _go_to_dir(path_to_tx)
        except UnInitializedError as e:
            utils.logger.error(e)
            return

        # NOTE(review): resource may be None here if only -l was passed;
        # valid_slug(None) / _set_translation would then fail — TODO confirm
        # whether the parser should reject that combination.
        if not utils.valid_slug(resource):
            parser.error("Invalid resource slug. The format is <project_slug>"
                         ".<resource_slug> and the valid characters include [_-\w].")
        _set_translation(path_to_tx, resource, lang, path_to_file)

    _set_mode(options.resource, options.mode, path_to_tx)
    _set_type(options.resource, options.i18n_type, path_to_tx)
    _set_minimum_perc(options.resource, options.minimum_perc, path_to_tx)

    logger.info("Done.")
    return
|
||||
|
||||
|
||||
def _auto_local(path_to_tx, resource, source_language, expression, execute=False,
                source_file=None, regex=False):
    """Auto configure local project.

    Scan the project tree for files matching *expression* (which contains a
    '<lang>' placeholder), pick the source file, and either apply the
    configuration (execute=True) or just print the equivalent 'tx' commands.
    """
    # The path everything will be relative to
    curpath = os.path.abspath(os.curdir)

    # Force expr to be a valid regex expr (escaped) but keep <lang> intact
    expr_re = utils.regex_from_filefilter(expression, curpath)
    expr_rec = re.compile(expr_re)

    if not execute:
        logger.info("Only printing the commands which will be run if the "
                    "--execute switch is specified.")

    # First, let's construct a dictionary of all matching files.
    # Note: Only the last matching file of a language will be stored.
    translation_files = {}
    for f_path in files_in_project(curpath):
        match = expr_rec.match(posix_path(f_path))
        if match:
            lang = match.group(1)
            if lang == source_language and not source_file:
                source_file = f_path
            else:
                translation_files[lang] = f_path

    if not source_file:
        raise Exception("Could not find a source language file. Please run"
                        " set --source manually and then re-run this command or provide"
                        " the source file with the -s flag.")
    if execute:
        logger.info("Updating source for resource %s ( %s -> %s )." % (
            resource, source_language, os.path.relpath(source_file, path_to_tx)))
        _set_source_file(path_to_tx, resource, source_language,
                         os.path.relpath(source_file, path_to_tx))
    else:
        logger.info('\ntx set --source -r %(res)s -l %(lang)s %(file)s\n' % {
            'res': resource,
            'lang': source_language,
            'file': os.path.relpath(source_file, curpath)})

    prj = project.Project(path_to_tx)

    if execute:
        # Fail early with a clear message when the resource was never set up.
        try:
            prj.config.get("%s" % resource, "source_file")
        except ConfigParser.NoSectionError:
            # BUG FIX: the original message has two %s placeholders but was
            # given a single argument, raising TypeError instead of the
            # intended Exception.
            raise Exception("No resource with slug \"%s\" was found.\nRun 'tx set --auto"
                            "-local -r %s \"expression\"' to do the initial configuration."
                            % (resource, resource))

    # Now let's handle the translation files.
    if execute:
        logger.info("Updating file expression for resource %s ( %s )." % (
            resource, expression))
        # Eval file_filter relative to root dir
        file_filter = posix_path(
            os.path.relpath(os.path.join(curpath, expression), path_to_tx)
        )
        prj.config.set("%s" % resource, "file_filter", file_filter)
    else:
        for (lang, f_path) in sorted(translation_files.items()):
            logger.info('tx set -r %(res)s -l %(lang)s %(file)s' % {
                'res': resource,
                'lang': lang,
                'file': os.path.relpath(f_path, curpath)})

    if execute:
        prj.save()
|
||||
|
||||
|
||||
def _auto_remote(path_to_tx, url):
    """
    Initialize a remote release/project/resource to the current directory.

    Parses *url*, fetches the matching project/release/resource details from
    the server and registers every discovered resource in the local config.
    """
    logger.info("Auto configuring local project from remote URL...")

    type, vars = utils.parse_tx_url(url)
    prj = project.Project(path_to_tx)
    username, password = prj.getset_host_credentials(vars['hostname'])

    if type == 'project':
        logger.info("Getting details for project %s" % vars['project'])
        proj_info = utils.get_details('project_details',
                                      username, password,
                                      hostname=vars['hostname'],
                                      project=vars['project'])
        resources = ['.'.join([vars['project'], r['slug']])
                     for r in proj_info['resources']]
        logger.info("%s resources found. Configuring..." % len(resources))
    elif type == 'release':
        logger.info("Getting details for release %s" % vars['release'])
        rel_info = utils.get_details('release_details',
                                     username, password,
                                     hostname=vars['hostname'],
                                     project=vars['project'],
                                     release=vars['release'])
        resources = []
        for r in rel_info['resources']:
            # A 'project' key marks resources borrowed from another project.
            # ('in' replaces has_key(), which was removed in Python 3.)
            if 'project' in r:
                resources.append('.'.join([r['project']['slug'], r['slug']]))
            else:
                resources.append('.'.join([vars['project'], r['slug']]))
        logger.info("%s resources found. Configuring..." % len(resources))
    elif type == 'resource':
        logger.info("Getting details for resource %s" % vars['resource'])
        resources = ['.'.join([vars['project'], vars['resource']])]
    else:
        # BUG FIX: the original did `raise("...")`, i.e. tried to raise a
        # plain string, which is itself a TypeError at runtime.
        raise Exception("Url '%s' is not recognized." % url)

    for resource in resources:
        logger.info("Configuring resource %s." % resource)
        proj, res = resource.split('.')
        res_info = utils.get_details('resource_details',
                                     username, password,
                                     hostname=vars['hostname'],
                                     project=proj, resource=res)
        try:
            source_lang = res_info['source_language_code']
            i18n_type = res_info['i18n_type']
        except KeyError:
            # Message typo fixed: "software of fallback" -> "or fall back".
            raise Exception("Remote server seems to be running an unsupported version"
                            " of Transifex. Either update your server software or fall back"
                            " to a previous version of transifex-client.")
        prj.set_remote_resource(
            resource=resource,
            host=vars['hostname'],
            source_lang=source_lang,
            i18n_type=i18n_type)

    prj.save()
|
||||
|
||||
|
||||
def cmd_push(argv, path_to_tx):
    "Push local files to remote server"
    parser = push_parser()
    (options, args) = parser.parse_args(argv)
    # Normalize the comma-separated filters up front.
    languages = parse_csv_option(options.languages)
    resources = parse_csv_option(options.resources)
    prj = project.Project(path_to_tx)
    # At least one payload type must be requested.
    if not (options.push_source or options.push_translations):
        parser.error("You need to specify at least one of the -s|--source,"
                     " -t|--translations flags with the push command.")

    prj.push(
        force=options.force_creation,
        resources=resources,
        languages=languages,
        skip=options.skip_errors,
        source=options.push_source,
        translations=options.push_translations,
        no_interactive=options.no_interactive
    )
    logger.info("Done.")
|
||||
|
||||
|
||||
def cmd_pull(argv, path_to_tx):
    "Pull files from remote server to local repository"
    parser = pull_parser()
    (options, args) = parser.parse_args(argv)
    # --all fetches every language, so it is incompatible with -l filters.
    if options.fetchall and options.languages:
        # Typo fix in user-facing message: "can't user" -> "can't use".
        parser.error("You can't use a language filter along with the"
                     " -a|--all option")
    languages = parse_csv_option(options.languages)
    resources = parse_csv_option(options.resources)
    skip = options.skip_errors
    minimum_perc = options.minimum_perc or None

    try:
        _go_to_dir(path_to_tx)
    except UnInitializedError as e:
        utils.logger.error(e)
        return

    # instantiate the project.Project
    prj = project.Project(path_to_tx)
    prj.pull(
        languages=languages, resources=resources, overwrite=options.overwrite,
        fetchall=options.fetchall, fetchsource=options.fetchsource,
        force=options.force, skip=skip, minimum_perc=minimum_perc,
        mode=options.mode
    )
    logger.info("Done.")
|
||||
|
||||
|
||||
def _set_source_file(path_to_tx, resource, lang, path_to_file):
    """Reusable method to set source file.

    Records *path_to_file* as the source file (language *lang*) of
    *resource* ('project_slug.resource_slug') in the .tx/config file.
    """
    proj, res = resource.split('.')
    if not proj or not res:
        raise Exception("\"%s.%s\" is not a valid resource identifier. It should"
                        " be in the following format project_slug.resource_slug." %
                        (proj, res))
    if not lang:
        raise Exception("You haven't specified a source language.")

    try:
        _go_to_dir(path_to_tx)
    except UnInitializedError as e:
        utils.logger.error(e)
        return

    if not os.path.exists(path_to_file):
        raise Exception("tx: File ( %s ) does not exist." %
                        os.path.join(path_to_tx, path_to_file))

    root_dir = os.path.abspath(path_to_tx)

    # BUG FIX: the original used a substring test
    # (`root_dir not in normpath(abspath(path_to_file))`), which wrongly
    # accepts any path that merely contains the root string anywhere in it.
    # Use a real prefix check against the root plus a trailing separator.
    abs_file = os.path.normpath(os.path.abspath(path_to_file))
    if not abs_file.startswith(os.path.join(root_dir, '')):
        raise Exception("File must be under the project root directory.")

    logger.info("Setting source file for resource %s.%s ( %s -> %s )." % (
        proj, res, lang, path_to_file))

    path_to_file = os.path.relpath(path_to_file, root_dir)

    # Construct the Project once (the original built it twice).
    prj = project.Project(path_to_tx)

    # FIXME: Check also if the path to source file already exists.
    try:
        try:
            prj.config.get("%s.%s" % (proj, res), "source_file")
        except ConfigParser.NoSectionError:
            prj.config.add_section("%s.%s" % (proj, res))
    except ConfigParser.NoOptionError:
        pass
    finally:
        prj.config.set(
            "%s.%s" % (proj, res), "source_file", posix_path(path_to_file)
        )
        prj.config.set("%s.%s" % (proj, res), "source_lang", lang)

    prj.save()
|
||||
|
||||
|
||||
def _set_translation(path_to_tx, resource, lang, path_to_file):
    """Reusable method to set translation file.

    Records *path_to_file* as the translation of *resource* for language
    *lang* in the .tx/config file.
    """
    proj, res = resource.split('.')
    # BUG FIX: the original tested `if not project or not resource` — the
    # imported *module* 'project' and the full 'resource' string, both always
    # truthy — so the validation never fired. Check the split parts instead.
    if not proj or not res:
        raise Exception("\"%s\" is not a valid resource identifier. It should"
                        " be in the following format project_slug.resource_slug." %
                        resource)

    try:
        _go_to_dir(path_to_tx)
    except UnInitializedError as e:
        utils.logger.error(e)
        return

    # Warn the user if the file doesn't exist
    if not os.path.exists(path_to_file):
        logger.info("Warning: File '%s' doesn't exist." % path_to_file)

    # instantiate the project.Project
    prj = project.Project(path_to_tx)
    root_dir = os.path.abspath(path_to_tx)

    # BUG FIX: proper prefix check instead of the original substring test
    # (`root_dir not in ...`), which accepts unrelated paths that happen to
    # contain the root string.
    abs_file = os.path.normpath(os.path.abspath(path_to_file))
    if not abs_file.startswith(os.path.join(root_dir, '')):
        raise Exception("File must be under the project root directory.")

    if lang == prj.config.get("%s.%s" % (proj, res), "source_lang"):
        raise Exception("tx: You cannot set translation file for the source language."
                        " Source languages contain the strings which will be translated!")

    logger.info("Updating translations for resource %s ( %s -> %s )." % (
        resource, lang, path_to_file))
    path_to_file = os.path.relpath(path_to_file, root_dir)
    prj.config.set(
        "%s.%s" % (proj, res), "trans.%s" % lang, posix_path(path_to_file)
    )

    prj.save()
|
||||
|
||||
|
||||
def cmd_status(argv, path_to_tx):
    "Print status of current project"
    parser = status_parser()
    (options, args) = parser.parse_args(argv)
    resources = parse_csv_option(options.resources)
    prj = project.Project(path_to_tx)
    resources = prj.get_chosen_resources(resources)
    total = len(resources)
    for idx, res in enumerate(resources):
        proj_slug, res_slug = res.split('.')
        logger.info("%s -> %s (%s of %s)" % (proj_slug, res_slug, idx + 1, total))
        logger.info("Translation Files:")
        slang = prj.get_resource_option(res, 'source_lang')
        sfile = prj.get_resource_option(res, 'source_file') or "N/A"
        lang_map = prj.get_resource_lang_mapping(res)
        logger.info(" - %s: %s (%s)" % (utils.color_text(slang, "RED"),
                    sfile, utils.color_text("source", "YELLOW")))
        files = prj.get_resource_files(res)
        # Print each translation, mapped back to its local language code
        # when a remote->local mapping exists.
        for lang in sorted(files.keys()):
            local_lang = lang_map.flip[lang] if lang in lang_map.values() else lang
            logger.info(" - %s: %s" % (utils.color_text(local_lang, "RED"),
                        files[lang]))
        logger.info("")
|
||||
|
||||
|
||||
def cmd_help(argv, path_to_tx):
    """List all available commands"""
    parser = help_parser()
    (options, args) = parser.parse_args(argv)
    if len(args) > 1:
        parser.error("Multiple arguments received. Exiting...")

    # Get all commands
    fns = utils.discover_commands()

    # Print help for specific command
    if len(args) == 1:
        try:
            # Use the parsed positional argument rather than raw argv[0];
            # they coincide here (the help parser defines no options), but
            # args[0] is what was actually validated above.
            fns[args[0]](['--help'], path_to_tx)
        except KeyError:
            utils.logger.error("Command %s not found" % args[0])
    # or print summary of all commands

    # the code below will only be executed if the KeyError exception is thrown
    # because in all other cases the function called with --help will exit
    # instead of return here
    keys = sorted(fns.keys())

    logger.info("Transifex command line client.\n")
    logger.info("Available commands are:")
    for key in keys:
        logger.info("  %-15s\t%s" % (key, fns[key].func_doc))
    logger.info("\nFor more information run %s command --help" % sys.argv[0])
|
||||
|
||||
|
||||
def cmd_delete(argv, path_to_tx):
    "Delete an accessible resource or translation in a remote server."
    parser = delete_parser()
    (options, args) = parser.parse_args(argv)
    # Expand the comma-separated filters and forward everything to the
    # project layer, which performs the actual remote deletion.
    prj = project.Project(path_to_tx)
    prj.delete(
        parse_csv_option(options.resources),
        parse_csv_option(options.languages),
        options.skip_errors,
        options.force_delete,
    )
    logger.info("Done.")
|
||||
|
||||
|
||||
def _go_to_dir(path):
    """Change the current working directory to the directory specified as
    argument.

    Args:
        path: The path to chdir to.
    Raises:
        UnInitializedError, in case the directory has not been initialized.
    """
    if path is None:
        raise UnInitializedError(
            # Typo fixed in the user-facing message ("initialzied").
            "Directory has not been initialized. "
            "Did you forget to run 'tx init' first?"
        )
    os.chdir(path)
|
||||
|
||||
|
||||
def _set_minimum_perc(resource, value, path_to_tx):
    """Set the minimum percentage in the .tx/config file."""
    _set_project_option(resource, 'minimum_perc', value, path_to_tx,
                        'set_min_perc')
|
||||
|
||||
|
||||
def _set_mode(resource, value, path_to_tx):
    """Set the mode in the .tx/config file."""
    _set_project_option(resource, 'mode', value, path_to_tx,
                        'set_default_mode')
|
||||
|
||||
|
||||
def _set_type(resource, value, path_to_tx):
    """Set the i18n type in the .tx/config file."""
    _set_project_option(resource, 'type', value, path_to_tx,
                        'set_i18n_type')
|
||||
|
||||
|
||||
def _set_project_option(resource, name, value, path_to_tx, func_name):
    """Save the option to the project config file."""
    # A None value means "nothing to change".
    if value is None:
        return
    if resource:
        logger.debug("Setting the %s for resource %s." % (name, resource))
        targets = [resource, ]
    else:
        # An empty list tells the project layer to apply to all resources.
        logger.debug("Setting the %s for all resources." % name)
        targets = []
    prj = project.Project(path_to_tx)
    getattr(prj, func_name)(targets, value)
    prj.save()
|
115
source/tools/i18n/txclib/config.py
Normal file
115
source/tools/i18n/txclib/config.py
Normal file
@ -0,0 +1,115 @@
|
||||
import ConfigParser
|
||||
|
||||
|
||||
class OrderedRawConfigParser( ConfigParser.RawConfigParser ):
    """
    Overload standard Class ConfigParser.RawConfigParser.

    Writes the options of every section in sorted order and preserves the
    case of option names (optionxform = str).
    """
    def write(self, fp):
        """Write an .ini-format representation of the configuration state."""
        if self._defaults:
            # BUG FIX: DEFAULTSECT was referenced unqualified, which raises
            # NameError here — only the ConfigParser *module* is imported.
            fp.write("[%s]\n" % ConfigParser.DEFAULTSECT)
            for key in sorted(self._defaults):
                fp.write("%s = %s\n" % (
                    key, str(self._defaults[key]).replace('\n', '\n\t')))
            fp.write("\n")
        for section in self._sections:
            fp.write("[%s]\n" % section)
            for key in sorted(self._sections[section]):
                if key != "__name__":
                    fp.write("%s = %s\n" % (
                        key,
                        str(self._sections[section][key]).replace('\n', '\n\t')))
            fp.write("\n")

    # Keep option names case-sensitive (the default lower-cases them).
    optionxform = str
|
||||
|
||||
|
||||
# Sentinel distinguishing "key absent" from a stored None.
_NOTFOUND = object()


class Flipdict(dict):
    """An injective (one-to-one) python dict. Ensures that each key maps
    to a unique value, and each value maps back to that same key.

    Code mostly taken from here:
    http://code.activestate.com/recipes/576968-flipdict-python-dict-that-also-maintains-a-one-to-/
    """

    def __init__(self, *args, **kw):
        # The inverse mapping is created without calling __init__ so the two
        # halves can point at each other.
        self._flip = dict.__new__(self.__class__)
        setattr(self._flip, "_flip", self)
        # .items() instead of the Python-2-only .iteritems(); works on both.
        for key, val in dict(*args, **kw).items():
            self[key] = val

    @property
    def flip(self):
        """The inverse mapping."""
        return self._flip

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, dict(self))

    __str__ = __repr__

    def copy(self):
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, keys, value=None):
        return cls(dict.fromkeys(keys, value))

    def __setitem__(self, key, val):
        # Refuse to break injectivity: val may only be owned by this key.
        k = self._flip.get(val, _NOTFOUND)
        if not (k is _NOTFOUND or k == key):
            raise KeyError('(key,val) would erase mapping for value %r' % val)

        # Drop the old value's reverse entry before overwriting.
        v = self.get(key, _NOTFOUND)
        if v is not _NOTFOUND:
            dict.__delitem__(self._flip, v)

        dict.__setitem__(self, key, val)
        dict.__setitem__(self._flip, val, key)

    def setdefault(self, key, default = None):
        # Copied from python's UserDict.DictMixin code.
        try:
            return self[key]
        except KeyError:
            self[key] = default
            return default

    def update(self, other = None, **kwargs):
        # Copied from python's UserDict.DictMixin code.
        # Make progressively weaker assumptions about "other"
        if other is None:
            pass
        elif hasattr(other, 'iteritems'):  # iteritems saves memory and lookups
            for k, v in other.iteritems():
                self[k] = v
        elif hasattr(other, 'keys'):
            for k in other.keys():
                self[k] = other[k]
        else:
            for k, v in other:
                self[k] = v
        if kwargs:
            self.update(kwargs)

    def __delitem__(self, key):
        val = dict.pop(self, key)
        dict.__delitem__(self._flip, val)

    def pop(self, key, *args):
        # BUG FIX: when the key was absent and a default was supplied, the
        # original unconditionally deleted the *default* from the inverse
        # mapping, raising a spurious KeyError (or corrupting the flip dict).
        present = key in self
        val = dict.pop(self, key, *args)
        if present:
            dict.__delitem__(self._flip, val)
        return val

    def popitem(self):
        key, val = dict.popitem(self)
        dict.__delitem__(self._flip, val)
        return key, val

    def clear(self):
        dict.clear(self)
        dict.clear(self._flip)
|
13
source/tools/i18n/txclib/exceptions.py
Normal file
13
source/tools/i18n/txclib/exceptions.py
Normal file
@ -0,0 +1,13 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Exception classes for the tx client.
|
||||
"""
|
||||
|
||||
|
||||
class UnInitializedError(Exception):
    """Raised when the project directory has not been initialized yet."""
    pass
|
||||
|
||||
|
||||
class UnknownCommandError(Exception):
    """Raised when the provided command is not supported."""
    pass
|
45
source/tools/i18n/txclib/http_utils.py
Normal file
45
source/tools/i18n/txclib/http_utils.py
Normal file
@ -0,0 +1,45 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
HTTP-related utility functions.
|
||||
"""
|
||||
|
||||
import gzip
|
||||
try:
|
||||
import cStringIO as StringIO
|
||||
except ImportError:
|
||||
import StringIO
|
||||
|
||||
|
||||
def _gzip_decode(gzip_data):
    """
    Unzip gzipped data and return them.

    :param gzip_data: Gzipped data.
    :returns: The actual data.
    """
    # BUG FIX: the original put the StringIO() construction inside the try,
    # so a failure there made the finally call .close() on the raw input
    # string; it also never closed the GzipFile wrapper.
    gzip_buffer = StringIO.StringIO(gzip_data)
    try:
        gzip_file = gzip.GzipFile(fileobj=gzip_buffer)
        try:
            return gzip_file.read()
        finally:
            gzip_file.close()
    finally:
        gzip_buffer.close()
|
||||
|
||||
|
||||
def http_response(response):
    """
    Return the response of a HTTP request.

    If the response has been gzipped, gunzip it first.

    :param response: The raw response of a HTTP request.
    :returns: A response suitable to be used by clients.
    """
    headers = response.info()
    body = response.read()
    response.close()
    # Transparently decompress gzip-encoded payloads.
    if headers.get('content-encoding') == 'gzip':
        return _gzip_decode(body)
    return body
|
37
source/tools/i18n/txclib/log.py
Normal file
37
source/tools/i18n/txclib/log.py
Normal file
@ -0,0 +1,37 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Add logging capabilities to tx-client.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import logging
|
||||
|
||||
# Logger wiring for the tx client: everything below ERROR goes to stdout,
# ERROR and above go to stderr; records are printed bare (message only).
_logger = logging.getLogger('txclib')
_logger.setLevel(logging.INFO)

_plain = logging.Formatter('%(message)s')

_stderr_handler = logging.StreamHandler(sys.stderr)
_stderr_handler.setLevel(logging.ERROR)
_stderr_handler.setFormatter(_plain)
_logger.addHandler(_stderr_handler)

_stdout_handler = logging.StreamHandler(sys.stdout)
_stdout_handler.setLevel(logging.DEBUG)
_stdout_handler.setFormatter(_plain)
# Filter keeps errors off stdout — they are already routed to stderr.
_non_errors = logging.Filter()
_non_errors.filter = lambda record: record.levelno < logging.ERROR
_stdout_handler.addFilter(_non_errors)
_logger.addHandler(_stdout_handler)

logger = _logger
|
||||
|
||||
|
||||
def set_log_level(level):
    """Set the level for the logger.

    Args:
        level: A string among DEBUG, INFO, WARNING, ERROR, CRITICAL.
    """
    # Resolve the textual level name to its logging constant.
    numeric_level = getattr(logging, level)
    logger.setLevel(numeric_level)
|
0
source/tools/i18n/txclib/packages/__init__.py
Normal file
0
source/tools/i18n/txclib/packages/__init__.py
Normal file
@ -0,0 +1,73 @@
|
||||
"""The match_hostname() function from Python 3.2, essential when using SSL."""
|
||||
|
||||
# See https://bitbucket.org/brandon/backports.ssl_match_hostname
|
||||
|
||||
import re
|
||||
|
||||
__version__ = '3.2.3' # Transifex-fixed
|
||||
|
||||
|
||||
class CertificateError(ValueError):
    """Raised when a certificate does not match the expected hostname."""
    pass
|
||||
|
||||
|
||||
def _dnsname_to_pat(dn, max_wildcards=2):
|
||||
# See also http://bugs.python.org/issue17980
|
||||
pats = []
|
||||
for frag in dn.split(r'.'):
|
||||
if frag.count('*') > max_wildcards:
|
||||
raise CertificateError(
|
||||
"too many wildcards in certificate name: " + repr(dn)
|
||||
)
|
||||
if frag == '*':
|
||||
# When '*' is a fragment by itself, it matches a non-empty dotless
|
||||
# fragment.
|
||||
pats.append('[^.]+')
|
||||
else:
|
||||
if frag.count('*') > 2:
|
||||
raise CertificateError('Invalid hostname in the certificate')
|
||||
# Otherwise, '*' matches any dotless fragment.
|
||||
frag = re.escape(frag)
|
||||
pats.append(frag.replace(r'\*', '[^.]*'))
|
||||
return re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
|
||||
|
||||
|
||||
def match_hostname(cert, hostname):
    """Verify that *cert* (in decoded format as returned by
    SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 rules
    are mostly followed, but IP addresses are not accepted for *hostname*.

    CertificateError is raised on failure. On success, the function
    returns nothing.
    """
    if not cert:
        raise ValueError("empty or no certificate")
    candidates = []
    for field, name in cert.get('subjectAltName', ()):
        if field == 'DNS':
            if _dnsname_to_pat(name).match(hostname):
                return
            candidates.append(name)
    if not candidates:
        # The subject is only checked when there is no dNSName entry
        # in subjectAltName
        for rdn in cert.get('subject', ()):
            for field, name in rdn:
                # XXX according to RFC 2818, the most specific Common Name
                # must be used.
                if field == 'commonName':
                    if _dnsname_to_pat(name).match(hostname):
                        return
                    candidates.append(name)
    if len(candidates) > 1:
        raise CertificateError("hostname %r "
            "doesn't match either of %s"
            % (hostname, ', '.join(map(repr, candidates))))
    elif len(candidates) == 1:
        raise CertificateError("hostname %r "
            "doesn't match %r"
            % (hostname, candidates[0]))
    else:
        raise CertificateError("no appropriate commonName or "
            "subjectAltName fields were found")
|
241
source/tools/i18n/txclib/parsers.py
Normal file
241
source/tools/i18n/txclib/parsers.py
Normal file
@ -0,0 +1,241 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from optparse import OptionParser, OptionGroup
|
||||
|
||||
|
||||
class EpilogParser(OptionParser):
    """OptionParser that prints the epilog verbatim instead of re-wrapping it."""

    def format_epilog(self, formatter):
        # Hand-formatted examples in the epilog must keep their line breaks.
        return self.epilog
|
||||
|
||||
|
||||
def delete_parser():
    """Return the command-line parser for the delete command."""
    parser = EpilogParser(
        usage="usage: %prog [tx_options] delete OPTION [OPTIONS]",
        description=(
            "This command deletes translations for a resource in the remote server."
        ),
        epilog=(
            "\nExamples:\n"
            " To delete a translation:\n "
            "$ tx delete -r project.resource -l <lang_code>\n\n"
            " To delete a resource:\n $ tx delete -r project.resource\n"
        ),
    )
    # Filters: which resource / translation to delete.
    parser.add_option("-r", "--resource", action="store", dest="resources",
                      default=None,
                      help="Specify the resource you want to delete (defaults to all)")
    parser.add_option("-l", "--language", action="store", dest="languages",
                      default=None,
                      help="Specify the translation you want to delete")
    # Behavior flags.
    parser.add_option("--skip", action="store_true", dest="skip_errors",
                      default=False, help="Don't stop on errors.")
    parser.add_option("-f", "--force", action="store_true", dest="force_delete",
                      default=False, help="Delete an entity forcefully.")
    return parser
|
||||
|
||||
|
||||
def help_parser():
    """Return the command-line parser for the help command."""
    description = (
        "Lists all available commands in the transifex command"
        " client. If a command is specified, the help page of the specific"
        " command is displayed instead."
    )
    return OptionParser(usage="usage: %prog help command",
                        description=description)
|
||||
|
||||
|
||||
def init_parser():
    """Return the command-line parser for the init command.

    Options: --host, --user, --pass (all default to None, meaning the
    client will prompt or fall back to stored configuration).
    """
    usage = "usage: %prog [tx_options] init <path>"
    description = (
        "This command initializes a new project for use with"
        " transifex. It is recommended to execute this command in the"
        " top level directory of your project so that you can include"
        " all files under it in transifex. If no path is provided, the"
        " current working dir will be used."
    )
    p = OptionParser(usage=usage, description=description)
    p.add_option(
        "--host", dest="host", action="store", default=None,
        help="Specify a default Transifex host."
    )
    p.add_option(
        "--user", dest="user", action="store", default=None,
        help="Specify username for Transifex server."
    )
    p.add_option(
        "--pass", dest="password", action="store", default=None,
        help="Specify password for Transifex server."
    )
    return p
|
||||
|
||||
|
||||
def pull_parser():
    """Return the command-line parser for the pull command.

    Options cover language/resource filtering, fetching new languages
    (-a), forcing downloads, overwrite control and minimum completion
    percentage.
    """
    usage = "usage: %prog [tx_options] pull [options]"
    description = (
        "This command pulls all outstanding changes from the remote"
        " Transifex server to the local repository. By default, only the"
        " files that are watched by Transifex will be updated but if you"
        " want to fetch the translations for new languages as well, use the"
        " -a|--all option. (Note: new translations are saved in the .tx folder"
        " and require the user to manually rename them and add then in "
        " transifex using the set_translation command)."
    )
    p = OptionParser(usage=usage, description=description)
    # Filtering options.
    p.add_option(
        "-l", "--language", dest="languages", action="store", default=[],
        help="Specify which translations you want to pull (defaults to all)"
    )
    p.add_option(
        "-r", "--resource", dest="resources", action="store", default=[],
        help="Specify the resource for which you want to pull the translations (defaults to all)"
    )
    # Fetch behavior.
    p.add_option(
        "-a", "--all", dest="fetchall", action="store_true", default=False,
        help="Fetch all translation files from server (even new ones)"
    )
    p.add_option(
        "-s", "--source", dest="fetchsource", action="store_true", default=False,
        help="Force the fetching of the source file (default: False)"
    )
    p.add_option(
        "-f", "--force", dest="force", action="store_true", default=False,
        help="Force download of translations files."
    )
    p.add_option(
        "--skip", dest="skip_errors", action="store_true", default=False,
        help="Don't stop on errors. Useful when pushing many files concurrently."
    )
    p.add_option(
        "--disable-overwrite", dest="overwrite", action="store_false", default=True,
        help="By default transifex will fetch new translations files and"
             " replace existing ones. Use this flag if you want to disable"
             " this feature"
    )
    p.add_option(
        "--minimum-perc", dest="minimum_perc", action="store", type="int", default=0,
        help="Specify the minimum acceptable percentage of a translation "
             "in order to download it."
    )
    p.add_option(
        "--mode", dest="mode", action="store",
        help=(
            "Specify the mode of the translation file to pull (e.g. "
            "'reviewed'). See http://bit.ly/txcmod1 for available values."
        )
    )
    return p
|
||||
|
||||
|
||||
def push_parser():
    """Return the command-line parser for the push command.

    Options select what to push (languages, resources, source and/or
    translations) and how (force, skip errors, non-interactive).
    """
    usage = "usage: %prog [tx_options] push [options]"
    description = (
        "This command pushes all local files that have been added to"
        " Transifex to the remote server. All new translations are merged"
        " with existing ones and if a language doesn't exists then it gets"
        " created. If you want to push the source file as well (either"
        " because this is your first time running the client or because"
        " you just have updated with new entries), use the -f|--force option."
        " By default, this command will push all files which are watched by"
        " Transifex but you can filter this per resource or/and language."
    )
    p = OptionParser(usage=usage, description=description)
    p.add_option(
        "-l", "--language", dest="languages", action="store", default=None,
        help="Specify which translations you want to push (defaults to all)"
    )
    p.add_option(
        "-r", "--resource", dest="resources", action="store", default=None,
        help="Specify the resource for which you want to push the translations (defaults to all)"
    )
    p.add_option(
        "-f", "--force", dest="force_creation", action="store_true", default=False,
        help="Push source files without checking modification times."
    )
    p.add_option(
        "--skip", dest="skip_errors", action="store_true", default=False,
        help="Don't stop on errors. Useful when pushing many files concurrently."
    )
    p.add_option(
        "-s", "--source", dest="push_source", action="store_true", default=False,
        help="Push the source file to the server."
    )
    p.add_option(
        "-t", "--translations", dest="push_translations", action="store_true", default=False,
        help="Push the translation files to the server"
    )
    p.add_option(
        "--no-interactive", dest="no_interactive", action="store_true", default=False,
        help="Don't require user input when forcing a push."
    )
    return p
|
||||
|
||||
|
||||
def set_parser():
    """Return the command-line parser for the set command.

    The set command maps local files (or remote Transifex entities) to
    project resources. The "Extended options" group applies only to
    --auto-local mode.
    """
    usage = "usage: %prog [tx_options] set [options] [args]"
    description = (
        "This command can be used to create a mapping between files"
        " and projects either using local files or using files from a remote"
        " Transifex server."
    )
    epilog = (
        "\nExamples:\n"
        " To set the source file:\n $ tx set -r project.resource --source -l en <file>\n\n"
        " To set a single translation file:\n $ tx set -r project.resource -l de <file>\n\n"
        " To automatically detect and assign the source files and translations:\n"
        " $ tx set --auto-local -r project.resource 'expr' --source-lang en\n\n"
        " To set a specific file as a source and auto detect translations:\n"
        " $ tx set --auto-local -r project.resource 'expr' --source-lang en"
        " --source-file <file>\n\n"
        " To set a remote release/resource/project:\n"
        " $ tx set --auto-remote <transifex-url>\n"
    )
    parser = EpilogParser(usage=usage, description=description, epilog=epilog)
    parser.add_option("--auto-local", action="store_true", dest="local",
        default=False, help="Used when auto configuring local project.")
    parser.add_option("--auto-remote", action="store_true", dest="remote",
        default=False, help="Used when adding remote files from Transifex"
        " server.")
    parser.add_option("-r", "--resource", action="store", dest="resource",
        default=None, help="Specify the slug of the resource that you're"
        " setting up (This must be in the following format:"
        " `project_slug.resource_slug`).")
    parser.add_option(
        "--source", action="store_true", dest="is_source", default=False,
        help=(
            "Specify that the given file is a source file "
            "[doesn't work with the --auto-* commands]."
        )
    )
    parser.add_option("-l", "--language", action="store", dest="language",
        default=None, help="Specify which translations you want to pull"
        " [doesn't work with the --auto-* commands].")
    parser.add_option("-t", "--type", action="store", dest="i18n_type",
        help=(
            "Specify the i18n type of the resource(s). This is only needed, if "
            "the resource(s) does not exist yet in Transifex. For a list of "
            "available i18n types, see "
            "http://help.transifex.com/features/formats.html"
        )
    )
    parser.add_option("--minimum-perc", action="store", dest="minimum_perc",
        help=(
            "Specify the minimum acceptable percentage of a translation "
            "in order to download it."
        )
    )
    parser.add_option(
        "--mode", action="store", dest="mode", help=(
            "Specify the mode of the translation file to pull (e.g. "
            "'reviewed'). See http://help.transifex.com/features/client/"
            # Bug fix: a space was missing between "the" and "available",
            # producing "for theavailable values." in the help output.
            "index.html#defining-the-mode-of-the-translated-file for the "
            "available values."
        )
    )
    group = OptionGroup(parser, "Extended options", "These options can only be"
        " used with the --auto-local command.")
    group.add_option("-s", "--source-language", action="store",
        dest="source_language",
        default=None, help="Specify the source language of a resource"
        " [requires --auto-local].")
    group.add_option("-f", "--source-file", action="store", dest="source_file",
        default=None, help="Specify the source file of a resource [requires"
        " --auto-local].")
    group.add_option("--execute", action="store_true", dest="execute",
        default=False, help="Execute commands [requires --auto-local].")
    parser.add_option_group(group)
    return parser
|
||||
|
||||
|
||||
def status_parser():
    """Return the command-line parser for the status command.

    Only a resource filter (-r/--resource) is supported.
    """
    usage = "usage: %prog [tx_options] status [options]"
    description = (
        "Prints the status of the current project by reading the"
        " data in the configuration file."
    )
    p = OptionParser(usage=usage, description=description)
    p.add_option(
        "-r", "--resource", dest="resources", action="store", default=[],
        help="Specify resources"
    )
    return p
|
||||
|
||||
|
||||
def parse_csv_option(option):
    """Return a list out of the comma-separated option or an empty list.

    Empty strings and None both yield an empty list.
    """
    return option.split(',') if option else []
|
36
source/tools/i18n/txclib/paths.py
Normal file
36
source/tools/i18n/txclib/paths.py
Normal file
@ -0,0 +1,36 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Path handling.
|
||||
|
||||
We need to take into account the differences between UNIX systems and
|
||||
Windows.
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
|
||||
posix_sep = os.sep if os.altsep is None else os.altsep
|
||||
|
||||
|
||||
def posix_path(fpath):
    """Convert a filesystem path to a posix path.

    Always use the forward slash as a separator. For instance,
    in windows the separator is the backslash.

    Args:
        fpath: The path to convert.
    """
    # On platforms without an alternative separator (POSIX) the path is
    # already in posix form.
    if os.altsep is None:
        return fpath
    return fpath.replace(os.sep, os.altsep)
|
||||
|
||||
|
||||
def native_path(fpath):
    """Convert a filesystem path to a native path.

    Use whatever separator is defined by the platform.

    Args:
        fpath: The path to convert.
    """
    # Nothing to do on POSIX systems, where os.altsep is None.
    if os.altsep is None:
        return fpath
    return fpath.replace(os.altsep, os.sep)
|
54
source/tools/i18n/txclib/processors.py
Normal file
54
source/tools/i18n/txclib/processors.py
Normal file
@ -0,0 +1,54 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Module for API-related calls.
|
||||
"""
|
||||
|
||||
import urlparse
|
||||
|
||||
|
||||
def hostname_tld_migration(hostname):
    """
    Migrate transifex.net to transifex.com.

    :param hostname: The hostname to migrate (if needed).
    :returns: A hostname with the transifex.com domain (if needed).
    """
    parsed = urlparse.urlparse(hostname)
    if parsed.hostname.endswith('transifex.net'):
        # Only the first occurrence is rewritten, on purpose.
        return hostname.replace('transifex.net', 'transifex.com', 1)
    return hostname
|
||||
|
||||
|
||||
def hostname_ssl_migration(hostname):
    """
    Migrate Transifex hostnames to use HTTPS.

    :param hostname: The hostname to migrate (if needed).
    :returns: A https hostname (if needed).
    """
    parts = urlparse.urlparse(hostname)
    # Bug fix: a bare hostname without a scheme or a leading '//' is parsed
    # entirely into `path`, leaving `parts.hostname` as None; the subscript
    # below used to raise TypeError in that case. Return it unchanged.
    if parts.hostname is None:
        return hostname
    is_transifex = (
        parts.hostname[-14:-3] == '.transifex.' or
        parts.hostname == 'transifex.net' or
        parts.hostname == 'transifex.com'
    )
    is_https = parts.scheme == 'https'
    if is_transifex and not is_https:
        if not parts.scheme:
            # Scheme-relative form, e.g. '//www.transifex.com'.
            hostname = 'https:' + hostname
        else:
            hostname = hostname.replace(parts.scheme, 'https', 1)
    return hostname
|
||||
|
||||
|
||||
def visit_hostname(hostname):
    """
    Have a chance to visit a hostname before actually using it.

    :param hostname: The original hostname.
    :returns: The hostname with the necessary changes.
    """
    # Order matters: force HTTPS first, then migrate the TLD.
    processors = (hostname_ssl_migration, hostname_tld_migration)
    for process in processors:
        hostname = process(hostname)
    return hostname
|
1264
source/tools/i18n/txclib/project.py
Normal file
1264
source/tools/i18n/txclib/project.py
Normal file
File diff suppressed because it is too large
Load Diff
19
source/tools/i18n/txclib/urls.py
Normal file
19
source/tools/i18n/txclib/urls.py
Normal file
@ -0,0 +1,19 @@
|
||||
# These are the Transifex API urls
|
||||
|
||||
API_URLS = {
|
||||
'get_resources': '%(hostname)s/api/2/project/%(project)s/resources/',
|
||||
'project_details': '%(hostname)s/api/2/project/%(project)s/?details',
|
||||
'resource_details': '%(hostname)s/api/2/project/%(project)s/resource/%(resource)s/',
|
||||
'release_details': '%(hostname)s/api/2/project/%(project)s/release/%(release)s/',
|
||||
'pull_file': '%(hostname)s/api/2/project/%(project)s/resource/%(resource)s/translation/%(language)s/?file',
|
||||
'pull_reviewed_file': '%(hostname)s/api/2/project/%(project)s/resource/%(resource)s/translation/%(language)s/?file&mode=reviewed',
|
||||
'pull_translator_file': '%(hostname)s/api/2/project/%(project)s/resource/%(resource)s/translation/%(language)s/?file&mode=translator',
|
||||
'pull_developer_file': '%(hostname)s/api/2/project/%(project)s/resource/%(resource)s/translation/%(language)s/?file&mode=default',
|
||||
'resource_stats': '%(hostname)s/api/2/project/%(project)s/resource/%(resource)s/stats/',
|
||||
'create_resource': '%(hostname)s/api/2/project/%(project)s/resources/',
|
||||
'push_source': '%(hostname)s/api/2/project/%(project)s/resource/%(resource)s/content/',
|
||||
'push_translation': '%(hostname)s/api/2/project/%(project)s/resource/%(resource)s/translation/%(language)s/',
|
||||
'delete_translation': '%(hostname)s/api/2/project/%(project)s/resource/%(resource)s/translation/%(language)s/',
|
||||
'formats': '%(hostname)s/api/2/formats/',
|
||||
'delete_resource': '%(hostname)s/api/2/project/%(project)s/resource/%(resource)s/',
|
||||
}
|
218
source/tools/i18n/txclib/utils.py
Normal file
218
source/tools/i18n/txclib/utils.py
Normal file
@ -0,0 +1,218 @@
|
||||
import os, sys, re, errno
|
||||
try:
|
||||
from json import loads as parse_json, dumps as compile_json
|
||||
except ImportError:
|
||||
from simplejson import loads as parse_json, dumps as compile_json
|
||||
import urllib2 # This should go and instead use do_url_request everywhere
|
||||
|
||||
from txclib.urls import API_URLS
|
||||
from txclib.log import logger
|
||||
from txclib.exceptions import UnknownCommandError
|
||||
from txclib.paths import posix_path, native_path, posix_sep
|
||||
from txclib.web import verify_ssl
|
||||
|
||||
|
||||
def find_dot_tx(path=os.path.curdir, previous=None):
    """Return the path where .tx folder is found.

    The 'path' should be a DIRECTORY.
    This process is functioning recursively from the current directory to each
    one of the ancestors dirs.
    """
    path = os.path.abspath(path)
    # When dirname() stops changing we have reached the filesystem root
    # without finding a .tx folder.
    if path == previous:
        return None
    if os.path.isdir(os.path.join(path, ".tx")):
        return path
    return find_dot_tx(os.path.dirname(path), path)
|
||||
|
||||
|
||||
#################################################
|
||||
# Parse file filter expressions and create regex
|
||||
|
||||
def regex_from_filefilter(file_filter, root_path=os.path.curdir):
    """Create proper regex from <lang> expression."""
    # Escape the whole expression so every character matches literally, then
    # restore the <lang> placeholder (re.escape may or may not have escaped
    # the angle brackets, depending on the Python version) and turn it into
    # a capture group matching a single path component.
    escaped = re.escape(
        posix_path(os.path.join(root_path, native_path(file_filter)))
    )
    lang_group = '([^%(sep)s]+)' % {'sep': re.escape(posix_sep)}
    escaped = escaped.replace("\\<lang\\>", '<lang>').replace('<lang>', lang_group)

    return "^%s$" % escaped
|
||||
|
||||
|
||||
# Patterns for recognizing Transifex web URLs. Raw strings avoid the
# invalid-escape-sequence pitfall for the \w and \. atoms.
TX_URLS = {
    'resource': r'(?P<hostname>https?://(\w|\.|:|-)+)/projects/p/(?P<project>(\w|-)+)/resource/(?P<resource>(\w|-)+)/?$',
    'release': r'(?P<hostname>https?://(\w|\.|:|-)+)/projects/p/(?P<project>(\w|-)+)/r/(?P<release>(\w|-)+)/?$',
    'project': r'(?P<hostname>https?://(\w|\.|:|-)+)/projects/p/(?P<project>(\w|-)+)/?$',
}


def parse_tx_url(url):
    """
    Try to match given url to any of the valid url patterns specified in
    TX_URLS. If no match is found, raise an exception.

    :param url: A Transifex project/release/resource URL.
    :returns: A ``(type, groupdict)`` tuple, e.g. ``('project', {...})``.
    :raises Exception: When the URL matches none of the known patterns.
    """
    for type_, pattern in TX_URLS.items():
        match = re.match(pattern, url)
        if match:
            return type_, match.groupdict()
    raise Exception(
        "tx: Malformed url given. Please refer to our docs: http://bit.ly/txautor"
    )
|
||||
|
||||
|
||||
def get_details(api_call, username, password, *args, **kwargs):
    """
    Get the tx project info through the API.

    This function can also be used to check the existence of a project.

    :param api_call: Key into API_URLS selecting the endpoint template.
    :param username: Username for HTTP Basic authentication.
    :param password: Password for HTTP Basic authentication.
    :param kwargs: Substitutions for the URL template (hostname, project, ...).
    :returns: The parsed JSON response from the server.
    :raises urllib2.HTTPError: For 401/403/404 responses (re-raised as-is).
    :raises Exception: For any other HTTP error or connection failure.
    """
    import base64
    # Build the endpoint URL from the template; kwargs supplies the fields.
    url = (API_URLS[api_call] % (kwargs)).encode('UTF-8')
    verify_ssl(url)

    req = urllib2.Request(url=url)
    # encodestring appends a trailing newline; [:-1] strips it.
    base64string = base64.encodestring('%s:%s' % (username, password))[:-1]
    authheader = "Basic %s" % base64string
    req.add_header("Authorization", authheader)

    try:
        fh = urllib2.urlopen(req)
        raw = fh.read()
        fh.close()
        remote_project = parse_json(raw)
    except urllib2.HTTPError, e:
        # Auth/permission/not-found errors are meaningful to callers.
        if e.code in [401, 403, 404]:
            raise e
        else:
            # For other requests, we should print the message as well
            raise Exception("Remote server replied: %s" % e.read())
    except urllib2.URLError, e:
        error = e.args[0]
        raise Exception("Remote server replied: %s" % error[1])

    return remote_project
|
||||
|
||||
|
||||
def valid_slug(slug):
    """
    Check if a slug contains only valid characters.

    A slug must have exactly two dot-separated parts
    (``project_slug.resource_slug``); valid characters are letters,
    digits, hyphens and underscores.
    """
    parts = slug.split('.')
    if len(parts) != 2:
        return False
    slug_re = "^[A-Za-z0-9_-]*$"
    return bool(re.match(slug_re, parts[0]) and re.match(slug_re, parts[1]))
|
||||
|
||||
|
||||
def discover_commands():
    """
    Inspect commands.py and find all available commands.

    :returns: A dict mapping command name (``cmd_`` prefix stripped) to the
        implementing function.
    """
    import inspect
    from txclib import commands

    return {
        name.split("cmd_")[1]: func
        for name, func in inspect.getmembers(commands, inspect.isfunction)
        if name.startswith("cmd_")
    }
|
||||
|
||||
|
||||
def exec_command(command, *args, **kwargs):
    """
    Execute given command.

    :raises UnknownCommandError: When no ``cmd_<command>`` function exists.
    """
    available = discover_commands()
    if command not in available:
        raise UnknownCommandError
    available[command](*args, **kwargs)
|
||||
|
||||
|
||||
def mkdir_p(path):
    """Create *path* like ``mkdir -p``: make intermediate dirs, ignore EEXIST.

    An empty path is a no-op. Any OSError other than EEXIST is re-raised.
    """
    try:
        if path:
            os.makedirs(path)
    except OSError, exc: # Python >2.5
        # An already-existing directory is fine.
        # NOTE(review): this also swallows EEXIST when `path` exists as a
        # regular file -- confirm that is acceptable to callers.
        if exc.errno == errno.EEXIST:
            pass
        else:
            raise
|
||||
|
||||
|
||||
def confirm(prompt='Continue?', default=True):
    """
    Prompt the user for a Yes/No answer.

    Args:
        prompt: The text displayed to the user ([Y/n] will be appended)
        default: If the default value will be yes or no

    Returns:
        True when the user answered yes (or hit Enter with default=True).
    """
    valid_yes = ['Y', 'y', 'Yes', 'yes', ]
    valid_no = ['N', 'n', 'No', 'no', ]
    # An empty answer ('') is added to whichever list matches the default,
    # so pressing Enter selects the default.
    if default:
        prompt = prompt + '[Y/n]'
        valid_yes.append('')
    else:
        prompt = prompt + '[y/N]'
        valid_no.append('')

    # Re-prompt until an unambiguous answer is given.
    ans = raw_input(prompt)
    while (ans not in valid_yes and ans not in valid_no):
        ans = raw_input(prompt)

    return ans in valid_yes
|
||||
|
||||
|
||||
# Stuff for command line colored output

COLORS = [
    'BLACK', 'RED', 'GREEN', 'YELLOW',
    'BLUE', 'MAGENTA', 'CYAN', 'WHITE'
]

DISABLE_COLORS = False


def color_text(text, color_name, bold=False):
    """
    This command can be used to colorify command line output. If the shell
    doesn't support this or the --disable-colors options has been set, it just
    returns the plain text.

    Usage:
        print "%s" % color_text("This text is red", "RED")
    """
    if DISABLE_COLORS or color_name not in COLORS:
        return text
    # ANSI SGR codes: 30-37 select the foreground color, 1 selects bold.
    ansi_color = COLORS.index(color_name) + 30
    return '\033[%s;%sm%s\033[0m' % (int(bold), ansi_color, text)
|
||||
|
||||
|
||||
def files_in_project(curpath):
    """
    Iterate over the files in the project.

    Return each file under ``curpath`` with its absolute name.
    Symlinked directories are followed.
    """
    for dirpath, _dirnames, filenames in os.walk(curpath, followlinks=True):
        for filename in filenames:
            yield os.path.abspath(os.path.join(dirpath, filename))
|
176
source/tools/i18n/txclib/web.py
Normal file
176
source/tools/i18n/txclib/web.py
Normal file
@ -0,0 +1,176 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import os
|
||||
import urllib2
|
||||
import socket
|
||||
import ssl
|
||||
import urlparse
|
||||
import mimetools
|
||||
import mimetypes
|
||||
import platform
|
||||
#from pkg_resources import resource_filename, resource_string
|
||||
from txclib import get_version
|
||||
from txclib.packages.ssl_match_hostname import match_hostname
|
||||
|
||||
|
||||
# Helper class to enable urllib2 to handle PUT/DELETE requests as well
class RequestWithMethod(urllib2.Request):
    """Workaround for using DELETE with urllib2.

    urllib2 derives GET/POST from the presence of `data`; this subclass
    lets the caller force an explicit HTTP verb.
    """

    def __init__(self, url, method, data=None, headers={},
                 origin_req_host=None, unverifiable=False):
        self._method = method
        # Bug fix: forward the caller's origin_req_host/unverifiable values;
        # they were previously hard-coded to None/False, silently discarding
        # the arguments.
        urllib2.Request.__init__(self, url, data=data, headers=headers,
                                 origin_req_host=origin_req_host,
                                 unverifiable=unverifiable)

    def get_method(self):
        return self._method
|
||||
|
||||
|
||||
import urllib
|
||||
import stat
|
||||
from cStringIO import StringIO
|
||||
|
||||
|
||||
class Callable:
    """Attach a plain function to an instance as its __call__ attribute.

    Used below to expose multipart_encode as a static-method-like callable.
    """

    def __init__(self, anycallable):
        self.__call__ = anycallable
|
||||
|
||||
# Controls how sequences are uncoded. If true, elements may be given multiple
|
||||
# values by assigning a sequence.
|
||||
doseq = 1
|
||||
|
||||
|
||||
class MultipartPostHandler(urllib2.BaseHandler):
    """urllib2 handler that encodes dict/sequence request data.

    Plain values are urlencoded; open file objects are sent as
    multipart/form-data parts. Python 2 only (uses the `file` builtin,
    `print` statement, mimetools and cStringIO).
    """
    # Must run before the default HTTP handler so the body is encoded first.
    handler_order = urllib2.HTTPHandler.handler_order - 10 # needs to run first

    def http_request(self, request):
        data = request.get_data()
        # Only transform structured data; a pre-encoded str body is passed through.
        if data is not None and type(data) != str:
            v_files = []
            v_vars = []
            try:
                # Split the mapping into file uploads and plain variables.
                for (key, value) in data.items():
                    if type(value) == file:
                        v_files.append((key, value))
                    else:
                        v_vars.append((key, value))
            except TypeError:
                systype, value, traceback = sys.exc_info()
                raise TypeError, "not a valid non-string sequence or mapping object", traceback

            if len(v_files) == 0:
                # No files: ordinary application/x-www-form-urlencoded body.
                data = urllib.urlencode(v_vars, doseq)
            else:
                boundary, data = self.multipart_encode(v_vars, v_files)

                contenttype = 'multipart/form-data; boundary=%s' % boundary
                if(request.has_header('Content-Type')
                   and request.get_header('Content-Type').find('multipart/form-data') != 0):
                    print "Replacing %s with %s" % (request.get_header('content-type'), 'multipart/form-data')
                request.add_unredirected_header('Content-Type', contenttype)

            request.add_data(data)

        return request

    # NOTE(review): reconstructed indentation -- the contenttype block above is
    # assumed to belong to the `else` branch, matching the classic recipe this
    # handler derives from; confirm against the original file.
    def multipart_encode(vars, files, boundary = None, buf = None):
        """Build a multipart/form-data body; returns (boundary, body)."""
        if boundary is None:
            boundary = mimetools.choose_boundary()
        if buf is None:
            buf = StringIO()
        for(key, value) in vars:
            buf.write('--%s\r\n' % boundary)
            buf.write('Content-Disposition: form-data; name="%s"' % key)
            buf.write('\r\n\r\n' + value + '\r\n')
        for(key, fd) in files:
            file_size = os.fstat(fd.fileno())[stat.ST_SIZE]
            filename = fd.name.split(os.path.sep)[-1]
            contenttype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
            buf.write('--%s\r\n' % boundary)
            buf.write('Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (key, filename))
            buf.write('Content-Type: %s\r\n' % contenttype)
            # buffer += 'Content-Length: %s\r\n' % file_size
            fd.seek(0)
            buf.write('\r\n' + fd.read() + '\r\n')
        buf.write('--' + boundary + '--\r\n\r\n')
        buf = buf.getvalue()
        return boundary, buf
    # Expose multipart_encode without an implicit `self` argument.
    multipart_encode = Callable(multipart_encode)

    # HTTPS requests get the same body transformation.
    https_request = http_request
|
||||
|
||||
|
||||
def user_agent_identifier():
    """Return the user agent for the client."""
    version = get_version()
    system = platform.system()
    machine = platform.machine()
    return "txclient/%s (%s %s)" % (version, system, machine)
|
||||
|
||||
|
||||
def _verify_ssl(hostname, port=443):
    """Verify the SSL certificate of the given host.

    Opens a TCP connection, wraps it with certificate validation against
    the bundle from certs_file(), and checks the certificate's names
    against *hostname*. The socket is always closed.
    """
    sock = socket.create_connection((hostname, port))
    try:
        wrapped = ssl.wrap_socket(
            sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs=certs_file()
        )
        # Raises when the certificate does not cover this hostname.
        match_hostname(wrapped.getpeercert(), hostname)
    finally:
        sock.close()
|
||||
|
||||
|
||||
def certs_file():
    """Return a path to a CA bundle for SSL certificate verification.

    On Windows the bundled cacert.pem is copied to the user's appdata folder;
    elsewhere well-known distro bundle locations are tried first.

    NOTE(review): both `resource_string` and `resource_filename` come from
    pkg_resources, whose import is commented out at the top of this module --
    those code paths would raise NameError as written. Confirm the import
    should be restored.
    """
    if platform.system() == 'Windows':
        # Workaround py2exe and resource_filename incompatibility.
        # Store the content in the filesystem permanently.
        app_dir = os.path.join(
            os.getenv('appdata', os.path.expanduser('~')), 'transifex-client'
        )
        if not os.path.exists(app_dir):
            os.mkdir(app_dir)
        ca_file = os.path.join(app_dir, 'cacert.pem')
        if not os.path.exists(ca_file):
            content = resource_string(__name__, 'cacert.pem')
            with open(ca_file, 'w') as f:
                f.write(content)
        return ca_file
    else:
        # Well-known CA bundle locations shipped by various distributions.
        POSSIBLE_CA_BUNDLE_PATHS = [
            # Red Hat, CentOS, Fedora and friends
            # (provided by the ca-certificates package):
            '/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem',
            '/etc/ssl/certs/ca-bundle.crt',
            '/etc/pki/tls/certs/ca-bundle.crt',
            # Ubuntu, Debian, and friends
            # (provided by the ca-certificates package):
            '/etc/ssl/certs/ca-certificates.crt',
            # FreeBSD (provided by the ca_root_nss package):
            '/usr/local/share/certs/ca-root-nss.crt',
            # openSUSE (provided by the ca-certificates package),
            # the 'certs' directory is the
            # preferred way but may not be supported by the SSL module,
            # thus it has 'ca-bundle.pem'
            # as a fallback (which is generated from pem files in the
            # 'certs' directory):
            '/etc/ssl/ca-bundle.pem',
        ]
        for path in POSSIBLE_CA_BUNDLE_PATHS:
            if os.path.exists(path):
                return path
        # Fall back to the bundle shipped with the package itself.
        return resource_filename(__name__, 'cacert.pem')
|
||||
|
||||
|
||||
def verify_ssl(host):
    """Verify the SSL certificate of *host* when it is an https URL.

    Non-https URLs are ignored. The port is taken from the netloc when
    present, otherwise 443.
    """
    parts = urlparse.urlparse(host)
    if parts.scheme != 'https':
        return

    if ':' in parts.netloc:
        hostname, port = parts.netloc.split(':')
    else:
        hostname = parts.netloc
        port = parts.port if parts.port is not None else 443
    _verify_ssl(hostname, port)
|
112
source/tools/i18n/updateTemplates.py
Normal file
112
source/tools/i18n/updateTemplates.py
Normal file
@ -0,0 +1,112 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding:utf-8 -*-
|
||||
#
|
||||
# Copyright (C) 2013 Wildfire Games.
|
||||
# This file is part of 0 A.D.
|
||||
#
|
||||
# 0 A.D. is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 2 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# 0 A.D. is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with 0 A.D. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from __future__ import absolute_import, division, print_function, unicode_literals
|
||||
|
||||
import codecs, json, os, textwrap
|
||||
|
||||
from potter.catalog import Catalog, Message
|
||||
from potter.extract import getExtractorInstance
|
||||
from potter.pofile import write_po
|
||||
|
||||
|
||||
l10nToolsDirectory = os.path.dirname(os.path.realpath(__file__))
|
||||
projectRootDirectory = os.path.abspath(os.path.join(l10nToolsDirectory, os.pardir, os.pardir, os.pardir))
|
||||
l10nFolderName = "l10n"
|
||||
messagesFilename = "messages.json"
|
||||
|
||||
|
||||
def warnAboutUntouchedMods():
    """
    Warn about mods that are not properly configured to get their messages extracted.

    A mod (any folder not starting with "_") needs an l10n folder containing a
    messages.json file; anything else is reported with the reason.
    """
    modsRootFolder = os.path.join(projectRootDirectory, "binaries", "data", "mods")
    untouchedMods = {}
    for modFolder in os.listdir(modsRootFolder):
        if modFolder[0] != "_":
            if not os.path.exists(os.path.join(modsRootFolder, modFolder, l10nFolderName)):
                untouchedMods[modFolder] = "There is no '{folderName}' folder in the root folder of this mod.".format(folderName=l10nFolderName)
            elif not os.path.exists(os.path.join(modsRootFolder, modFolder, l10nFolderName, messagesFilename)):
                # Bug fix: the message was missing the '{filename}' placeholder
                # even though `filename` is passed to format().
                untouchedMods[modFolder] = "There is no '{filename}' file within the '{folderName}' folder in the root folder of this mod.".format(folderName=l10nFolderName, filename=messagesFilename)
    if untouchedMods:
        print(textwrap.dedent("""
                Warning: No messages were extracted from the following mods:
                """))
        for mod in untouchedMods:
            print("• {modName}: {warningMessage}".format(modName=mod, warningMessage=untouchedMods[mod]))
        # Bug fix: restored the '{filename}' placeholder here as well.
        print(textwrap.dedent("""
                For this script to extract messages from a mod folder, this mod folder must contain a '{folderName}'
                folder, and this folder must contain a '{filename}' file that describes how to extract messages for the
                mod. See the folder of the main mod ('public') for an example, and see the documentation for more
                information.
                """.format(folderName=l10nFolderName, filename=messagesFilename)
        ))
|
||||
|
||||
|
||||
def generateTemplatesForMessagesFile(messagesFilePath):
    """Generate the POT template(s) described by the given messages.json file.

    Each entry in the JSON list describes one output template: its extraction
    rules, project name and copyright holder. Entries or rules marked with
    "skip": "yes" are ignored.
    """

    with open(messagesFilePath, 'r') as fileObject:
        settings = json.load(fileObject)

    rootPath = os.path.dirname(messagesFilePath)

    for templateSettings in settings:
        if templateSettings.get("skip") == "yes":
            continue

        # Input files are resolved relative to "inputRoot" when given,
        # otherwise relative to the messages.json folder itself.
        inputRootPath = rootPath
        if "inputRoot" in templateSettings:
            inputRootPath = os.path.join(rootPath, templateSettings["inputRoot"])

        template = Catalog()
        template.project = templateSettings["project"]
        template.copyright_holder = templateSettings["copyrightHolder"]

        for rule in templateSettings["rules"]:
            if rule.get("skip") == "yes":
                continue

            options = rule.get("options", {})
            extractor = getExtractorInstance(rule["extractor"], inputRootPath, rule["filemasks"], options)
            for message, context, location, comments in extractor.run():
                template.add(
                    message,
                    context=context,
                    locations=[location],
                    auto_comments=comments,
                    formatFlag=options.get("format"),
                )

        outputPath = os.path.join(rootPath, templateSettings["output"])
        with codecs.open(outputPath, 'w', 'utf-8') as fileObject:
            write_po(fileObject, template)
|
||||
|
||||
|
||||
def main():
    """Walk the project tree and process every l10n/messages.json found."""
    for root, folders, filenames in os.walk(projectRootDirectory):
        for folder in folders:
            if folder != l10nFolderName:
                continue
            messagesFilePath = os.path.join(root, folder, messagesFilename)
            if os.path.exists(messagesFilePath):
                generateTemplatesForMessagesFile(messagesFilePath)

    warnAboutUntouchedMods()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
Loading…
Reference in New Issue
Block a user