#! /opt/alt/python311/bin/python3.11
# -*- coding: iso-8859-1 -*-
# Originally written by Barry Warsaw <barry@python.org>
# Minimally patched to make it even more xgettext compatible
# by Peter Funk <pf@artcom-gmbh.de>
# Added checks that _() only contains string literals, and
# command line args are resolved to module lists, i.e. you
# can now pass a filename, a module or package name, or a
# directory (including globbing chars, important for Win32).
# Made docstring fit in 80 chars wide displays using pydoc.
# NOTE(review): module help text, wrapped in _() so pygettext can extract
# its own usage message for translation; usage() prints it via
# `__doc__ % globals()`.  `_` must already be bound above this point
# (upstream defines a gettext fallback just before this line; not in
# view).  Several sentences appear truncated in this copy (e.g. lines
# ending "can be" and "just defines"), and the closing quotes are not
# in view -- confirm the full text before editing the string itself.
__doc__ = _("""pygettext -- Python equivalent of xgettext(1)
Many systems (Solaris, Linux, Gnu) provide extensive tools that ease the
internationalization of C programs. Most of these tools are independent of
the programming language and can be used from within Python programs.
Martin von Loewis' work[1] helps considerably in this regard.
There's one problem though; xgettext is the program that scans source code
looking for message strings, but it groks only C (or C++). Python
introduces a few wrinkles, such as dual quoting characters, triple quoted
strings, and raw strings. xgettext understands none of this.
Enter pygettext, which uses Python's standard tokenize module to scan
Python source code, generating .pot files identical to what GNU xgettext[2]
generates for C and C++ code. From there, the standard GNU tools can be
A word about marking Python strings as candidates for translation. GNU
xgettext recognizes the following keywords: gettext, dgettext, dcgettext,
and gettext_noop. But those can be a lot of text to include all over your
code. C and C++ have a trick: they use the C preprocessor. Most
internationalized C source includes a #define for gettext() to _() so that
what has to be written in the source is much less. Thus these are both
gettext("Translatable String")
Python of course has no preprocessor so this doesn't work so well. Thus,
pygettext searches only for _() by default, but see the -k/--keyword flag
below for how to augment this.
[1] https://www.python.org/workshops/1997-10/proceedings/loewis.html
[2] https://www.gnu.org/software/gettext/gettext.html
NOTE: pygettext attempts to be option and feature compatible with GNU
xgettext where ever possible. However some options are still missing or are
not fully implemented. Also, xgettext's use of command line switches with
option arguments is broken, and in these cases, pygettext just defines
Usage: pygettext [options] inputfile ...
Rename the default output file from messages.pot to name.pot.
Replace non-ASCII characters with octal escape sequences.
Extract module, class, method, and function docstrings. These do
not need to be wrapped in _() markers, and in fact cannot be for
Python to consider them docstrings. (See also the -X option).
Print this help message and exit.
Keywords to look for in addition to the default set, which are:
You can have multiple -k flags on the command line.
Disable the default set of keywords (see above). Any keywords
explicitly added with the -k/--keyword option are still recognized.
Do not write filename/lineno location comments.
Write filename/lineno location comments indicating where each
extracted string is found in the source. These lines appear before
each msgid. The style of comments is controlled by the -S/--style
option. This is the default.
Rename the default output file from messages.pot to filename. If
filename is `-' then the output is sent to standard out.
Output files will be placed in directory dir.
Specify which style to use for location comments. Two styles are
Solaris # File: filename, line: line-number
The style name is case insensitive. GNU style is the default.
Print the names of the files being processed.
Print the version of pygettext and exit.
Set width of output to columns.
Specify a file that contains a list of strings that are not be
extracted from the input files. Each string to be excluded must
appear on a line by itself in the file.
Specify a file that contains a list of files (one per line) that
should not have their docstrings extracted. This is only useful in
conjunction with the -D option above.
If `inputfile' is -, standard input is read.
import importlib.machinery
# Comma-separated rendering of the default keyword set, used in help text.
DEFAULTKEYWORDS = ', '.join(default_keywords)
# NOTE(review): the lines below are the interior of the pot_header
# template (its opening `pot_header = _('''` assignment and the msgid/
# msgstr lines are not in view), followed by what looks like the body of
# usage() -- its def line is also not in view.  Confirm against upstream
# Tools/i18n/pygettext.py before editing.
# The normal pot-file header. msgmerge and Emacs's po-mode work better if it's
# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR ORGANIZATION
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
"Project-Id-Version: PACKAGE VERSION\\n"
"POT-Creation-Date: %(time)s\\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n"
"Language-Team: LANGUAGE <LL@li.org>\\n"
"Content-Type: text/plain; charset=%(charset)s\\n"
"Content-Transfer-Encoding: %(encoding)s\\n"
"Generated-By: pygettext.py %(version)s\\n"
# NOTE(review): apparent body of usage(): print the module help (with
# globals interpolated) and an optional extra message to stderr.
print(__doc__ % globals(), file=sys.stderr)
print(msg, file=sys.stderr)
def make_escapes(pass_nonascii):
    """Build the module-level `escapes` table mapping code points / byte
    values to their .po-file escape sequences.

    NOTE(review): fragmentary -- the lines that set `mod` (and presumably
    bind the global `escape` to escape_ascii or escape_nonascii based on
    *pass_nonascii*) are not in view; `mod` is used below without a
    visible definition.  Confirm against upstream pygettext.
    """
    # Allow non-ascii characters to pass through so that e.g. 'msgid
    # escape any character outside the 32..126 range.
    escapes = [r"\%03o" % i for i in range(mod)]
    # Special-case the usual C-style escapes so the .po output stays
    # readable and round-trips through msgfmt.
    escapes[ord('\\')] = r'\\'
    escapes[ord('\t')] = r'\t'
    escapes[ord('\r')] = r'\r'
    escapes[ord('\n')] = r'\n'
    escapes[ord('\"')] = r'\"'
def escape_ascii(s, encoding):
    """Escape *s* for a .po entry: code points below 128 are mapped
    through the module-level ``escapes`` table, while anything at or
    above 128 passes through unchanged.  (*encoding* is accepted for
    signature parity with escape_nonascii but is not used here.)"""
    pieces = []
    for ch in s:
        code = ord(ch)
        pieces.append(escapes[code] if code < 128 else ch)
    return ''.join(pieces)
def escape_nonascii(s, encoding):
    """Encode *s* with *encoding* and map every resulting byte through
    the module-level ``escapes`` table, escaping non-ASCII as octal."""
    encoded = s.encode(encoding)
    return ''.join(escapes[byte] for byte in encoded)
def is_literal_string(s):
    """Return true if token text *s* is a plain string literal, or one
    carrying a single r/R/u/U prefix.  f- and b-prefixed strings do not
    match, so they are never extracted as plain literals."""
    return s[0] in '\'"' or (s[0] in 'rRuU' and s[1] in '\'"')


def safe_eval(s):
    """Evaluate the string-literal source text *s* to its value.

    Builtins are disabled so only literal syntax can execute; callers
    guarantee *s* is a string literal (see is_literal_string()).
    """
    # NOTE(review): the `def safe_eval(s):` header was missing in this
    # copy, leaving a bare return statement; restored per upstream
    # pygettext so the file parses.
    return eval(s, {'__builtins__':{}}, {})
def normalize(s, encoding):
    # This converts the various Python string types into a format that is
    # appropriate for .po files, namely much closer to C style.
    # NOTE(review): fragmentary -- the split of *s* into `lines`, the
    # single-line vs. multi-line branch, the definition of `lineterm`,
    # and the final return are not in view; confirm against upstream
    # pygettext before relying on this body.
    # Single-line case: quote and escape the whole string.
    s = '"' + escape(s, encoding) + '"'
    # Multi-line case: presumably restores the trailing newline lost by
    # the split, escapes each line, and joins with `lineterm`.
    lines[-1] = lines[-1] + '\n'
    for i in range(len(lines)):
        lines[i] = escape(lines[i], encoding)
    s = '""\n"' + lineterm.join(lines) + '"'
def containsAny(str, set):
    """Check whether 'str' contains ANY of the chars in 'set'"""
    # NOTE: the parameter names shadow the builtins `str` and `set`;
    # they are kept unchanged for backward compatibility with callers.
    # any() short-circuits on the first match instead of materializing
    # the full membership list as `1 in [c in str for c in set]` did;
    # the returned bool is identical.
    return any(c in str for c in set)
def getFilesForName(name):
    """Get a list of module files for a filename, a module or package name,
    or a directory.
    """
    # NOTE(review): fragmentary -- glob expansion, the resolution of the
    # found spec to a path, the isdir() branch header, the list
    # initialisations and all return statements are not in view; the
    # indentation below is reconstructed and should be confirmed against
    # upstream pygettext.
    if not os.path.exists(name):
        # Name doesn't exist on disk: maybe it's a glob pattern...
        if containsAny(name, "*?[]"):
            # presumably loops over glob.glob(name) matches (not in view)
            list.extend(getFilesForName(file))
        # try to find module or package
        spec = importlib.util.find_spec(name)
    # find all python files in directory
    # get extension for python source files
    _py_ext = importlib.machinery.SOURCE_SUFFIXES[0]
    for root, dirs, files in os.walk(name):
        # don't recurse into CVS directories
        # add all *.py files to list
        [os.path.join(root, file) for file in files
         if os.path.splitext(file)[1] == _py_ext]
    elif os.path.exists(name):
def __init__(self, options):
    """Set up the token-eater state machine with extraction *options*.

    NOTE(review): other methods in this class read self.__options,
    self.__messages, self.__data and self.__curfile, so upstream
    initialises more attributes here; only these lines are in view.
    """
    self.__state = self.__waiting   # current state-machine handler
    self.__enclosurecount = 0       # bracket nesting depth (see __suiteseen)
def __call__(self, ttype, tstring, stup, etup, line):
    """tokenize callback: dispatch each token to the current state
    handler, passing the token type, its text, and its start line
    (stup[0])."""
    ## print('ttype:', token.tok_name[ttype], 'tstring:', tstring,
    self.__state(ttype, tstring, stup[0])
def __waiting(self, ttype, tstring, lineno):
    """Default state: look for docstrings (if enabled), keyword names,
    and f-strings whose interpolations contain gettext-style calls.

    NOTE(review): heavily fragmentary -- the `opts` binding, the
    module-docstring/__freshmodule handling, several return/continue
    statements, `func`/`func_name`/`arg` bindings, and the print()
    wrappers around the warning strings are not in view.  Indentation
    below is reconstructed following upstream pygettext; confirm before
    editing.
    """
    # Do docstring extractions, if enabled
    if opts.docstrings and not opts.nodocstrings.get(self.__curfile):
        if ttype == tokenize.STRING and is_literal_string(tstring):
            self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
        if ttype in (tokenize.COMMENT, tokenize.NL, tokenize.ENCODING):
        # class or func/method docstring?
        if ttype == tokenize.NAME and tstring in ('class', 'def'):
            self.__state = self.__suiteseen
    if ttype == tokenize.NAME and tstring in opts.keywords:
        self.__state = self.__keywordseen
    if ttype == tokenize.STRING:
        # Parse the string token to see whether it is an f-string
        # (ast.JoinedStr) containing translatable calls.
        maybe_fstring = ast.parse(tstring, mode='eval').body
        if not isinstance(maybe_fstring, ast.JoinedStr):
        # Walk each interpolated value looking for Call nodes whose
        # function name matches one of the configured keywords.
        for value in filter(lambda node: isinstance(node, ast.FormattedValue),
            for call in filter(lambda node: isinstance(node, ast.Call),
                if isinstance(func, ast.Name):
                elif isinstance(func, ast.Attribute):
                if func_name not in opts.keywords:
                # warning text: wrong number of positional arguments
                '*** %(file)s:%(lineno)s: Seen unexpected amount of'
                ' positional arguments in gettext call: %(source_segment)s'
                'source_segment': ast.get_source_segment(tstring, call) or tstring,
                # warning text: keyword arguments are not supported
                '*** %(file)s:%(lineno)s: Seen unexpected keyword arguments'
                ' in gettext call: %(source_segment)s'
                'source_segment': ast.get_source_segment(tstring, call) or tstring,
                if not isinstance(arg, ast.Constant):
                # warning text: argument is not a constant
                '*** %(file)s:%(lineno)s: Seen unexpected argument type'
                ' in gettext call: %(source_segment)s'
                'source_segment': ast.get_source_segment(tstring, call) or tstring,
                if isinstance(arg.value, str):
                    # a plain string constant inside the call: extract it
                    self.__addentry(arg.value, lineno)
def __suiteseen(self, ttype, tstring, lineno):
    """After seeing 'class'/'def': wait for the colon that ends the
    header so the next string can be treated as the suite's docstring."""
    # skip over any enclosure pairs until we see the colon
    if tstring == ':' and self.__enclosurecount == 0:
        # we see a colon and we're not in an enclosure: end of def
        self.__state = self.__suitedocstring
    # NOTE(review): the two count updates below are clearly the bodies of
    # an open-bracket / close-bracket elif chain whose condition lines
    # (e.g. `elif tstring in '([{':`) are not in view.
    self.__enclosurecount += 1
    self.__enclosurecount -= 1
def __suitedocstring(self, ttype, tstring, lineno):
    """Expecting a class/function docstring: extract it if the next
    meaningful token is a literal string, otherwise give up and return
    to the waiting state."""
    # ignore any intervening noise
    if ttype == tokenize.STRING and is_literal_string(tstring):
        self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
        self.__state = self.__waiting
    elif ttype not in (tokenize.NEWLINE, tokenize.INDENT,
        # NOTE(review): the tuple above is unterminated in this copy --
        # the continuation line (upstream also tolerates tokenize.COMMENT)
        # is not in view.
        # there was no class docstring
        self.__state = self.__waiting
def __keywordseen(self, ttype, tstring, lineno):
    """Just saw a keyword (e.g. '_'): an opening paren starts argument
    collection; anything else means it wasn't a call."""
    if ttype == tokenize.OP and tstring == '(':
        # NOTE(review): upstream also resets self.__data and records
        # self.__lineno in this branch; those lines are not in view.
        self.__state = self.__openseen
    # NOTE(review): this assignment is presumably inside an `else:`
    # branch (not in view); as written here it would clobber __openseen.
    self.__state = self.__waiting
def __openseen(self, ttype, tstring, lineno):
    """Inside the parens of a keyword call: collect adjacent string
    literals until the closing paren, warning on anything unexpected."""
    if ttype == tokenize.OP and tstring == ')':
        # We've seen the last of the translatable strings. Record the
        # line number of the first line of the strings and update the list
        # of messages seen. Reset state for the next batch. If there
        # were no strings inside _(), then just ignore this entry.
        self.__addentry(EMPTYSTRING.join(self.__data))
        self.__state = self.__waiting
    elif ttype == tokenize.STRING and is_literal_string(tstring):
        self.__data.append(safe_eval(tstring))
    elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT,
                       token.NEWLINE, tokenize.NL]:
        # warn if we see anything else than STRING or whitespace
        # NOTE(review): the print() call wrapping the warning format
        # string below, and its argument mapping, are not in view.
        '*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"'
        self.__state = self.__waiting
def __addentry(self, msg, lineno=None, isdocstring=0):
    """Record extracted message *msg* under its (filename, lineno)
    location, unless it is in the configured exclude list.

    NOTE(review): upstream substitutes a remembered line number when
    *lineno* is None; that substitution is not in view here, so entries
    added without a lineno would record None.
    """
    if not msg in self.__options.toexclude:
        entry = (self.__curfile, lineno)
        # dict-of-dicts: per-message mapping of location -> docstring flag
        self.__messages.setdefault(msg, {})[entry] = isdocstring
def set_filename(self, filename):
    """Remember the file currently being scanned; __addentry() uses it
    as the filename part of each message's location entry."""
    self.__curfile = filename
# NOTE(review): interior of what appears to be TokenEater.write(fp) -- the
# def line, the `options`/`reverse` setup, the per-entry key sorting, the
# GNU-style location line assembly and the msgid/msgstr emission are not
# in view.  Indentation below is reconstructed; confirm against upstream
# pygettext before editing.
timestamp = time.strftime('%Y-%m-%d %H:%M%z')
# fall back to UTF-8 when the output stream has no declared encoding
encoding = fp.encoding if fp.encoding else 'UTF-8'
# NOTE(review): pot_header references %(charset)s but the mapping line
# supplying the 'charset' key is not in view here.
print(pot_header % {'time': timestamp, 'version': __version__,
                    'encoding': '8bit'}, file=fp)
# Sort the entries. First sort each particular entry's keys, then
# sort all the entries by their first item.
for k, v in self.__messages.items():
    reverse.setdefault(tuple(keys), []).append((k, v))
rkeys = sorted(reverse.keys())
# If the entry was gleaned out of a docstring, then add a
# comment stating so. This is to aid translators who may wish
# to skip translating some unimportant docstrings.
isdocstring = any(v.values())
# k is the message string, v is a dictionary-set of (filename,
# lineno) tuples. We want to sort the entries in v first by
# file name and then by line number.
if not options.writelocations:
# location comments are different b/w Solaris and GNU:
elif options.locationstyle == options.SOLARIS:
    for filename, lineno in v:
        d = {'filename': filename, 'lineno': lineno}
        '# File: %(filename)s, line: %(lineno)d') % d, file=fp)
elif options.locationstyle == options.GNU:
    # fit as many locations on one line, as long as the
    # resulting line length doesn't exceed 'options.width'
    for filename, lineno in v: