Commit 962ad74f authored by Petr Prikryl

Merge branch 'master' into devel

parents 39bdafef 8cac977d
@@ -167,7 +167,7 @@ void InputString::setEnabled(bool state)
 {
   m_lab->setEnabled(state);
   if (m_le)  m_le->setEnabled(state);
-  if (m_im)  m_le->setEnabled(state);
+  if (m_im)  m_im->setEnabled(state);
   if (m_br)  m_br->setEnabled(state);
   if (m_com) m_com->setEnabled(state);
   updateDefault();
......
@@ -594,9 +594,10 @@ fi
 # - check for python ----------------------------------------------------------
+python_version=0
 printf "  Checking for python... "
 if test "$f_python" = NO; then
-  python_names="python2 python"
+  python_names="python3 python2 python"
   python_dirs="$bin_dirs /usr/bin /usr/local/bin /bin /sbin"
   python_prog=NO
   python_found=NO
@@ -604,9 +605,16 @@ if test "$f_python" = NO; then
     for j in $python_dirs; do
       if test -x "$j/$i"; then
         python_found=YES
-        if test `$j/$i -c "import sys; print sys.version_info[0]"` = 2; then
-          python_prog="$j/$i"
+        if test `$j/$i -c "import sys; print(sys.version_info[0])"` = 3; then
+          python_prog="$j/$i";
+          python_version=`$j/$i -c "import platform; print(platform.python_version())"`;
           break 2
+        elif test `$j/$i -c "import sys; print(sys.version_info[0])"` = 2; then
+          if test `$j/$i -c "import sys; print(sys.version_info[1])"` -ge 6; then
+            python_prog="$j/$i";
+            python_version=`$j/$i -c "import platform; print(platform.python_version())"`;
+            break 2
+          fi
         fi
       fi
     done
@@ -616,14 +624,14 @@ fi
 if test "$f_python" = NO; then
   if test "$python_found" = YES; then
-    echo "version should be python 2."
+    echo "version should be python 2.6 or higher."
   else
     echo "not found!";
   fi
   echo
   exit 2
 fi
-echo "    using $f_python";
+echo "    using $f_python (version $python_version)";
 # - check for perl ------------------------------------------------------------
......
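Note on the check above: the probes run inside the backticks can be reproduced directly in Python, which makes it easy to see what the configure fragment accepts. A minimal sketch, not part of the patch; the acceptance rule mirrors the shell logic (any Python 3.x, or Python 2.6+ as a fallback):

    # Sketch of the probes run inside the backticks above; runs on 2.6+ and 3.x.
    import platform
    import sys

    major, minor = sys.version_info[0], sys.version_info[1]
    acceptable = (major == 3) or (major == 2 and minor >= 6)

    print(platform.python_version())                  # what $python_version captures, e.g. "2.7.6"
    print("acceptable" if acceptable else "too old")  # the decision the shell code makes
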
@@ -556,6 +556,7 @@ open-source tools:
 <li>GNU bison version 2.5 (Linux) and 2.3 (MacOSX)
 <li>GNU make version 3.81
 <li>Perl version 5.12
+<li>Python version 2.7 and 3.4
 <li>TeX Live 2009 (or later)
 </ul>
......
@@ -66,11 +66,31 @@
    of translators introduced.
    """
-from __future__ import generators
-import codecs
 import os
+import platform
 import re
 import sys
+import textwrap
+
+
+def xopen(fname, mode='r', encoding='utf-8-sig'):
+    '''Unified open of text files with UTF-8 default encoding.
+
+       The 'utf-8-sig' skips the BOM automatically.
+    '''
+    # Use UTF-8 without BOM when writing to a text file.
+    if encoding == 'utf-8-sig' and mode == 'w':
+        encoding = 'utf-8'
+
+    major, minor, patch = (int(e) for e in platform.python_version_tuple())
+    if major == 2:
+        if mode == 'w':
+            mode = 'wU'
+        import codecs
+        return codecs.open(fname, mode=mode, encoding=encoding)  # Python 2
+    else:
+        return open(fname, mode=mode, encoding=encoding)         # Python 3
+
 def fill(s):
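Reviewer note on xopen(): the 'utf-8-sig' default is what lets the tokenizer further down drop its manual BOM handling. A small Python 3 sketch of the behaviour; the temporary file and its contents are made up for the demonstration, not taken from the sources:

    # Reading with 'utf-8-sig' silently skips a leading BOM; writing with plain
    # 'utf-8' never emits one.  Python 3 only; the file is a throwaway example.
    import os
    import tempfile

    path = os.path.join(tempfile.mkdtemp(), 'bom_demo.h')
    with open(path, 'wb') as f:
        f.write(b'\xef\xbb\xbf' + 'class TranslatorDemo\n'.encode('utf-8'))

    with open(path, encoding='utf-8') as f:
        print(repr(f.read()))      # '\ufeffclass TranslatorDemo\n' -- BOM kept
    with open(path, encoding='utf-8-sig') as f:
        print(repr(f.read()))      # 'class TranslatorDemo\n'       -- BOM skipped

This is also why the explicit '\xef\xbb\xbf' check is removed from the tokenizer below.
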
@@ -99,58 +119,6 @@ def fill(s):
     return '\n'.join(lines)

-# The following function dedent() is the verbatim copy from the textwrap.py
-# module. The textwrap.py was introduced in Python 2.3. To make this script
-# working also in older Python versions, I have decided to copy it.
-# Notice that the textwrap.py is copyrighted:
-#
-# Copyright (C) 1999-2001 Gregory P. Ward.
-# Copyright (C) 2002, 2003 Python Software Foundation.
-# Written by Greg Ward <gward@python.net>
-#
-# The explicit permission to use the code here was sent by Guido van Rossum
-# (4th June, 2004).
-#
-def dedent(text):
-    """dedent(text : string) -> string
-
-    Remove any whitespace than can be uniformly removed from the left
-    of every line in `text`.
-
-    This can be used e.g. to make triple-quoted strings line up with
-    the left edge of screen/whatever, while still presenting it in the
-    source code in indented form.
-
-    For example:
-
-        def test():
-            # end first line with \ to avoid the empty line!
-            s = '''\
-            hello
-              world
-            '''
-            print repr(s)          # prints '    hello\n      world\n    '
-            print repr(dedent(s))  # prints 'hello\n  world\n'
-    """
-    lines = text.expandtabs().split('\n')
-
-    margin = None
-    for line in lines:
-        content = line.lstrip()
-        if not content:
-            continue
-        indent = len(line) - len(content)
-        if margin is None:
-            margin = indent
-        else:
-            margin = min(margin, indent)
-
-    if margin is not None and margin > 0:
-        for i in range(len(lines)):
-            lines[i] = lines[i][margin:]
-
-    return '\n'.join(lines)

 class Transl:
     """One instance is build for each translator.
@@ -237,7 +205,7 @@ class Transl:
         # Open the file for reading and extracting tokens until the eof.
         # Initialize the finite automaton.
-        f = open(self.fname)
+        f = xopen(self.fname)
         lineNo = 0
         line = ''         # init -- see the pos initialization below
         linelen = 0       # init
@@ -256,8 +224,6 @@ class Transl:
                 else:
                     lineNo += 1
                     line = f.readline()
-                    if line.startswith('\xef\xbb\xbf'):
-                        line = line[3:]    # skip the BOM
                     linelen = len(line)
                     pos = 0
                     if line == '':       # eof
@@ -276,7 +242,7 @@ class Transl:
                 # If it is an unknown item, it can still be recognized
                 # here. Keywords and separators are the example.
                 if tokenId == 'unknown':
-                    if tokenDic.has_key(tokenStr):
+                    if tokenStr in tokenDic:
                         tokenId = tokenDic[tokenStr]
                     elif tokenStr.isdigit():
                         tokenId = 'num'
@@ -329,7 +295,7 @@ class Transl:
                     tokenStr = c
                     tokenLineNo = lineNo
                     status = 8
-                elif tokenDic.has_key(c):  # known one-char token
+                elif c in tokenDic:        # known one-char token
                     tokenId = tokenDic[c]
                     tokenStr = c
                     tokenLineNo = lineNo
@@ -424,7 +390,7 @@ class Transl:
                 if c.isspace():
                     pos += 1
                     status = 0             # tokenId may be determined later
-                elif tokenDic.has_key(c):  # separator, don't move pos
+                elif c in tokenDic:        # separator, don't move pos
                     status = 0
                 else:
                     tokenStr += c          # collect
@@ -457,7 +423,7 @@ class Transl:
             # Always assume that the previous tokens were processed. Get
             # the next one.
-            tokenId, tokenStr, tokenLineNo = tokenIterator.next()
+            tokenId, tokenStr, tokenLineNo = next(tokenIterator)
             # Process the token and never return back.
             if status == 0:    # waiting for the 'class' keyword.
@@ -588,7 +554,7 @@ class Transl:
         while status != 777:
             # Get the next token.
-            tokenId, tokenStr, tokenLineNo = tokenIterator.next()
+            tokenId, tokenStr, tokenLineNo = next(tokenIterator)
             if status == 0:    # waiting for 'public:'
                 if tokenId == 'public':
@@ -670,7 +636,7 @@ class Transl:
             elif status == 9: # after semicolon, produce the dic item
                 if tokenId == 'semic':
-                    assert(not resultDic.has_key(uniPrototype))
+                    assert(uniPrototype not in resultDic)
                     resultDic[uniPrototype] = prototype
                     status = 2
                 else:
@@ -752,7 +718,7 @@ class Transl:
         # Eat the rest of the source to cause closing the file.
         while tokenId != 'eof':
-            tokenId, tokenStr, tokenLineNo = tokenIterator.next()
+            tokenId, tokenStr, tokenLineNo = next(tokenIterator)
         # Return the resulting dictionary with 'uniPrototype -> prototype'.
         return resultDic
@@ -800,7 +766,7 @@ class Transl:
         while status != 777:
             # Get the next token.
-            tokenId, tokenStr, tokenLineNo = tokenIterator.next()
+            tokenId, tokenStr, tokenLineNo = next(tokenIterator)
             if status == 0:      # waiting for 'public:'
                 if tokenId == 'public':
@@ -912,7 +878,7 @@ class Transl:
                         sys.stderr.write(msg)
                         assert False
-                    assert(not self.prototypeDic.has_key(uniPrototype))
+                    assert(uniPrototype not in self.prototypeDic)
                     # Insert new dictionary item.
                     self.prototypeDic[uniPrototype] = prototype
                     status = 2      # body consumed
@@ -1056,12 +1022,12 @@ class Transl:
         # For the required methods, update the dictionary of methods
         # implemented by the adapter.
         for protoUni in self.prototypeDic:
-            if reqDic.has_key(protoUni):
+            if protoUni in reqDic:
                 # This required method will be marked as implemented
                 # by this adapter class. This implementation assumes
                 # that newer adapters do not reimplement any required
                 # methods already implemented by older adapters.
-                assert(not adaptDic.has_key(protoUni))
+                assert(protoUni not in adaptDic)
                 adaptDic[protoUni] = (version, self.classId)
         # Clear the dictionary object and the information related
@@ -1094,7 +1060,7 @@ class Transl:
             # Eat the rest of the source to cause closing the file.
             while True:
                 try:
-                    t = tokenIterator.next()
+                    t = next(tokenIterator)
                 except StopIteration:
                     break
@@ -1106,7 +1072,7 @@ class Transl:
         # Build the list of obsolete methods.
         self.obsoleteMethods = []
         for p in myDic:
-            if not reqDic.has_key(p):
+            if p not in reqDic:
                 self.obsoleteMethods.append(p)
         # Build the list of missing methods and the list of implemented
@@ -1114,7 +1080,7 @@ class Transl:
         self.missingMethods = []
         self.implementedMethods = []
         for p in reqDic:
-            if myDic.has_key(p):
+            if p in myDic:
                 self.implementedMethods.append(p)
             else:
                 self.missingMethods.append(p)
@@ -1133,7 +1099,7 @@ class Transl:
         adaptMinVersion = '9.9.99'
         adaptMinClass = 'TranslatorAdapter_9_9_99'
         for uniProto in self.missingMethods:
-            if adaptDic.has_key(uniProto):
+            if uniProto in adaptDic:
                 version, cls = adaptDic[uniProto]
                 if version < adaptMinVersion:
                     adaptMinVersion = version
@@ -1342,9 +1308,9 @@ class TrManager:
             sys.exit(1)
         else:
             lst = os.listdir(self.src_path)
-            lst = filter(lambda x: x[:11] == 'translator_'
-                                   and x[-2:] == '.h'
-                                   and x != 'translator_adapter.h', lst)
+            lst = [x for x in lst if x[:11] == 'translator_'
+                                     and x[-2:] == '.h'
+                                     and x != 'translator_adapter.h']
         # Build the object for the translator_xx.h files, and process the
         # content of the file. Then insert the object to the dictionary
@@ -1366,7 +1332,7 @@ class TrManager:
         # Build the auxiliary list with strings compound of the status,
         # readable form of the language, and classId.
         statLst = []
-        for obj in self.__translDic.values():
+        for obj in list(self.__translDic.values()):
             assert(obj.classId != 'Translator')
             s = obj.status + '|' + obj.langReadable + '|' + obj.classId
             statLst.append(s)
@@ -1384,9 +1350,10 @@ class TrManager:
         # Build the list of tuples that contain (langReadable, obj).
         # Sort it by readable name.
         self.langLst = []
-        for obj in self.__translDic.values():
+        for obj in list(self.__translDic.values()):
             self.langLst.append((obj.langReadable, obj))
-        self.langLst.sort(lambda a, b: cmp(a[0], b[0]))
+
+        self.langLst.sort(key=lambda x: x[0])
         # Create the list with readable language names. If the language has
         # also the English-based version, modify the item by appending
@@ -1400,7 +1367,7 @@ class TrManager:
             # of the English-based object. If the object exists, modify the
             # name for the readable list of supported languages.
             classIdEn = obj.classId + 'En'
-            if self.__translDic.has_key(classIdEn):
+            if classIdEn in self.__translDic:
                 name += ' (+En)'
             # Append the result name of the language, possibly with note.
@@ -1424,16 +1391,16 @@ class TrManager:
         for name, obj in self.langLst:
             if obj.status == 'En':
                 classId = obj.classId[:-2]
-                if self.__translDic.has_key(classId):
+                if classId in self.__translDic:
                     self.numLang -= 1    # the couple will be counted as one
         # Extract the version of Doxygen.
-        f = open(os.path.join(self.doxy_path, 'VERSION'))
+        f = xopen(os.path.join(self.doxy_path, 'VERSION'))
         self.doxVersion = f.readline().strip()
         f.close()
         # Update the last modification time.
-        for tr in self.__translDic.values():
+        for tr in list(self.__translDic.values()):
             tim = tr.getmtime()
             if tim > self.lastModificationTime:
                 self.lastModificationTime = tim
@@ -1472,11 +1439,11 @@ class TrManager:
         probably used should be checked first and the resulting reduced
         dictionary should be used for checking the next files (speed up).
         """
-        lst_in = dic.keys()   # identifiers to be searched for
+        lst_in = list(dic.keys())   # identifiers to be searched for
         # Read content of the file as one string.
         assert os.path.isfile(fname)
-        f = open(fname)
+        f = xopen(fname)
         cont = f.read()
         f.close()
@@ -1497,7 +1464,7 @@ class TrManager:
         # Build the dictionary of the required method prototypes with
         # method identifiers used as keys.
         trdic = {}
-        for prototype in self.requiredMethodsDic.keys():
+        for prototype in list(self.requiredMethodsDic.keys()):
             ri = prototype.split('(')[0]
             identifier = ri.split()[1].strip()
             trdic[identifier] = prototype
@@ -1553,7 +1520,7 @@ class TrManager:
         output = os.path.join(self.doc_path, self.translatorReportFileName)
         # Open the textual report file for the output.
-        f = open(output, 'w')
+        f = xopen(output, 'w')
         # Output the information about the version.
         f.write('(' + self.doxVersion + ')\n\n')
@@ -1581,7 +1548,7 @@ class TrManager:
         # The e-mail addresses of the maintainers will be collected to
         # the auxiliary file in the order of translator classes listed
         # in the translator report.
-        fmail = open('mailto.txt', 'w')
+        fmail = xopen('mailto.txt', 'w')
         # Write the list of "up-to-date" translator classes.
         if self.upToDateIdLst:
@@ -1665,12 +1632,12 @@ class TrManager:
             # adapters.
             if not self.script_argLst:
                 to_remove = {}
-                for version, adaptClassId in self.adaptMethodsDic.values():
+                for version, adaptClassId in list(self.adaptMethodsDic.values()):
                     if version < adaptMinVersion:
                         to_remove[adaptClassId] = True
                 if to_remove:
-                    lst = to_remove.keys()
+                    lst = list(to_remove.keys())
                     lst.sort()
                     plural = len(lst) > 1
                     note = 'Note: The adapter class'
@@ -1716,7 +1683,7 @@ class TrManager:
             f.write('\n' + '=' * 70 + '\n')
             f.write(fill(s) + '\n\n')
-            keys = dic.keys()
+            keys = list(dic.keys())
             keys.sort()
             for key in keys:
                 f.write('    ' + dic[key] + '\n')
@@ -1726,7 +1693,7 @@ class TrManager:
         f.write('\n' + '=' * 70)
         f.write('\nDetails for translators (classes sorted alphabetically):\n')
-        cls = self.__translDic.keys()
+        cls = list(self.__translDic.keys())
         cls.sort()
         for c in cls:
@@ -1753,7 +1720,7 @@ class TrManager:
                 self.lastModificationTime = tim
         # Process the content of the maintainers file.
-        f = codecs.open(fname, 'r', 'utf-8')
+        f = xopen(fname)
         inside = False  # inside the record for the language
         lineReady = True
         classId = None
@@ -1764,28 +1731,28 @@ class TrManager:
             lineReady = line != ''     # when eof, then line == ''
             line = line.strip()        # eof should also behave as separator
-            if line != u'' and line[0] == u'%':    # skip the comment line
+            if line != '' and line[0] == '%':      # skip the comment line
                 continue
             if not inside:             # if outside of the record
-                if line != u'':        # should be language identifier
+                if line != '':         # should be language identifier
                     classId = line
                     maintainersLst = []
                     inside = True
                 # Otherwise skip empty line that do not act as separator.
             else:                      # if inside the record
-                if line == u'':        # separator found
+                if line == '':         # separator found
                     inside = False
                 else:
                     # If it is the first maintainer, create the empty list.
-                    if not self.__maintainersDic.has_key(classId):
+                    if classId not in self.__maintainersDic:
                         self.__maintainersDic[classId] = []
                     # Split the information about the maintainer and append
                     # the tuple. The address may be prefixed '[unreachable]'
                     # or whatever '[xxx]'. This will be processed later.
-                    lst = line.split(u':', 1)
+                    lst = line.split(':', 1)
                     assert(len(lst) == 2)
                     t = (lst[0].strip(), lst[1].strip())
                     self.__maintainersDic[classId].append(t)
@@ -1817,7 +1784,7 @@ class TrManager:
         #
         # Read the template of the documentation, and remove the first
         # attention lines.
-        f = codecs.open(fTplName, 'r', 'utf-8')
+        f = xopen(fTplName)
         doctpl = f.read()
         f.close()
@@ -1829,7 +1796,7 @@ class TrManager:
         # document template.
         tplDic = {}
-        s = u'Do not edit this file. It was generated by the %s script.\n * Instead edit %s and %s' % (self.script_name, self.languageTplFileName, self.maintainersFileName)
+        s = u'Do not edit this file. It was generated by the %s script. * Instead edit %s and %s' % (self.script_name, self.languageTplFileName, self.maintainersFileName)
         tplDic['editnote'] = s
         tplDic['doxVersion'] = self.doxVersion
@@ -1865,7 +1832,7 @@ class TrManager:
               </table>
               \\endhtmlonly
              '''
-        htmlTableTpl = dedent(htmlTableTpl)
+        htmlTableTpl = textwrap.dedent(htmlTableTpl)
         htmlTrTpl = u'\n  <tr bgcolor="#ffffff">%s\n  </tr>'
         htmlTdTpl = u'\n    <td>%s</td>'
         htmlTdStatusColorTpl = u'\n    <td bgcolor="%s">%s</td>'
@@ -1881,7 +1848,7 @@ class TrManager:
             if obj.readableStatus.startswith('1.4'):
                 bkcolor = self.getBgcolorByReadableStatus('1.4')
             else:
-                bkcolor = '#ffffff'
+                bkcolor = u'#ffffff'
             lst = [ htmlTdStatusColorTpl % (bkcolor, obj.langReadable) ]
@@ -1905,23 +1872,23 @@ class TrManager:
                 lm = []
                 for maintainer in self.__maintainersDic[obj.classId]:
                     name = maintainer[0]
-                    if name.startswith(u'--'):
+                    if name.startswith('--'):
                         name = u'<span style="color: red; background-color: yellow">'\
                                + name + u'</span>'
                     lm.append(name)
-                mm = u'<br/>'.join(lm)
+                mm = '<br/>'.join(lm)
                 # The marked adresses (they start with the mark '[unreachable]',
                 # '[resigned]', whatever '[xxx]') will not be displayed at all.
                 # Only the mark will be used instead.
-                rexMark = re.compile(ur'(?P<mark>\[.*?\])')
+                rexMark = re.compile(u'(?P<mark>\\[.*?\\])')
                 le = []
                 for maintainer in self.__maintainersDic[obj.classId]:
                     address = maintainer[1]
                     m = rexMark.search(address)
                     if m is not None:
                         address = u'<span style="color: brown">'\
-                                  + m.group(u'mark') + u'</span>'
+                                  + m.group('mark') + u'</span>'
                     le.append(address)
                 ee = u'<br/>'.join(le)
@@ -1940,7 +1907,7 @@ class TrManager:
         htmlTable = htmlTableTpl % (''.join(trlst))
         # Define templates for LaTeX table parts of the documentation.
-        latexTableTpl = ur'''
+        latexTableTpl = b'''
             \latexonly
             \footnotesize
             \begin{longtable}{|l|l|l|l|}
@@ -1952,9 +1919,9 @@ class TrManager:
             \end{longtable}
             \normalsize
             \endlatexonly
-            '''
-        latexTableTpl = dedent(latexTableTpl)
-        latexLineTpl = u'\n' + r'  %s & %s & {\tt\tiny %s} & %s \\'
+            '''.decode('utf_8')
+        latexTableTpl = textwrap.dedent(latexTableTpl)
+        latexLineTpl = u'\n  %s & %s & {\\tt\\tiny %s} & %s \\\\'
         # Loop through transl objects in the order of sorted readable names
         # and add generate the content of the LaTeX table.
@@ -1965,7 +1932,7 @@ class TrManager:
                 # in the table is placed explicitly above the first
                 # maintainer. Prepare the arguments for the LaTeX row template.
                 maintainers = []
-                if self.__maintainersDic.has_key(obj.classId):
+                if obj.classId in self.__maintainersDic:
                     maintainers = self.__maintainersDic[obj.classId]
                 lang = obj.langReadable
@@ -1976,8 +1943,8 @@ class TrManager:
                     classId = obj.classId[:-2]
                     if classId in self.__translDic:
                         langNE = self.__translDic[classId].langReadable
-                        maintainer = u'see the %s language' % langNE
-                        email = u'~'
+                        maintainer = 'see the %s language' % langNE
+                        email = '~'
                 if not maintainer and (obj.classId in self.__maintainersDic):
                     lm = [ m[0] for m in self.__maintainersDic[obj.classId] ]
@@ -1996,8 +1963,8 @@ class TrManager:
                 # List the other maintainers for the language. Do not set
                 # lang and status for them.
-                lang = u'~'
-                status = u'~'
+                lang = '~'
+                status = '~'
                 for m in maintainers[1:]:
                     maintainer = m[0]
                     email = m[1]
@@ -2012,14 +1979,20 @@ class TrManager:
         tplDic['informationTable'] = htmlTable + u'\n' + latexTable
         # Insert the symbols into the document template and write it down.
-        f = codecs.open(fDocName, 'w', 'utf-8')
+        f = xopen(fDocName, 'w')
         f.write(doctpl % tplDic)
         f.close()
 if __name__ == '__main__':
-    # Create the manager, build the transl objects, and parse the related
-    # sources.
+    # The Python 2.6+ or 3.3+ is required.
+    major, minor, patch = (int(e) for e in platform.python_version_tuple())
+    if (major == 2 and minor < 6) or (major == 3 and minor < 3):
+        print('Python 2.6+ or Python 3.3+ are required for the script')
+        sys.exit(1)
+
+    # The translator manager builds the transl objects, parses the related
+    # sources, and keeps them in memory.
     trMan = TrManager()
     # Generate the language.doc.
......
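Reviewer note: apart from the xopen() calls, the translator.py hunks above repeat a small set of Python 2 to 3 idiom conversions. A standalone Python 3 sketch collecting them, with throwaway data (the names are only illustrative):

    # Each comment names the Python 2 idiom replaced in the hunks above.
    d = {'trSee': 'QCString trSee()', 'trWarning': 'QCString trWarning()'}

    assert 'trSee' in d and 'trMissing' not in d   # dict.has_key(k)  ->  k in dict

    it = iter(d)
    first_key = next(it)                           # iterator.next()  ->  next(iterator)

    keys = list(d.keys())                          # dict views must be wrapped in list()
    keys.sort()                                    # before an in-place sort() on Python 3

    pairs = [('German', 'TranslatorGerman'), ('Czech', 'TranslatorCzech')]
    pairs.sort(key=lambda x: x[0])                 # sort(cmp=...)    ->  sort(key=...)

    names = ['translator_cz.h', 'translator_adapter.h', 'README']
    headers = [x for x in names                    # filter(lambda..) ->  list comprehension
               if x[:11] == 'translator_' and x[-2:] == '.h'
               and x != 'translator_adapter.h']

    print(first_key, keys, pairs[0][0], headers)
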
@@ -11601,7 +11601,9 @@ static inline bool is_arabic(unsigned short x) {
 	    ((x >= 0xfb50) && (x <= 0xfdff)) ||
 	    ((x >= 0xfe70) && (x <= 0xfeff)));
 }
+#endif
+#ifndef QT_NO_UNICODETABLES
 static inline bool is_neutral(unsigned short dir) {
     return ((dir == QChar::DirB) ||
             (dir == QChar::DirS) ||
......
@@ -329,44 +329,44 @@ class CallContext
   public:
     struct Ctx
     {
-      Ctx() : name(g_name), type(g_type), cd(0) {}
+      Ctx() : name(g_name), type(g_type), d(0) {}
       QCString name;
       QCString type;
-      ClassDef *cd;
+      Definition *d;
     };
     CallContext()
     {
-      m_classList.append(new Ctx);
-      m_classList.setAutoDelete(TRUE);
+      m_defList.append(new Ctx);
+      m_defList.setAutoDelete(TRUE);
     }
     virtual ~CallContext() {}
-    void setClass(ClassDef *cd)
+    void setScope(Definition *d)
     {
-      Ctx *ctx = m_classList.getLast();
+      Ctx *ctx = m_defList.getLast();
       if (ctx)
       {
-        DBG_CTX((stderr,"** Set call context %s (%p)\n",cd==0 ? "<null>" : cd->name().data(),cd));
-        ctx->cd=cd;
+        DBG_CTX((stderr,"** Set call context %s (%p)\n",d==0 ? "<null>" : d->name().data(),d));
+        ctx->d=d;
       }
     }
     void pushScope()
     {
-      m_classList.append(new Ctx);
-      DBG_CTX((stderr,"** Push call context %d\n",m_classList.count()));
+      m_defList.append(new Ctx);
+      DBG_CTX((stderr,"** Push call context %d\n",m_defList.count()));
     }
     void popScope()
     {
-      if (m_classList.count()>1)
+      if (m_defList.count()>1)
       {
-        DBG_CTX((stderr,"** Pop call context %d\n",m_classList.count()));
-        Ctx *ctx = m_classList.getLast();
+        DBG_CTX((stderr,"** Pop call context %d\n",m_defList.count()));
+        Ctx *ctx = m_defList.getLast();
         if (ctx)
         {
           g_name = ctx->name;
           g_type = ctx->type;
         }
-        m_classList.removeLast();
+        m_defList.removeLast();
       }
       else
       {
@@ -376,17 +376,17 @@ class CallContext
     void clear()
     {
       DBG_CTX((stderr,"** Clear call context\n"));
-      m_classList.clear();
-      m_classList.append(new Ctx);
+      m_defList.clear();
+      m_defList.append(new Ctx);
     }
-    ClassDef *getClass() const
+    Definition *getScope() const
     {
-      Ctx *ctx = m_classList.getLast();
-      if (ctx) return ctx->cd; else return 0;
+      Ctx *ctx = m_defList.getLast();
+      if (ctx) return ctx->d; else return 0;
     }
   private:
-    QList<Ctx> m_classList;
+    QList<Ctx> m_defList;
 };
 static CallContext g_theCallContext;
@@ -732,7 +732,7 @@ static MemberDef *setCallContextForVar(const QCString &name)
     if (md)
     {
       //printf("name=%s scope=%s\n",locName.data(),scope.data());
-      g_theCallContext.setClass(stripClassName(md->typeString(),md->getOuterScope()));
+      g_theCallContext.setScope(stripClassName(md->typeString(),md->getOuterScope()));
       return md;
     }
   }
@@ -745,7 +745,7 @@ static MemberDef *setCallContextForVar(const QCString &name)
     if (md)
     {
       //printf("name=%s scope=%s\n",locName.data(),scope.data());
-      g_theCallContext.setClass(stripClassName(md->typeString(),md->getOuterScope()));
+      g_theCallContext.setScope(stripClassName(md->typeString(),md->getOuterScope()));
       return md;
     }
   }
@@ -760,7 +760,7 @@ static MemberDef *setCallContextForVar(const QCString &name)
     if (mcd!=VariableContext::dummyContext)
     {
       DBG_CTX((stderr,"local var `%s' mcd=%s\n",name.data(),mcd->name().data()));
-      g_theCallContext.setClass(mcd);
+      g_theCallContext.setScope(mcd);
     }
   }
   else
@@ -778,7 +778,7 @@ static MemberDef *setCallContextForVar(const QCString &name)
       if (g_scopeStack.top()!=CLASSBLOCK)
       {
        DBG_CTX((stderr,"class member `%s' mcd=%s\n",name.data(),mcd->name().data()));
-        g_theCallContext.setClass(stripClassName(md->typeString(),md->getOuterScope()));
+        g_theCallContext.setScope(stripClassName(md->typeString(),md->getOuterScope()));
      }
      return md;
    }
@@ -794,7 +794,7 @@ static MemberDef *setCallContextForVar(const QCString &name)
     MemberDef *md=mn->getFirst();
     if (!md->isStatic() || md->getBodyDef()==g_sourceFileDef)
     {
-      g_theCallContext.setClass(stripClassName(md->typeString(),md->getOuterScope()));
+      g_theCallContext.setScope(stripClassName(md->typeString(),md->getOuterScope()));
       return md;
     }
     return 0;
@@ -816,7 +816,7 @@ static MemberDef *setCallContextForVar(const QCString &name)
         (g_forceTagReference.isEmpty() || g_forceTagReference==md->getReference())
        )
     {
-      g_theCallContext.setClass(stripClassName(md->typeString(),md->getOuterScope()));
+      g_theCallContext.setScope(stripClassName(md->typeString(),md->getOuterScope()));
       //printf("returning member %s in source file %s\n",md->name().data(),g_sourceFileDef->name().data());
       return md;
     }
@@ -829,15 +829,15 @@ static MemberDef *setCallContextForVar(const QCString &name)
 static void updateCallContextForSmartPointer()
 {
-  ClassDef *cd = g_theCallContext.getClass();
-  //printf("updateCallContextForSmartPointer() cd=%s\n",cd ? cd->name().data() : "<none>");
+  Definition *d = g_theCallContext.getScope();
+  //printf("updateCallContextForSmartPointer() cd=%s\n",cd ? d->name().data() : "<none>");
   MemberDef *md;
-  if (cd && (md=cd->isSmartPointer()))
+  if (d && d->definitionType()==Definition::TypeClass && (md=((ClassDef*)d)->isSmartPointer()))
   {
     ClassDef *ncd = stripClassName(md->typeString(),md->getOuterScope());
     if (ncd)
     {
-      g_theCallContext.setClass(ncd);
+      g_theCallContext.setScope(ncd);
       //printf("Found smart pointer call %s->%s!\n",cd->name().data(),ncd->name().data());
     }
   }
@@ -879,7 +879,7 @@ static bool getLinkInScope(const QCString &c,  // scope
     if (md->getGroupDef()) d = md->getGroupDef();
     if (d && d->isLinkable())
     {
-      g_theCallContext.setClass(stripClassName(md->typeString(),md->getOuterScope()));
+      g_theCallContext.setScope(stripClassName(md->typeString(),md->getOuterScope()));
       //printf("g_currentDefinition=%p g_currentMemberDef=%p g_insideBody=%d\n",
       //    g_currentDefinition,g_currentMemberDef,g_insideBody);
@@ -965,8 +965,18 @@ static void generateClassOrGlobalLink(CodeOutputInterface &ol,const char *clName
       cd=getResolvedClass(d,g_sourceFileDef,bareName,&md); // try unspecialized version
     }
   }
+  NamespaceDef *nd = getResolvedNamespace(className);
+  if (nd)
+  {
+    g_theCallContext.setScope(nd);
+    addToSearchIndex(className);
+    writeMultiLineCodeLink(*g_code,nd,clName);
+    return;
+  }
   //printf("md=%s\n",md?md->name().data():"<none>");
-  DBG_CTX((stderr,"is found as a type %s\n",cd?cd->name().data():"<null>"));
+  DBG_CTX((stderr,"is found as a type cd=%s nd=%s\n",
+      cd?cd->name().data():"<null>",
+      nd?nd->name().data():"<null>"));
   if (cd==0 && md==0) // also see if it is variable or enum or enum value
   {
     if (getLink(g_classScope,clName,ol,clName,varOnly))
@@ -981,7 +991,7 @@ static void generateClassOrGlobalLink(CodeOutputInterface &ol,const char *clName
       if (lcd!=VariableContext::dummyContext)
       {
         //printf("non-dummy context lcd=%s!\n",lcd->name().data());
-        g_theCallContext.setClass(lcd);
+        g_theCallContext.setScope(lcd);
         // to following is needed for links to a global variable, but is
         // no good for a link to a local variable that is also a global symbol.
@@ -1011,7 +1021,7 @@ static void generateClassOrGlobalLink(CodeOutputInterface &ol,const char *clName
     }
     writeMultiLineCodeLink(ol,cd,clName);
     addToSearchIndex(className);
-    g_theCallContext.setClass(cd);
+    g_theCallContext.setScope(cd);
     if (md)
     {
       Definition *d = md->getOuterScope()==Doxygen::globalScope ?
@@ -1111,7 +1121,7 @@ static bool generateClassMemberLink(CodeOutputInterface &ol,MemberDef *xmd,const
     ClassDef *typeClass = stripClassName(removeAnonymousScopes(xmd->typeString()),xmd->getOuterScope());
     DBG_CTX((stderr,"%s -> typeName=%p\n",xmd->typeString(),typeClass));
-    g_theCallContext.setClass(typeClass);
+    g_theCallContext.setScope(typeClass);
     Definition *xd = xmd->getOuterScope()==Doxygen::globalScope ?
                      xmd->getFileDef() : xmd->getOuterScope();
@@ -1142,18 +1152,42 @@ static bool generateClassMemberLink(CodeOutputInterface &ol,MemberDef *xmd,const
   return FALSE;
 }
-static bool generateClassMemberLink(CodeOutputInterface &ol,ClassDef *mcd,const char *memName)
+static bool generateClassMemberLink(CodeOutputInterface &ol,Definition *def,const char *memName)
 {
-  if (mcd)
+  if (def && def->definitionType()==Definition::TypeClass)
   {
-    MemberDef *xmd = mcd->getMemberByName(memName);
-    //printf("generateClassMemberLink(class=%s,member=%s)=%p\n",mcd->name().data(),memName,xmd);
+    ClassDef *cd = (ClassDef*)def;
+    MemberDef *xmd = cd->getMemberByName(memName);
+    //printf("generateClassMemberLink(class=%s,member=%s)=%p\n",def->name().data(),memName,xmd);
     if (xmd)
     {
       return generateClassMemberLink(ol,xmd,memName);
     }
+    else
+    {
+      Definition *innerDef = cd->findInnerCompound(memName);
+      if (innerDef)
+      {
+        g_theCallContext.setScope(innerDef);
+        addToSearchIndex(memName);
+        writeMultiLineCodeLink(*g_code,innerDef,memName);
+        return TRUE;
+      }
+    }
+  }
+  else if (def && def->definitionType()==Definition::TypeNamespace)
+  {
+    NamespaceDef *nd = (NamespaceDef*)def;
+    //printf("Looking for %s inside namespace %s\n",memName,nd->name().data());
+    Definition *innerDef = nd->findInnerCompound(memName);
+    if (innerDef)
+    {
+      g_theCallContext.setScope(innerDef);
+      addToSearchIndex(memName);
+      writeMultiLineCodeLink(*g_code,innerDef,memName);
+      return TRUE;
+    }
   }
   return FALSE;
 }
@@ -1743,9 +1777,9 @@ B [ \t]
 BN  [ \t\n\r]
 ID  "$"?[a-z_A-Z\x80-\xFF][a-z_A-Z0-9\x80-\xFF]*
 SEP ("::"|"\\")
-SEPCS (".")
 SCOPENAME ({SEP}{BN}*)?({ID}{BN}*{SEP}{BN}*)*("~"{BN}*)?{ID}
-SCOPENAMECS ({SEPCS}{BN}*)?({ID}{BN}*{SEPCS}{BN}*)*("~"{BN}*)?{ID}
 TEMPLIST "<"[^\"\}\{\(\)\/\n\>]*">"
 SCOPETNAME (((({ID}{TEMPLIST}?){BN}*)?{SEP}{BN}*)*)((~{BN}*)?{ID})
 SCOPEPREFIX ({ID}{TEMPLIST}?{BN}*{SEP}{BN}*)+
@@ -2490,20 +2522,6 @@ RAWEND ")"[^ \t\(\)\\]{0,16}\"
           generateClassOrGlobalLink(*g_code,yytext);
           g_name+=yytext;
         }
-<Body>{SCOPENAMECS}/{BN}*[;,)\]] { // "int var;" or "var, var2" or "debug(f) macro"
-          if (!g_insideCS && !g_insideJava)
-          {
-            REJECT;
-          }
-          else
-          {
-            addType();
-            // changed this to generateFunctionLink, see bug 624514
-            //generateClassOrGlobalLink(*g_code,yytext,FALSE,TRUE);
-            generateFunctionLink(*g_code,yytext);
-            g_name+=yytext;
-          }
-        }
 <Body>{SCOPENAME}/{BN}*[;,)\]] { // "int var;" or "var, var2" or "debug(f) macro"
           addType();
           // changed this to generateFunctionLink, see bug 624514
@@ -2511,18 +2529,6 @@ RAWEND ")"[^ \t\(\)\\]{0,16}\"
           generateFunctionLink(*g_code,yytext);
           g_name+=yytext;
         }
-<Body>{SCOPENAMECS}/{B}* { // p->func()
-          if (!g_insideCS && !g_insideJava)
-          {
-            REJECT;
-          }
-          else
-          {
-            addType();
-            generateClassOrGlobalLink(*g_code,yytext);
-            g_name+=yytext;
-          }
-        }
 <Body>{SCOPENAME}/{B}* { // p->func()
           addType();
           generateClassOrGlobalLink(*g_code,yytext);
@@ -2657,9 +2663,9 @@ RAWEND ")"[^ \t\(\)\\]{0,16}\"
           BEGIN( MemberCall );
         }
 <MemberCall>{SCOPETNAME}/{BN}*"(" {
-          if (g_theCallContext.getClass())
+          if (g_theCallContext.getScope())
           {
-            if (!generateClassMemberLink(*g_code,g_theCallContext.getClass(),yytext))
+            if (!generateClassMemberLink(*g_code,g_theCallContext.getScope(),yytext))
             {
               g_code->codify(yytext);
               addToSearchIndex(yytext);
@@ -2684,10 +2690,10 @@ RAWEND ")"[^ \t\(\)\\]{0,16}\"
             }
           }
 <MemberCall>{SCOPENAME}/{B}* {
-          if (g_theCallContext.getClass())
+          if (g_theCallContext.getScope())
           {
-            DBG_CTX((stderr,"g_theCallContext.getClass()=%p\n",g_theCallContext.getClass()));
-            if (!generateClassMemberLink(*g_code,g_theCallContext.getClass(),yytext))
+            DBG_CTX((stderr,"g_theCallContext.getClass()=%p\n",g_theCallContext.getScope()));
+            if (!generateClassMemberLink(*g_code,g_theCallContext.getScope(),yytext))
             {
               g_code->codify(yytext);
               addToSearchIndex(yytext);
@@ -2733,7 +2739,7 @@ RAWEND ")"[^ \t\(\)\\]{0,16}\"
           if (*yytext!='[' && !g_type.isEmpty())
           {
            //printf("g_scopeStack.bottom()=%p\n",g_scopeStack.bottom());
-            if (g_scopeStack.top()!=CLASSBLOCK)
+            //if (g_scopeStack.top()!=CLASSBLOCK) // commented out for bug731363
            {
              //printf("AddVariable: '%s' '%s' context=%d\n",
              //       g_type.data(),g_name.data(),g_theVarContext.count());
@@ -3008,7 +3014,7 @@ RAWEND ")"[^ \t\(\)\\]{0,16}\"
             g_theVarContext.addVariable(g_type,g_name);
           }
           g_parmType.resize(0);g_parmName.resize(0);
-          g_theCallContext.setClass(0);
+          g_theCallContext.setScope(0);
           if (*yytext==';' || g_insideBody)
           {
             if (!g_insideBody)
......
@@ -3076,7 +3076,7 @@ to be found in the default search path.
 <option type='string' id='DOT_FONTNAME' format='string' defval='Helvetica' depends='HAVE_DOT'>
   <docs>
 <![CDATA[
-When you want a differently looking font n the dot files that doxygen generates
+When you want a differently looking font in the dot files that doxygen generates
 you can specify the font name
 using \c DOT_FONTNAME. You need to make sure dot is able to find the font,
 which can be done by putting it in a standard location or by setting the
......
@@ -18,7 +18,6 @@ import re
 import textwrap
 from xml.dom import minidom, Node

-
 def transformDocs(doc):
     # join lines, unless it is an empty line
     # remove doxygen layout constructs
@@ -112,7 +111,7 @@ def addValues(var, node):
         if (n.nodeName == "value"):
             if n.nodeType == Node.ELEMENT_NODE:
                 name = n.getAttribute('name')
-                print " %s->addValue(\"%s\");" % (var, name)
+                print(" %s->addValue(\"%s\");" % (var, name))

 def parseHeader(node,objName):
@@ -123,15 +122,15 @@ def parseHeader(node,objName):
             if (n.getAttribute('doxyfile') != "0"):
                 doc += parseDocs(n)
     docC = transformDocs(doc)
-    print " %s->setHeader(" % (objName)
+    print(" %s->setHeader(" % (objName))
     rng = len(docC)
     for i in range(rng):
         line = docC[i]
         if i != rng - 1:  # since we go from 0 to rng-1
-            print " \"%s\\n\"" % (line)
+            print(" \"%s\\n\"" % (line))
         else:
-            print " \"%s\"" % (line)
-    print " );"
+            print(" \"%s\"" % (line))
+    print(" );")

 def prepCDocs(node):
@@ -201,7 +200,7 @@ def prepCDocs(node):
             else:
                 if abspath == '1':
                     doc += "<br/>The file has to be specified with full path."
-        elif file =='image':
+        elif format =='image':
             abspath = node.getAttribute('abspath')
             if defval != '':
                 if abspath != '1':
@@ -238,8 +237,8 @@ def parseOption(node):
     setting = node.getAttribute('setting')
     docC = prepCDocs(node);
     if len(setting) > 0:
-        print "#if %s" % (setting)
-        print " //----"
+        print("#if %s" % (setting))
+        print(" //----")
     if type == 'bool':
         if len(adefval) > 0:
             enabled = adefval
@@ -247,108 +246,108 @@ def parseOption(node):
             enabled = "TRUE"
         else:
             enabled = "FALSE"
-        print " cb = cfg->addBool("
-        print " \"%s\"," % (name)
+        print(" cb = cfg->addBool(")
+        print(" \"%s\"," % (name))
         rng = len(docC)
         for i in range(rng):
             line = docC[i]
             if i != rng - 1:  # since we go from 0 to rng-1
-                print " \"%s\\n\"" % (line)
+                print(" \"%s\\n\"" % (line))
             else:
-                print " \"%s\"," % (line)
-        print " %s" % (enabled)
-        print " );"
+                print(" \"%s\"," % (line))
+        print(" %s" % (enabled))
+        print(" );")
         if depends != '':
-            print " cb->addDependency(\"%s\");" % (depends)
+            print(" cb->addDependency(\"%s\");" % (depends))
     elif type == 'string':
-        print " cs = cfg->addString("
-        print " \"%s\"," % (name)
+        print(" cs = cfg->addString(")
+        print(" \"%s\"," % (name))
         rng = len(docC)
         for i in range(rng):
             line = docC[i]
             if i != rng - 1:  # since we go from 0 to rng-1
-                print " \"%s\\n\"" % (line)
+                print(" \"%s\\n\"" % (line))
             else:
-                print " \"%s\"" % (line)
-        print " );"
+                print(" \"%s\"" % (line))
+        print(" );")
         if defval != '':
-            print " cs->setDefaultValue(\"%s\");" % (defval)
+            print(" cs->setDefaultValue(\"%s\");" % (defval))
         if format == 'file':
-            print " cs->setWidgetType(ConfigString::File);"
+            print(" cs->setWidgetType(ConfigString::File);")
         elif format == 'image':
-            print " cs->setWidgetType(ConfigString::Image);"
+            print(" cs->setWidgetType(ConfigString::Image);")
        elif format == 'dir':
-            print " cs->setWidgetType(ConfigString::Dir);"
+            print(" cs->setWidgetType(ConfigString::Dir);")
         if depends != '':
-            print " cs->addDependency(\"%s\");" % (depends)
+            print(" cs->addDependency(\"%s\");" % (depends))
     elif type == 'enum':
-        print " ce = cfg->addEnum("
-        print " \"%s\"," % (name)
+        print(" ce = cfg->addEnum(")
+        print(" \"%s\"," % (name))
         rng = len(docC)
         for i in range(rng):
             line = docC[i]
             if i != rng - 1:  # since we go from 0 to rng-1
-                print " \"%s\\n\"" % (line)
+                print(" \"%s\\n\"" % (line))
             else:
-                print " \"%s\"," % (line)
-        print " \"%s\"" % (defval)
-        print " );"
+                print(" \"%s\"," % (line))
+        print(" \"%s\"" % (defval))
+        print(" );")
         addValues("ce", node)
         if depends != '':
-            print " ce->addDependency(\"%s\");" % (depends)
+            print(" ce->addDependency(\"%s\");" % (depends))
     elif type == 'int':
         minval = node.getAttribute('minval')
         maxval = node.getAttribute('maxval')
-        print " ci = cfg->addInt("
-        print " \"%s\"," % (name)
+        print(" ci = cfg->addInt(")
+        print(" \"%s\"," % (name))
         rng = len(docC)
         for i in range(rng):
             line = docC[i]
             if i != rng - 1:  # since we go from 0 to rng-1
-                print " \"%s\\n\"" % (line)
+                print(" \"%s\\n\"" % (line))
             else:
-                print " \"%s\"," % (line)
-        print " %s,%s,%s" % (minval, maxval, defval)
-        print " );"
+                print(" \"%s\"," % (line))
+        print(" %s,%s,%s" % (minval, maxval, defval))
+        print(" );")
         if depends != '':
-            print " ci->addDependency(\"%s\");" % (depends)
+            print(" ci->addDependency(\"%s\");" % (depends))
     elif type == 'list':
-        print " cl = cfg->addList("
-        print " \"%s\"," % (name)
+        print(" cl = cfg->addList(")
+        print(" \"%s\"," % (name))
         rng = len(docC)
         for i in range(rng):
             line = docC[i]
             if i != rng - 1:  # since we go from 0 to rng-1
-                print " \"%s\\n\"" % (line)
+                print(" \"%s\\n\"" % (line))
             else:
-                print " \"%s\"" % (line)
-        print " );"
+                print(" \"%s\"" % (line))
+        print(" );")
         addValues("cl", node)
         if depends != '':
-            print " cl->addDependency(\"%s\");" % (depends)
+            print(" cl->addDependency(\"%s\");" % (depends))
         if format == 'file':
-            print " cl->setWidgetType(ConfigList::File);"
+            print(" cl->setWidgetType(ConfigList::File);")
         elif format == 'dir':
-            print " cl->setWidgetType(ConfigList::Dir);"
+            print(" cl->setWidgetType(ConfigList::Dir);")
         elif format == 'filedir':
-            print " cl->setWidgetType(ConfigList::FileAndDir);"
+            print(" cl->setWidgetType(ConfigList::FileAndDir);")
     elif type == 'obsolete':
-        print " cfg->addObsolete(\"%s\");" % (name)
+        print(" cfg->addObsolete(\"%s\");" % (name))
     if len(setting) > 0:
-        print "#else"
-        print " cfg->addDisabled(\"%s\");" % (name)
-        print "#endif"
+        print("#else")
+        print(" cfg->addDisabled(\"%s\");" % (name))
+        print("#endif")

 def parseGroups(node):
     name = node.getAttribute('name')
     doc = node.getAttribute('docs')
-    print "%s%s" % (" //-----------------------------------------",
-                    "----------------------------------")
+    print("%s%s" % (" //-----------------------------------------",
+                    "----------------------------------"))
-    print " cfg->addInfo(\"%s\",\"%s\");" % (name, doc)
+    print(" cfg->addInfo(\"%s\",\"%s\");" % (name, doc))
-    print "%s%s" % (" //-----------------------------------------",
-                    "----------------------------------")
-    print
+    print("%s%s" % (" //-----------------------------------------",
+                    "----------------------------------"))
+    print("")
     for n in node.childNodes:
         if n.nodeType == Node.ELEMENT_NODE:
             parseOption(n)
@@ -360,16 +359,16 @@ def parseGroupCDocs(node):
             name = n.getAttribute('id')
             docC = prepCDocs(n);
             if type != 'obsolete':
-                print " doc->add("
-                print " \"%s\"," % (name)
+                print(" doc->add(")
+                print(" \"%s\"," % (name))
                 rng = len(docC)
                 for i in range(rng):
                     line = docC[i]
                     if i != rng - 1:  # since we go from 0 to rng-1
-                        print " \"%s\\n\"" % (line)
+                        print(" \"%s\\n\"" % (line))
                     else:
-                        print " \"%s\"" % (line)
-                print " );"
+                        print(" \"%s\"" % (line))
+                print(" );")

 def parseOptionDoc(node, first):
     # Handling part for documentation
...@@ -388,52 +387,52 @@ def parseOptionDoc(node, first): ...@@ -388,52 +387,52 @@ def parseOptionDoc(node, first):
if n.nodeType == Node.ELEMENT_NODE: if n.nodeType == Node.ELEMENT_NODE:
doc += parseDocs(n) doc += parseDocs(n)
if (first): if (first):
print " \\anchor cfg_%s" % (name.lower()) print(" \\anchor cfg_%s" % (name.lower()))
print "<dl>" print("<dl>")
print "" print("")
print "<dt>\\c %s <dd>" % (name) print("<dt>\\c %s <dd>" % (name))
else: else:
print " \\anchor cfg_%s" % (name.lower()) print(" \\anchor cfg_%s" % (name.lower()))
print "<dt>\\c %s <dd>" % (name) print("<dt>\\c %s <dd>" % (name))
print " \\addindex %s" % (name) print(" \\addindex %s" % (name))
print doc print(doc)
if (type == 'enum'): if (type == 'enum'):
values = collectValues(node) values = collectValues(node)
print "" print("")
print "Possible values are: " print("Possible values are: ")
rng = len(values) rng = len(values)
for i in range(rng): for i in range(rng):
val = values[i] val = values[i]
if i == rng - 2: if i == rng - 2:
print "%s and " % (val) print("%s and " % (val))
elif i == rng - 1: elif i == rng - 1:
print "%s." % (val) print("%s." % (val))
else: else:
print "%s, " % (val) print("%s, " % (val))
if (defval != ""): if (defval != ""):
print "" print("")
print "" print("")
print "The default value is: <code>%s</code>." % (defval) print("The default value is: <code>%s</code>." % (defval))
print "" print("")
elif (type == 'int'): elif (type == 'int'):
minval = node.getAttribute('minval') minval = node.getAttribute('minval')
maxval = node.getAttribute('maxval') maxval = node.getAttribute('maxval')
print "" print("")
print "" print("")
print "%s: %s%s%s, %s: %s%s%s, %s: %s%s%s." % ( print("%s: %s%s%s, %s: %s%s%s, %s: %s%s%s." % (
" Minimum value", "<code>", minval, "</code>", " Minimum value", "<code>", minval, "</code>",
"maximum value", "<code>", maxval, "</code>", "maximum value", "<code>", maxval, "</code>",
"default value", "<code>", defval, "</code>") "default value", "<code>", defval, "</code>"))
print "" print("")
elif (type == 'bool'): elif (type == 'bool'):
print "" print("")
print "" print("")
if (node.hasAttribute('altdefval')): if (node.hasAttribute('altdefval')):
print "The default value is: system dependent." print("The default value is: system dependent.")
else: else:
print "The default value is: <code>%s</code>." % ( print("The default value is: <code>%s</code>." % (
"YES" if (defval == "1") else "NO") "YES" if (defval == "1") else "NO"))
print "" print("")
elif (type == 'list'): elif (type == 'list'):
if format == 'string': if format == 'string':
values = collectValues(node) values = collectValues(node)
...@@ -441,67 +440,67 @@ def parseOptionDoc(node, first): ...@@ -441,67 +440,67 @@ def parseOptionDoc(node, first):
for i in range(rng): for i in range(rng):
val = values[i] val = values[i]
if i == rng - 2: if i == rng - 2:
print "%s and " % (val) print("%s and " % (val))
elif i == rng - 1: elif i == rng - 1:
print "%s." % (val) print("%s." % (val))
else: else:
print "%s, " % (val) print("%s, " % (val))
print "" print("")
elif (type == 'string'): elif (type == 'string'):
if format == 'dir': if format == 'dir':
if defval != '': if defval != '':
print "" print("")
print "The default directory is: <code>%s</code>." % ( print("The default directory is: <code>%s</code>." % (
defval) defval))
elif format == 'file': elif format == 'file':
abspath = node.getAttribute('abspath') abspath = node.getAttribute('abspath')
if defval != '': if defval != '':
print "" print("")
if abspath != '1': if abspath != '1':
print "The default file is: <code>%s</code>." % ( print("The default file is: <code>%s</code>." % (
defval) defval))
else: else:
print "%s: %s%s%s." % ( print("%s: %s%s%s." % (
"The default file (with absolute path) is", "The default file (with absolute path) is",
"<code>",defval,"</code>") "<code>",defval,"</code>"))
else: else:
if abspath == '1': if abspath == '1':
print "" print("")
print "The file has to be specified with full path." print("The file has to be specified with full path.")
elif file =='image': elif format =='image':
abspath = node.getAttribute('abspath') abspath = node.getAttribute('abspath')
if defval != '': if defval != '':
print "" print("")
if abspath != '1': if abspath != '1':
print "The default image is: <code>%s</code>." % ( print("The default image is: <code>%s</code>." % (
defval) defval))
else: else:
print "%s: %s%s%s." % ( print("%s: %s%s%s." % (
"The default image (with absolute path) is", "The default image (with absolute path) is",
"<code>",defval,"</code>") "<code>",defval,"</code>"))
else: else:
if abspath == '1': if abspath == '1':
print "" print("")
print "The image has to be specified with full path." print("The image has to be specified with full path.")
else: # format == 'string': else: # format == 'string':
if defval != '': if defval != '':
print "" print("")
print "The default value is: <code>%s</code>." % ( print("The default value is: <code>%s</code>." % (
defval) defval))
print "" print("")
# depends handling # depends handling
if (node.hasAttribute('depends')): if (node.hasAttribute('depends')):
depends = node.getAttribute('depends') depends = node.getAttribute('depends')
print "" print("")
print "%s \\ref cfg_%s \"%s\" is set to \\c YES." % ( print("%s \\ref cfg_%s \"%s\" is set to \\c YES." % (
"This tag requires that the tag", depends.lower(), depends.upper()) "This tag requires that the tag", depends.lower(), depends.upper()))
return False return False
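parseOptionDoc builds its "Possible values are: A, B and C." lists twice with the same three-way branch on the loop index. The joining logic, sketched as one helper; joinValues is a hypothetical name used only for illustration, and it returns a single string rather than issuing one print per value:

def joinValues(values):
    # Yields "A.", "A and B.", "A, B and C." for one, two, three or more values.
    if not values:
        return ""
    if len(values) == 1:
        return "%s." % (values[0])
    return "%s and %s." % (", ".join(values[:-1]), values[-1])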
def parseGroupsDoc(node): def parseGroupsDoc(node):
name = node.getAttribute('name') name = node.getAttribute('name')
doc = node.getAttribute('docs') doc = node.getAttribute('docs')
print "\section config_%s %s" % (name.lower(), doc) print("\section config_%s %s" % (name.lower(), doc))
# Start of list has been moved to the first option for better # Start of list has been moved to the first option for better
# anchor placement # anchor placement
# print "<dl>" # print "<dl>"
...@@ -511,7 +510,7 @@ def parseGroupsDoc(node): ...@@ -511,7 +510,7 @@ def parseGroupsDoc(node):
if n.nodeType == Node.ELEMENT_NODE: if n.nodeType == Node.ELEMENT_NODE:
first = parseOptionDoc(n, first) first = parseOptionDoc(n, first)
if (not first): if (not first):
print "</dl>" print("</dl>")
def parseGroupsList(node, commandsList): def parseGroupsList(node, commandsList):
...@@ -542,7 +541,7 @@ def parseHeaderDoc(node): ...@@ -542,7 +541,7 @@ def parseHeaderDoc(node):
if (n.nodeName == "docs"): if (n.nodeName == "docs"):
if (n.getAttribute('documentation') != "0"): if (n.getAttribute('documentation') != "0"):
doc += parseDocs(n) doc += parseDocs(n)
print doc print(doc)
def parseFooterDoc(node): def parseFooterDoc(node):
...@@ -552,7 +551,7 @@ def parseFooterDoc(node): ...@@ -552,7 +551,7 @@ def parseFooterDoc(node):
if (n.nodeName == "docs"): if (n.nodeName == "docs"):
if (n.getAttribute('documentation') != "0"): if (n.getAttribute('documentation') != "0"):
doc += parseDocs(n) doc += parseDocs(n)
print doc print(doc)
def main(): def main():
...@@ -561,16 +560,17 @@ def main(): ...@@ -561,16 +560,17 @@ def main():
try: try:
doc = xml.dom.minidom.parse(sys.argv[2]) doc = xml.dom.minidom.parse(sys.argv[2])
except Exception as inst: except Exception as inst:
print >> sys.stderr sys.stdout = sys.stderr
print >> sys.stderr, inst print("")
print >> sys.stderr print(inst)
print("")
sys.exit(1) sys.exit(1)
elem = doc.documentElement elem = doc.documentElement
if (sys.argv[1] == "-doc"): if (sys.argv[1] == "-doc"):
print "/* WARNING: This file is generated!" print("/* WARNING: This file is generated!")
print " * Do not edit this file, but edit config.xml instead and run" print(" * Do not edit this file, but edit config.xml instead and run")
print " * python configgen.py -doc config.xml to regenerate this file!" print(" * python configgen.py -doc config.xml to regenerate this file!")
print " */" print(" */")
# process header # process header
for n in elem.childNodes: for n in elem.childNodes:
if n.nodeType == Node.ELEMENT_NODE: if n.nodeType == Node.ELEMENT_NODE:
...@@ -582,10 +582,10 @@ def main(): ...@@ -582,10 +582,10 @@ def main():
if n.nodeType == Node.ELEMENT_NODE: if n.nodeType == Node.ELEMENT_NODE:
if (n.nodeName == "group"): if (n.nodeName == "group"):
commandsList = parseGroupsList(n, commandsList) commandsList = parseGroupsList(n, commandsList)
print "\\secreflist" print("\\secreflist")
for x in sorted(commandsList): for x in sorted(commandsList):
print "\\refitem cfg_%s %s" % (x.lower(), x) print("\\refitem cfg_%s %s" % (x.lower(), x))
print "\\endsecreflist" print("\\endsecreflist")
# process groups and options # process groups and options
for n in elem.childNodes: for n in elem.childNodes:
if n.nodeType == Node.ELEMENT_NODE: if n.nodeType == Node.ELEMENT_NODE:
...@@ -597,24 +597,24 @@ def main(): ...@@ -597,24 +597,24 @@ def main():
if (n.nodeName == "footer"): if (n.nodeName == "footer"):
parseFooterDoc(n) parseFooterDoc(n)
elif (sys.argv[1] == "-cpp"): elif (sys.argv[1] == "-cpp"):
print "/* WARNING: This file is generated!" print("/* WARNING: This file is generated!")
print " * Do not edit this file, but edit config.xml instead and run" print(" * Do not edit this file, but edit config.xml instead and run")
print " * python configgen.py -cpp config.xml to regenerate this file!" print(" * python configgen.py -cpp config.xml to regenerate this file!")
print " */" print(" */")
print "" print("")
print "#include \"configoptions.h\"" print("#include \"configoptions.h\"")
print "#include \"config.h\"" print("#include \"config.h\"")
print "#include \"portable.h\"" print("#include \"portable.h\"")
print "#include \"settings.h\"" print("#include \"settings.h\"")
print "" print("")
print "void addConfigOptions(Config *cfg)" print("void addConfigOptions(Config *cfg)")
print "{" print("{")
print " ConfigString *cs;" print(" ConfigString *cs;")
print " ConfigEnum *ce;" print(" ConfigEnum *ce;")
print " ConfigList *cl;" print(" ConfigList *cl;")
print " ConfigInt *ci;" print(" ConfigInt *ci;")
print " ConfigBool *cb;" print(" ConfigBool *cb;")
print "" print("")
# process header # process header
for n in elem.childNodes: for n in elem.childNodes:
if n.nodeType == Node.ELEMENT_NODE: if n.nodeType == Node.ELEMENT_NODE:
...@@ -624,17 +624,17 @@ def main(): ...@@ -624,17 +624,17 @@ def main():
if n.nodeType == Node.ELEMENT_NODE: if n.nodeType == Node.ELEMENT_NODE:
if (n.nodeName == "group"): if (n.nodeName == "group"):
parseGroups(n) parseGroups(n)
print "}" print("}")
elif (sys.argv[1] == "-wiz"): elif (sys.argv[1] == "-wiz"):
print "/* WARNING: This file is generated!" print("/* WARNING: This file is generated!")
print " * Do not edit this file, but edit config.xml instead and run" print(" * Do not edit this file, but edit config.xml instead and run")
print " * python configgen.py -wiz config.xml to regenerate this file!" print(" * python configgen.py -wiz config.xml to regenerate this file!")
print " */" print(" */")
print "#include \"configdoc.h\"" print("#include \"configdoc.h\"")
print "#include \"docintf.h\"" print("#include \"docintf.h\"")
print "" print("")
print "void addConfigDocs(DocIntf *doc)" print("void addConfigDocs(DocIntf *doc)")
print "{" print("{")
for n in elem.childNodes: for n in elem.childNodes:
if n.nodeType == Node.ELEMENT_NODE: if n.nodeType == Node.ELEMENT_NODE:
if (n.nodeName == "header"): if (n.nodeName == "header"):
...@@ -643,7 +643,7 @@ def main(): ...@@ -643,7 +643,7 @@ def main():
if n.nodeType == Node.ELEMENT_NODE: if n.nodeType == Node.ELEMENT_NODE:
if (n.nodeName == "group"): if (n.nodeName == "group"):
parseGroupCDocs(n) parseGroupCDocs(n)
print "}" print("}")
if __name__ == '__main__': if __name__ == '__main__':
main() main()
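One detail of the conversion in main(): the Python 2 form "print >> sys.stderr" is replaced by rebinding sys.stdout to sys.stderr before the print() calls. A sketch of the more direct file= form, assuming the __future__ import is acceptable for the Python versions being targeted; report_parse_error is an illustrative name, not part of the commit:

from __future__ import print_function
import sys

def report_parse_error(exc):
    # Same output as the sys.stdout = sys.stderr workaround above, but
    # leaves sys.stdout bound to the real standard output stream.
    print("", file=sys.stderr)
    print(exc, file=sys.stderr)
    print("", file=sys.stderr)
    sys.exit(1)

Either way the script is still driven as "python configgen.py -doc|-cpp|-wiz config.xml", with the generated file captured from standard output.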
...@@ -2,7 +2,7 @@ import sys ...@@ -2,7 +2,7 @@ import sys
if (len(sys.argv) > 1): if (len(sys.argv) > 1):
if (sys.argv[1] == "ENONLY"): if (sys.argv[1] == "ENONLY"):
print "#define ENGLISH_ONLY" print("#define ENGLISH_ONLY")
else: else:
for x in xrange(1, len(sys.argv)): for x in range(1, len(sys.argv)):
print "#define LANG_%s"%(sys.argv[x]) print("#define LANG_%s"%(sys.argv[x]))
...@@ -15,7 +15,7 @@ for f in files: ...@@ -15,7 +15,7 @@ for f in files:
# generating file is lang_cfg.py # generating file is lang_cfg.py
# the rules file has to output lang_cfg.h # the rules file has to output lang_cfg.h
# #
print """\ print("""\
<?xml version="1.0" encoding="utf-8"?> <?xml version="1.0" encoding="utf-8"?>
<VisualStudioToolFile <VisualStudioToolFile
Name="languages" Name="languages"
...@@ -52,7 +52,7 @@ print """\ ...@@ -52,7 +52,7 @@ print """\
/> />
</Values> </Values>
</EnumProperty> </EnumProperty>
""" """)
# #
# generate loop, English is mandatory (so cannot be chosen) # generate loop, English is mandatory (so cannot be chosen)
# #
...@@ -76,7 +76,7 @@ for f in new_list: ...@@ -76,7 +76,7 @@ for f in new_list:
l1 = l.replace("-","") l1 = l.replace("-","")
# capitalize first letter # capitalize first letter
l = l.title() l = l.title()
print """\ print("""\
<EnumProperty <EnumProperty
Name="%s" Name="%s"
DisplayName="Use %s" DisplayName="Use %s"
...@@ -96,11 +96,11 @@ for f in new_list: ...@@ -96,11 +96,11 @@ for f in new_list:
/> />
</Values> </Values>
</EnumProperty> </EnumProperty>
""" % (l1, l, l, l, f[1], l) """ % (l1, l, l, l, f[1], l))
print """\ print("""\
</Properties> </Properties>
</CustomBuildRule> </CustomBuildRule>
</Rules> </Rules>
</VisualStudioToolFile> </VisualStudioToolFile>
""" """)
...@@ -290,6 +290,7 @@ static void writeDefaultHeaderPart1(FTextStream &t) ...@@ -290,6 +290,7 @@ static void writeDefaultHeaderPart1(FTextStream &t)
// Load required packages // Load required packages
t << "% Packages required by doxygen\n" t << "% Packages required by doxygen\n"
"\\usepackage{fixltx2e}\n" // for \textsubscript
"\\usepackage{calc}\n" "\\usepackage{calc}\n"
"\\usepackage{doxygen}\n" "\\usepackage{doxygen}\n"
"\\usepackage{graphicx}\n" "\\usepackage{graphicx}\n"
...@@ -297,7 +298,6 @@ static void writeDefaultHeaderPart1(FTextStream &t) ...@@ -297,7 +298,6 @@ static void writeDefaultHeaderPart1(FTextStream &t)
"\\usepackage{makeidx}\n" "\\usepackage{makeidx}\n"
"\\usepackage{multicol}\n" "\\usepackage{multicol}\n"
"\\usepackage{multirow}\n" "\\usepackage{multirow}\n"
"\\usepackage{fixltx2e}\n" // for \textsubscript
"\\PassOptionsToPackage{warn}{textcomp}\n" "\\PassOptionsToPackage{warn}{textcomp}\n"
"\\usepackage{textcomp}\n" "\\usepackage{textcomp}\n"
"\\usepackage[nointegrals]{wasysym}\n" "\\usepackage[nointegrals]{wasysym}\n"
......
#if defined(__APPLE__) || defined(macintosh)
// define this before including iconv.h to avoid a mapping of
// iconv_open and friends to libicon_open (done by mac ports),
// while the symbols without 'lib' are linked from /usr/lib/libiconv
#define LIBICONV_PLUG
#endif
#include <iconv.h> #include <iconv.h>
// These functions are implemented in a C file, because there are different // These functions are implemented in a C file, because there are different
......
...@@ -6675,6 +6675,18 @@ static void parseCompounds(Entry *rt) ...@@ -6675,6 +6675,18 @@ static void parseCompounds(Entry *rt)
current = new Entry; current = new Entry;
gstat = FALSE; gstat = FALSE;
initEntry(); initEntry();
// deep copy group list from parent (see bug 727732)
if (rt->groups)
{
QListIterator<Grouping> gli(*rt->groups);
Grouping *g;
for (;(g=gli.current());++gli)
{
ce->groups->append(new Grouping(*g));
}
}
int ni=ce->name.findRev("::"); if (ni==-1) ni=0; else ni+=2; int ni=ce->name.findRev("::"); if (ni==-1) ni=0; else ni+=2;
// set default protection based on the compound type // set default protection based on the compound type
if( ce->section==Entry::CLASS_SEC ) // class if( ce->section==Entry::CLASS_SEC ) // class
......
...@@ -1798,6 +1798,7 @@ nextChar: ...@@ -1798,6 +1798,7 @@ nextChar:
) )
) )
{ {
if (c=='\t') c=' ';
if (c=='*' || c=='&' || c=='@' || c=='$') if (c=='*' || c=='&' || c=='@' || c=='$')
{ {
//uint rl=result.length(); //uint rl=result.length();
...@@ -1827,8 +1828,8 @@ nextChar: ...@@ -1827,8 +1828,8 @@ nextChar:
} }
} }
} }
//printf("removeRedundantWhiteSpace(`%s')=`%s'\n",s.data(),result.data());
growBuf.addChar(0); growBuf.addChar(0);
//printf("removeRedundantWhiteSpace(`%s')=`%s'\n",s.data(),growBuf.get());
//result.resize(resultPos); //result.resize(resultPos);
return growBuf.get(); return growBuf.get();
} }
......