Commit 962ad74f authored by Petr Prikryl

Merge branch 'master' into devel

parents 39bdafef 8cac977d
......@@ -167,7 +167,7 @@ void InputString::setEnabled(bool state)
{
m_lab->setEnabled(state);
if (m_le) m_le->setEnabled(state);
if (m_im) m_le->setEnabled(state);
if (m_im) m_im->setEnabled(state);
if (m_br) m_br->setEnabled(state);
if (m_com) m_com->setEnabled(state);
updateDefault();
......
......@@ -594,9 +594,10 @@ fi
# - check for python ----------------------------------------------------------
python_version=0
printf " Checking for python... "
if test "$f_python" = NO; then
python_names="python2 python"
python_names="python3 python2 python"
python_dirs="$bin_dirs /usr/bin /usr/local/bin /bin /sbin"
python_prog=NO
python_found=NO
......@@ -604,9 +605,16 @@ if test "$f_python" = NO; then
for j in $python_dirs; do
if test -x "$j/$i"; then
python_found=YES
if test `$j/$i -c "import sys; print sys.version_info[0]"` = 2; then
python_prog="$j/$i"
if test `$j/$i -c "import sys; print(sys.version_info[0])"` = 3; then
python_prog="$j/$i";
python_version=`$j/$i -c "import platform; print(platform.python_version())"`;
break 2
elif test `$j/$i -c "import sys; print(sys.version_info[0])"` = 2; then
if test `$j/$i -c "import sys; print(sys.version_info[1])"` -ge 6; then
python_prog="$j/$i";
python_version=`$j/$i -c "import platform; print(platform.python_version())"`;
break 2
fi
fi
fi
done
......@@ -616,14 +624,14 @@ fi
if test "$f_python" = NO; then
if test "$python_found" = YES; then
echo "version should be python 2."
echo "version should be python 2.6 or higher."
else
echo "not found!";
fi
echo
exit 2
fi
echo "using $f_python";
echo "using $f_python (version $python_version)";
# - check for perl ------------------------------------------------------------
......
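Editorial aside, not part of the commit: the updated configure check prefers python3 and falls back to python2 only when it is at least version 2.6. A minimal sketch of that acceptance rule, expressed in Python for clarity (the function name is hypothetical):

    import sys

    def python_is_acceptable(version_info=sys.version_info):
        # Any Python 3.x is accepted; Python 2 is accepted from 2.6 upwards,
        # mirroring the shell test in the configure hunk above.
        major, minor = version_info[0], version_info[1]
        return major == 3 or (major == 2 and minor >= 6)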
......@@ -556,6 +556,7 @@ open-source tools:
<li>GNU bison version 2.5 (Linux) and 2.3 (MacOSX)
<li>GNU make version 3.81
<li>Perl version 5.12
<li>Python version 2.7 and 3.4
<li>TeX Live 2009 (or later)
</ul>
......
......@@ -66,11 +66,31 @@
of translators introduced.
"""
from __future__ import generators
import codecs
import os
import platform
import re
import sys
import textwrap
def xopen(fname, mode='r', encoding='utf-8-sig'):
'''Unified open of text files with UTF-8 default encoding.
The 'utf-8-sig' skips the BOM automatically.
'''
# Use UTF-8 without BOM when writing to a text file.
if encoding == 'utf-8-sig' and mode == 'w':
encoding = 'utf-8'
major, minor, patch = (int(e) for e in platform.python_version_tuple())
if major == 2:
if mode == 'w':
mode = 'wU'
import codecs
return codecs.open(fname, mode=mode, encoding=encoding) # Python 2
else:
return open(fname, mode=mode, encoding=encoding) # Python 3
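# Editorial aside (not part of the commit): a short usage sketch for the
# xopen() helper defined above; the file names are hypothetical.
#   src = xopen('translator_en.h')       # read, 'utf-8-sig' silently skips a BOM
#   text = src.read(); src.close()
#   dst = xopen('report.txt', mode='w')  # write, plain UTF-8 without a BOM
#   dst.write(text); dst.close()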
def fill(s):
......@@ -99,58 +119,6 @@ def fill(s):
return '\n'.join(lines)
# The following function dedent() is the verbatim copy from the textwrap.py
# module. The textwrap.py was introduced in Python 2.3. To make this script
# work also in older Python versions, I have decided to copy it.
# Notice that the textwrap.py is copyrighted:
#
# Copyright (C) 1999-2001 Gregory P. Ward.
# Copyright (C) 2002, 2003 Python Software Foundation.
# Written by Greg Ward <gward@python.net>
#
# The explicit permission to use the code here was sent by Guido van Rossum
# (4th June, 2004).
#
def dedent(text):
"""dedent(text : string) -> string
Remove any whitespace that can be uniformly removed from the left
of every line in `text`.
This can be used e.g. to make triple-quoted strings line up with
the left edge of screen/whatever, while still presenting it in the
source code in indented form.
For example:
def test():
# end first line with \ to avoid the empty line!
s = '''\
hello
world
'''
print repr(s) # prints ' hello\n world\n '
print repr(dedent(s)) # prints 'hello\n world\n'
"""
lines = text.expandtabs().split('\n')
margin = None
for line in lines:
content = line.lstrip()
if not content:
continue
indent = len(line) - len(content)
if margin is None:
margin = indent
else:
margin = min(margin, indent)
if margin is not None and margin > 0:
for i in range(len(lines)):
lines[i] = lines[i][margin:]
return '\n'.join(lines)
class Transl:
"""One instance is build for each translator.
......@@ -237,7 +205,7 @@ class Transl:
# Open the file for reading and extracting tokens until the eof.
# Initialize the finite automaton.
f = open(self.fname)
f = xopen(self.fname)
lineNo = 0
line = '' # init -- see the pos initialization below
linelen = 0 # init
......@@ -256,8 +224,6 @@ class Transl:
else:
lineNo += 1
line = f.readline()
if line.startswith('\xef\xbb\xbf'):
line = line[3:] # skip the BOM
linelen = len(line)
pos = 0
if line == '': # eof
......@@ -276,7 +242,7 @@ class Transl:
# If it is an unknown item, it can still be recognized
# here. Keywords and separators are the example.
if tokenId == 'unknown':
if tokenDic.has_key(tokenStr):
if tokenStr in tokenDic:
tokenId = tokenDic[tokenStr]
elif tokenStr.isdigit():
tokenId = 'num'
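# Editorial aside (not part of the commit): dict.has_key() was removed in
# Python 3, so the membership tests in this file now use the 'in' operator,
# which behaves the same on Python 2:
#   tokenDic = {'class': 'class'}
#   'class' in tokenDic        # True on Python 2 and 3
#   tokenDic.has_key('class')  # AttributeError on Python 3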
......@@ -329,7 +295,7 @@ class Transl:
tokenStr = c
tokenLineNo = lineNo
status = 8
elif tokenDic.has_key(c): # known one-char token
elif c in tokenDic: # known one-char token
tokenId = tokenDic[c]
tokenStr = c
tokenLineNo = lineNo
......@@ -424,7 +390,7 @@ class Transl:
if c.isspace():
pos += 1
status = 0 # tokenId may be determined later
elif tokenDic.has_key(c): # separator, don't move pos
elif c in tokenDic: # separator, don't move pos
status = 0
else:
tokenStr += c # collect
......@@ -457,7 +423,7 @@ class Transl:
# Always assume that the previous tokens were processed. Get
# the next one.
tokenId, tokenStr, tokenLineNo = tokenIterator.next()
tokenId, tokenStr, tokenLineNo = next(tokenIterator)
# Process the token and never return back.
if status == 0: # waiting for the 'class' keyword.
......@@ -588,7 +554,7 @@ class Transl:
while status != 777:
# Get the next token.
tokenId, tokenStr, tokenLineNo = tokenIterator.next()
tokenId, tokenStr, tokenLineNo = next(tokenIterator)
if status == 0: # waiting for 'public:'
if tokenId == 'public':
......@@ -670,7 +636,7 @@ class Transl:
elif status == 9: # after semicolon, produce the dic item
if tokenId == 'semic':
assert(not resultDic.has_key(uniPrototype))
assert(uniPrototype not in resultDic)
resultDic[uniPrototype] = prototype
status = 2
else:
......@@ -752,7 +718,7 @@ class Transl:
# Eat the rest of the source to cause closing the file.
while tokenId != 'eof':
tokenId, tokenStr, tokenLineNo = tokenIterator.next()
tokenId, tokenStr, tokenLineNo = next(tokenIterator)
# Return the resulting dictionary with 'uniPrototype -> prototype'.
return resultDic
......@@ -800,7 +766,7 @@ class Transl:
while status != 777:
# Get the next token.
tokenId, tokenStr, tokenLineNo = tokenIterator.next()
tokenId, tokenStr, tokenLineNo = next(tokenIterator)
if status == 0: # waiting for 'public:'
if tokenId == 'public':
......@@ -912,7 +878,7 @@ class Transl:
sys.stderr.write(msg)
assert False
assert(not self.prototypeDic.has_key(uniPrototype))
assert(uniPrototype not in self.prototypeDic)
# Insert new dictionary item.
self.prototypeDic[uniPrototype] = prototype
status = 2 # body consumed
......@@ -1056,12 +1022,12 @@ class Transl:
# For the required methods, update the dictionary of methods
# implemented by the adapter.
for protoUni in self.prototypeDic:
if reqDic.has_key(protoUni):
if protoUni in reqDic:
# This required method will be marked as implemented
# by this adapter class. This implementation assumes
# that newer adapters do not reimplement any required
# methods already implemented by older adapters.
assert(not adaptDic.has_key(protoUni))
assert(protoUni not in adaptDic)
adaptDic[protoUni] = (version, self.classId)
# Clear the dictionary object and the information related
......@@ -1094,7 +1060,7 @@ class Transl:
# Eat the rest of the source to cause closing the file.
while True:
try:
t = tokenIterator.next()
t = next(tokenIterator)
except StopIteration:
break
......@@ -1106,7 +1072,7 @@ class Transl:
# Build the list of obsolete methods.
self.obsoleteMethods = []
for p in myDic:
if not reqDic.has_key(p):
if p not in reqDic:
self.obsoleteMethods.append(p)
# Build the list of missing methods and the list of implemented
......@@ -1114,7 +1080,7 @@ class Transl:
self.missingMethods = []
self.implementedMethods = []
for p in reqDic:
if myDic.has_key(p):
if p in myDic:
self.implementedMethods.append(p)
else:
self.missingMethods.append(p)
......@@ -1133,7 +1099,7 @@ class Transl:
adaptMinVersion = '9.9.99'
adaptMinClass = 'TranslatorAdapter_9_9_99'
for uniProto in self.missingMethods:
if adaptDic.has_key(uniProto):
if uniProto in adaptDic:
version, cls = adaptDic[uniProto]
if version < adaptMinVersion:
adaptMinVersion = version
......@@ -1342,9 +1308,9 @@ class TrManager:
sys.exit(1)
else:
lst = os.listdir(self.src_path)
lst = filter(lambda x: x[:11] == 'translator_'
lst = [x for x in lst if x[:11] == 'translator_'
and x[-2:] == '.h'
and x != 'translator_adapter.h', lst)
and x != 'translator_adapter.h']
# Build the object for the translator_xx.h files, and process the
# content of the file. Then insert the object to the dictionary
......@@ -1366,7 +1332,7 @@ class TrManager:
# Build the auxiliary list with strings compound of the status,
# readable form of the language, and classId.
statLst = []
for obj in self.__translDic.values():
for obj in list(self.__translDic.values()):
assert(obj.classId != 'Translator')
s = obj.status + '|' + obj.langReadable + '|' + obj.classId
statLst.append(s)
......@@ -1384,9 +1350,10 @@ class TrManager:
# Build the list of tuples that contain (langReadable, obj).
# Sort it by readable name.
self.langLst = []
for obj in self.__translDic.values():
for obj in list(self.__translDic.values()):
self.langLst.append((obj.langReadable, obj))
self.langLst.sort(lambda a, b: cmp(a[0], b[0]))
self.langLst.sort(key=lambda x: x[0])
# Create the list with readable language names. If the language has
# also the English-based version, modify the item by appending
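# Editorial aside (not part of the commit): list.sort() lost its cmp
# argument in Python 3, hence the switch to a key function above; both
# forms order the tuples by the readable language name:
#   langLst.sort(key=lambda x: x[0])              # Python 2 and 3
#   langLst.sort(lambda a, b: cmp(a[0], b[0]))    # Python 2 only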
......@@ -1400,7 +1367,7 @@ class TrManager:
# of the English-based object. If the object exists, modify the
# name for the readable list of supported languages.
classIdEn = obj.classId + 'En'
if self.__translDic.has_key(classIdEn):
if classIdEn in self.__translDic:
name += ' (+En)'
# Append the result name of the language, possibly with note.
......@@ -1424,16 +1391,16 @@ class TrManager:
for name, obj in self.langLst:
if obj.status == 'En':
classId = obj.classId[:-2]
if self.__translDic.has_key(classId):
if classId in self.__translDic:
self.numLang -= 1 # the couple will be counted as one
# Extract the version of Doxygen.
f = open(os.path.join(self.doxy_path, 'VERSION'))
f = xopen(os.path.join(self.doxy_path, 'VERSION'))
self.doxVersion = f.readline().strip()
f.close()
# Update the last modification time.
for tr in self.__translDic.values():
for tr in list(self.__translDic.values()):
tim = tr.getmtime()
if tim > self.lastModificationTime:
self.lastModificationTime = tim
......@@ -1472,11 +1439,11 @@ class TrManager:
probably used should be checked first and the resulting reduced
dictionary should be used for checking the next files (speed up).
"""
lst_in = dic.keys() # identifiers to be searched for
lst_in = list(dic.keys()) # identifiers to be searched for
# Read content of the file as one string.
assert os.path.isfile(fname)
f = open(fname)
f = xopen(fname)
cont = f.read()
f.close()
......@@ -1497,7 +1464,7 @@ class TrManager:
# Build the dictionary of the required method prototypes with
# method identifiers used as keys.
trdic = {}
for prototype in self.requiredMethodsDic.keys():
for prototype in list(self.requiredMethodsDic.keys()):
ri = prototype.split('(')[0]
identifier = ri.split()[1].strip()
trdic[identifier] = prototype
......@@ -1553,7 +1520,7 @@ class TrManager:
output = os.path.join(self.doc_path, self.translatorReportFileName)
# Open the textual report file for the output.
f = open(output, 'w')
f = xopen(output, 'w')
# Output the information about the version.
f.write('(' + self.doxVersion + ')\n\n')
......@@ -1581,7 +1548,7 @@ class TrManager:
# The e-mail addresses of the maintainers will be collected to
# the auxiliary file in the order of translator classes listed
# in the translator report.
fmail = open('mailto.txt', 'w')
fmail = xopen('mailto.txt', 'w')
# Write the list of "up-to-date" translator classes.
if self.upToDateIdLst:
......@@ -1665,12 +1632,12 @@ class TrManager:
# adapters.
if not self.script_argLst:
to_remove = {}
for version, adaptClassId in self.adaptMethodsDic.values():
for version, adaptClassId in list(self.adaptMethodsDic.values()):
if version < adaptMinVersion:
to_remove[adaptClassId] = True
if to_remove:
lst = to_remove.keys()
lst = list(to_remove.keys())
lst.sort()
plural = len(lst) > 1
note = 'Note: The adapter class'
......@@ -1716,7 +1683,7 @@ class TrManager:
f.write('\n' + '=' * 70 + '\n')
f.write(fill(s) + '\n\n')
keys = dic.keys()
keys = list(dic.keys())
keys.sort()
for key in keys:
f.write(' ' + dic[key] + '\n')
......@@ -1726,7 +1693,7 @@ class TrManager:
f.write('\n' + '=' * 70)
f.write('\nDetails for translators (classes sorted alphabetically):\n')
cls = self.__translDic.keys()
cls = list(self.__translDic.keys())
cls.sort()
for c in cls:
......@@ -1753,7 +1720,7 @@ class TrManager:
self.lastModificationTime = tim
# Process the content of the maintainers file.
f = codecs.open(fname, 'r', 'utf-8')
f = xopen(fname)
inside = False # inside the record for the language
lineReady = True
classId = None
......@@ -1764,28 +1731,28 @@ class TrManager:
lineReady = line != '' # when eof, then line == ''
line = line.strip() # eof should also behave as separator
if line != u'' and line[0] == u'%': # skip the comment line
if line != '' and line[0] == '%': # skip the comment line
continue
if not inside: # if outside of the record
if line != u'': # should be language identifier
if line != '': # should be language identifier
classId = line
maintainersLst = []
inside = True
# Otherwise skip empty lines that do not act as separators.
else: # if inside the record
if line == u'': # separator found
if line == '': # separator found
inside = False
else:
# If it is the first maintainer, create the empty list.
if not self.__maintainersDic.has_key(classId):
if classId not in self.__maintainersDic:
self.__maintainersDic[classId] = []
# Split the information about the maintainer and append
# the tuple. The address may be prefixed '[unreachable]'
# or whatever '[xxx]'. This will be processed later.
lst = line.split(u':', 1)
lst = line.split(':', 1)
assert(len(lst) == 2)
t = (lst[0].strip(), lst[1].strip())
self.__maintainersDic[classId].append(t)
......@@ -1817,7 +1784,7 @@ class TrManager:
#
# Read the template of the documentation, and remove the first
# attention lines.
f = codecs.open(fTplName, 'r', 'utf-8')
f = xopen(fTplName)
doctpl = f.read()
f.close()
......@@ -1829,7 +1796,7 @@ class TrManager:
# document template.
tplDic = {}
s = u'Do not edit this file. It was generated by the %s script.\n * Instead edit %s and %s' % (self.script_name, self.languageTplFileName, self.maintainersFileName)
s = u'Do not edit this file. It was generated by the %s script. * Instead edit %s and %s' % (self.script_name, self.languageTplFileName, self.maintainersFileName)
tplDic['editnote'] = s
tplDic['doxVersion'] = self.doxVersion
......@@ -1865,7 +1832,7 @@ class TrManager:
</table>
\\endhtmlonly
'''
htmlTableTpl = dedent(htmlTableTpl)
htmlTableTpl = textwrap.dedent(htmlTableTpl)
htmlTrTpl = u'\n <tr bgcolor="#ffffff">%s\n </tr>'
htmlTdTpl = u'\n <td>%s</td>'
htmlTdStatusColorTpl = u'\n <td bgcolor="%s">%s</td>'
......@@ -1881,7 +1848,7 @@ class TrManager:
if obj.readableStatus.startswith('1.4'):
bkcolor = self.getBgcolorByReadableStatus('1.4')
else:
bkcolor = '#ffffff'
bkcolor = u'#ffffff'
lst = [ htmlTdStatusColorTpl % (bkcolor, obj.langReadable) ]
......@@ -1905,23 +1872,23 @@ class TrManager:
lm = []
for maintainer in self.__maintainersDic[obj.classId]:
name = maintainer[0]
if name.startswith(u'--'):
if name.startswith('--'):
name = u'<span style="color: red; background-color: yellow">'\
+ name + u'</span>'
lm.append(name)
mm = u'<br/>'.join(lm)
mm = '<br/>'.join(lm)
# The marked addresses (they start with the mark '[unreachable]',
# '[resigned]', whatever '[xxx]') will not be displayed at all.
# Only the mark will be used instead.
rexMark = re.compile(ur'(?P<mark>\[.*?\])')
rexMark = re.compile(u'(?P<mark>\\[.*?\\])')
le = []
for maintainer in self.__maintainersDic[obj.classId]:
address = maintainer[1]
m = rexMark.search(address)
if m is not None:
address = u'<span style="color: brown">'\
+ m.group(u'mark') + u'</span>'
+ m.group('mark') + u'</span>'
le.append(address)
ee = u'<br/>'.join(le)
......@@ -1940,7 +1907,7 @@ class TrManager:
htmlTable = htmlTableTpl % (''.join(trlst))
# Define templates for LaTeX table parts of the documentation.
latexTableTpl = ur'''
latexTableTpl = b'''
\latexonly
\footnotesize
\begin{longtable}{|l|l|l|l|}
......@@ -1952,9 +1919,9 @@ class TrManager:
\end{longtable}
\normalsize
\endlatexonly
'''
latexTableTpl = dedent(latexTableTpl)
latexLineTpl = u'\n' + r' %s & %s & {\tt\tiny %s} & %s \\'
'''.decode('utf_8')
latexTableTpl = textwrap.dedent(latexTableTpl)
latexLineTpl = u'\n %s & %s & {\\tt\\tiny %s} & %s \\\\'
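# Editorial aside (not part of the commit): the ur'...' raw-unicode string
# prefix is a syntax error in Python 3, which is why the LaTeX template
# above is now written as a bytes literal and decoded, and the line
# template spells its backslashes explicitly:
#   tpl = b'\\begin{longtable}{|l|l|l|l|}'.decode('utf_8')  # valid on 2.6+ and 3.x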
# Loop through transl objects in the order of sorted readable names
# and generate the content of the LaTeX table.
......@@ -1965,7 +1932,7 @@ class TrManager:
# in the table is placed explicitly above the first
# maintainer. Prepare the arguments for the LaTeX row template.
maintainers = []
if self.__maintainersDic.has_key(obj.classId):
if obj.classId in self.__maintainersDic:
maintainers = self.__maintainersDic[obj.classId]
lang = obj.langReadable
......@@ -1976,8 +1943,8 @@ class TrManager:
classId = obj.classId[:-2]
if classId in self.__translDic:
langNE = self.__translDic[classId].langReadable
maintainer = u'see the %s language' % langNE
email = u'~'
maintainer = 'see the %s language' % langNE
email = '~'
if not maintainer and (obj.classId in self.__maintainersDic):
lm = [ m[0] for m in self.__maintainersDic[obj.classId] ]
......@@ -1996,8 +1963,8 @@ class TrManager:
# List the other maintainers for the language. Do not set
# lang and status for them.
lang = u'~'
status = u'~'
lang = '~'
status = '~'
for m in maintainers[1:]:
maintainer = m[0]
email = m[1]
......@@ -2012,14 +1979,20 @@ class TrManager:
tplDic['informationTable'] = htmlTable + u'\n' + latexTable
# Insert the symbols into the document template and write it down.
f = codecs.open(fDocName, 'w', 'utf-8')
f = xopen(fDocName, 'w')
f.write(doctpl % tplDic)
f.close()
if __name__ == '__main__':
# Create the manager, build the transl objects, and parse the related
# sources.
# The Python 2.6+ or 3.3+ is required.
major, minor, patch = (int(e) for e in platform.python_version_tuple())
if (major == 2 and minor < 6) or (major == 3 and minor < 3):
print('Python 2.6+ or Python 3.3+ are required for the script')
sys.exit(1)
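# Editorial aside (not part of the commit): platform.python_version_tuple()
# returns a tuple of strings, which is why each element is converted with
# int() before the comparison above, e.g.:
#   platform.python_version_tuple()   # ('3', '4', '1') -- strings, not ints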
# The translator manager builds the transl objects, parses the related
# sources, and keeps them in memory.
trMan = TrManager()
# Generate the language.doc.
......
......@@ -11601,7 +11601,9 @@ static inline bool is_arabic(unsigned short x) {
((x >= 0xfb50) && (x <= 0xfdff)) ||
((x >= 0xfe70) && (x <= 0xfeff)));
}
#endif
#ifndef QT_NO_UNICODETABLES
static inline bool is_neutral(unsigned short dir) {
return ((dir == QChar::DirB) ||
(dir == QChar::DirS) ||
......
......@@ -329,44 +329,44 @@ class CallContext
public:
struct Ctx
{
Ctx() : name(g_name), type(g_type), cd(0) {}
Ctx() : name(g_name), type(g_type), d(0) {}
QCString name;
QCString type;
ClassDef *cd;
Definition *d;
};
CallContext()
{
m_classList.append(new Ctx);
m_classList.setAutoDelete(TRUE);
m_defList.append(new Ctx);
m_defList.setAutoDelete(TRUE);
}
virtual ~CallContext() {}
void setClass(ClassDef *cd)
void setScope(Definition *d)
{
Ctx *ctx = m_classList.getLast();
Ctx *ctx = m_defList.getLast();
if (ctx)
{
DBG_CTX((stderr,"** Set call context %s (%p)\n",cd==0 ? "<null>" : cd->name().data(),cd));
ctx->cd=cd;
DBG_CTX((stderr,"** Set call context %s (%p)\n",d==0 ? "<null>" : d->name().data(),d));
ctx->d=d;
}
}
void pushScope()
{
m_classList.append(new Ctx);
DBG_CTX((stderr,"** Push call context %d\n",m_classList.count()));
m_defList.append(new Ctx);
DBG_CTX((stderr,"** Push call context %d\n",m_defList.count()));
}
void popScope()
{
if (m_classList.count()>1)
if (m_defList.count()>1)
{
DBG_CTX((stderr,"** Pop call context %d\n",m_classList.count()));
Ctx *ctx = m_classList.getLast();
DBG_CTX((stderr,"** Pop call context %d\n",m_defList.count()));
Ctx *ctx = m_defList.getLast();
if (ctx)
{
g_name = ctx->name;
g_type = ctx->type;
}
m_classList.removeLast();
m_defList.removeLast();
}
else
{
......@@ -376,17 +376,17 @@ class CallContext
void clear()
{
DBG_CTX((stderr,"** Clear call context\n"));
m_classList.clear();
m_classList.append(new Ctx);
m_defList.clear();
m_defList.append(new Ctx);
}
ClassDef *getClass() const
Definition *getScope() const
{
Ctx *ctx = m_classList.getLast();
if (ctx) return ctx->cd; else return 0;
Ctx *ctx = m_defList.getLast();
if (ctx) return ctx->d; else return 0;
}
private:
QList<Ctx> m_classList;
QList<Ctx> m_defList;
};
static CallContext g_theCallContext;
......@@ -732,7 +732,7 @@ static MemberDef *setCallContextForVar(const QCString &name)
if (md)
{
//printf("name=%s scope=%s\n",locName.data(),scope.data());
g_theCallContext.setClass(stripClassName(md->typeString(),md->getOuterScope()));
g_theCallContext.setScope(stripClassName(md->typeString(),md->getOuterScope()));
return md;
}
}
......@@ -745,7 +745,7 @@ static MemberDef *setCallContextForVar(const QCString &name)
if (md)
{
//printf("name=%s scope=%s\n",locName.data(),scope.data());
g_theCallContext.setClass(stripClassName(md->typeString(),md->getOuterScope()));
g_theCallContext.setScope(stripClassName(md->typeString(),md->getOuterScope()));
return md;
}
}
......@@ -760,7 +760,7 @@ static MemberDef *setCallContextForVar(const QCString &name)
if (mcd!=VariableContext::dummyContext)
{
DBG_CTX((stderr,"local var `%s' mcd=%s\n",name.data(),mcd->name().data()));
g_theCallContext.setClass(mcd);
g_theCallContext.setScope(mcd);
}
}
else
......@@ -778,7 +778,7 @@ static MemberDef *setCallContextForVar(const QCString &name)
if (g_scopeStack.top()!=CLASSBLOCK)
{
DBG_CTX((stderr,"class member `%s' mcd=%s\n",name.data(),mcd->name().data()));
g_theCallContext.setClass(stripClassName(md->typeString(),md->getOuterScope()));
g_theCallContext.setScope(stripClassName(md->typeString(),md->getOuterScope()));
}
return md;
}
......@@ -794,7 +794,7 @@ static MemberDef *setCallContextForVar(const QCString &name)
MemberDef *md=mn->getFirst();
if (!md->isStatic() || md->getBodyDef()==g_sourceFileDef)
{
g_theCallContext.setClass(stripClassName(md->typeString(),md->getOuterScope()));
g_theCallContext.setScope(stripClassName(md->typeString(),md->getOuterScope()));
return md;
}
return 0;
......@@ -816,7 +816,7 @@ static MemberDef *setCallContextForVar(const QCString &name)
(g_forceTagReference.isEmpty() || g_forceTagReference==md->getReference())
)
{
g_theCallContext.setClass(stripClassName(md->typeString(),md->getOuterScope()));
g_theCallContext.setScope(stripClassName(md->typeString(),md->getOuterScope()));
//printf("returning member %s in source file %s\n",md->name().data(),g_sourceFileDef->name().data());
return md;
}
......@@ -829,15 +829,15 @@ static MemberDef *setCallContextForVar(const QCString &name)
static void updateCallContextForSmartPointer()
{
ClassDef *cd = g_theCallContext.getClass();
//printf("updateCallContextForSmartPointer() cd=%s\n",cd ? cd->name().data() : "<none>");
Definition *d = g_theCallContext.getScope();
//printf("updateCallContextForSmartPointer() cd=%s\n",cd ? d->name().data() : "<none>");
MemberDef *md;
if (cd && (md=cd->isSmartPointer()))
if (d && d->definitionType()==Definition::TypeClass && (md=((ClassDef*)d)->isSmartPointer()))
{
ClassDef *ncd = stripClassName(md->typeString(),md->getOuterScope());
if (ncd)
{
g_theCallContext.setClass(ncd);
g_theCallContext.setScope(ncd);
//printf("Found smart pointer call %s->%s!\n",cd->name().data(),ncd->name().data());
}
}
......@@ -879,7 +879,7 @@ static bool getLinkInScope(const QCString &c, // scope
if (md->getGroupDef()) d = md->getGroupDef();
if (d && d->isLinkable())
{
g_theCallContext.setClass(stripClassName(md->typeString(),md->getOuterScope()));
g_theCallContext.setScope(stripClassName(md->typeString(),md->getOuterScope()));
//printf("g_currentDefinition=%p g_currentMemberDef=%p g_insideBody=%d\n",
// g_currentDefinition,g_currentMemberDef,g_insideBody);
......@@ -965,8 +965,18 @@ static void generateClassOrGlobalLink(CodeOutputInterface &ol,const char *clName
cd=getResolvedClass(d,g_sourceFileDef,bareName,&md); // try unspecialized version
}
}
NamespaceDef *nd = getResolvedNamespace(className);
if (nd)
{
g_theCallContext.setScope(nd);
addToSearchIndex(className);
writeMultiLineCodeLink(*g_code,nd,clName);
return;
}
//printf("md=%s\n",md?md->name().data():"<none>");
DBG_CTX((stderr,"is found as a type %s\n",cd?cd->name().data():"<null>"));
DBG_CTX((stderr,"is found as a type cd=%s nd=%s\n",
cd?cd->name().data():"<null>",
nd?nd->name().data():"<null>"));
if (cd==0 && md==0) // also see if it is variable or enum or enum value
{
if (getLink(g_classScope,clName,ol,clName,varOnly))
......@@ -981,7 +991,7 @@ static void generateClassOrGlobalLink(CodeOutputInterface &ol,const char *clName
if (lcd!=VariableContext::dummyContext)
{
//printf("non-dummy context lcd=%s!\n",lcd->name().data());
g_theCallContext.setClass(lcd);
g_theCallContext.setScope(lcd);
// the following is needed for links to a global variable, but is
// no good for a link to a local variable that is also a global symbol.
......@@ -1011,7 +1021,7 @@ static void generateClassOrGlobalLink(CodeOutputInterface &ol,const char *clName
}
writeMultiLineCodeLink(ol,cd,clName);
addToSearchIndex(className);
g_theCallContext.setClass(cd);
g_theCallContext.setScope(cd);
if (md)
{
Definition *d = md->getOuterScope()==Doxygen::globalScope ?
......@@ -1111,7 +1121,7 @@ static bool generateClassMemberLink(CodeOutputInterface &ol,MemberDef *xmd,const
ClassDef *typeClass = stripClassName(removeAnonymousScopes(xmd->typeString()),xmd->getOuterScope());
DBG_CTX((stderr,"%s -> typeName=%p\n",xmd->typeString(),typeClass));
g_theCallContext.setClass(typeClass);
g_theCallContext.setScope(typeClass);
Definition *xd = xmd->getOuterScope()==Doxygen::globalScope ?
xmd->getFileDef() : xmd->getOuterScope();
......@@ -1142,18 +1152,42 @@ static bool generateClassMemberLink(CodeOutputInterface &ol,MemberDef *xmd,const
return FALSE;
}
static bool generateClassMemberLink(CodeOutputInterface &ol,ClassDef *mcd,const char *memName)
static bool generateClassMemberLink(CodeOutputInterface &ol,Definition *def,const char *memName)
{
if (mcd)
if (def && def->definitionType()==Definition::TypeClass)
{
MemberDef *xmd = mcd->getMemberByName(memName);
//printf("generateClassMemberLink(class=%s,member=%s)=%p\n",mcd->name().data(),memName,xmd);
ClassDef *cd = (ClassDef*)def;
MemberDef *xmd = cd->getMemberByName(memName);
//printf("generateClassMemberLink(class=%s,member=%s)=%p\n",def->name().data(),memName,xmd);
if (xmd)
{
return generateClassMemberLink(ol,xmd,memName);
}
else
{
Definition *innerDef = cd->findInnerCompound(memName);
if (innerDef)
{
g_theCallContext.setScope(innerDef);
addToSearchIndex(memName);
writeMultiLineCodeLink(*g_code,innerDef,memName);
return TRUE;
}
}
}
else if (def && def->definitionType()==Definition::TypeNamespace)
{
NamespaceDef *nd = (NamespaceDef*)def;
//printf("Looking for %s inside namespace %s\n",memName,nd->name().data());
Definition *innerDef = nd->findInnerCompound(memName);
if (innerDef)
{
g_theCallContext.setScope(innerDef);
addToSearchIndex(memName);
writeMultiLineCodeLink(*g_code,innerDef,memName);
return TRUE;
}
}
return FALSE;
}
......@@ -1743,9 +1777,7 @@ B [ \t]
BN [ \t\n\r]
ID "$"?[a-z_A-Z\x80-\xFF][a-z_A-Z0-9\x80-\xFF]*
SEP ("::"|"\\")
SEPCS (".")
SCOPENAME ({SEP}{BN}*)?({ID}{BN}*{SEP}{BN}*)*("~"{BN}*)?{ID}
SCOPENAMECS ({SEPCS}{BN}*)?({ID}{BN}*{SEPCS}{BN}*)*("~"{BN}*)?{ID}
TEMPLIST "<"[^\"\}\{\(\)\/\n\>]*">"
SCOPETNAME (((({ID}{TEMPLIST}?){BN}*)?{SEP}{BN}*)*)((~{BN}*)?{ID})
SCOPEPREFIX ({ID}{TEMPLIST}?{BN}*{SEP}{BN}*)+
......@@ -2490,20 +2522,6 @@ RAWEND ")"[^ \t\(\)\\]{0,16}\"
generateClassOrGlobalLink(*g_code,yytext);
g_name+=yytext;
}
<Body>{SCOPENAMECS}/{BN}*[;,)\]] { // "int var;" or "var, var2" or "debug(f) macro"
if (!g_insideCS && !g_insideJava)
{
REJECT;
}
else
{
addType();
// changed this to generateFunctionLink, see bug 624514
//generateClassOrGlobalLink(*g_code,yytext,FALSE,TRUE);
generateFunctionLink(*g_code,yytext);
g_name+=yytext;
}
}
<Body>{SCOPENAME}/{BN}*[;,)\]] { // "int var;" or "var, var2" or "debug(f) macro"
addType();
// changed this to generateFunctionLink, see bug 624514
......@@ -2511,18 +2529,6 @@ RAWEND ")"[^ \t\(\)\\]{0,16}\"
generateFunctionLink(*g_code,yytext);
g_name+=yytext;
}
<Body>{SCOPENAMECS}/{B}* { // p->func()
if (!g_insideCS && !g_insideJava)
{
REJECT;
}
else
{
addType();
generateClassOrGlobalLink(*g_code,yytext);
g_name+=yytext;
}
}
<Body>{SCOPENAME}/{B}* { // p->func()
addType();
generateClassOrGlobalLink(*g_code,yytext);
......@@ -2657,9 +2663,9 @@ RAWEND ")"[^ \t\(\)\\]{0,16}\"
BEGIN( MemberCall );
}
<MemberCall>{SCOPETNAME}/{BN}*"(" {
if (g_theCallContext.getClass())
if (g_theCallContext.getScope())
{
if (!generateClassMemberLink(*g_code,g_theCallContext.getClass(),yytext))
if (!generateClassMemberLink(*g_code,g_theCallContext.getScope(),yytext))
{
g_code->codify(yytext);
addToSearchIndex(yytext);
......@@ -2684,10 +2690,10 @@ RAWEND ")"[^ \t\(\)\\]{0,16}\"
}
}
<MemberCall>{SCOPENAME}/{B}* {
if (g_theCallContext.getClass())
if (g_theCallContext.getScope())
{
DBG_CTX((stderr,"g_theCallContext.getClass()=%p\n",g_theCallContext.getClass()));
if (!generateClassMemberLink(*g_code,g_theCallContext.getClass(),yytext))
DBG_CTX((stderr,"g_theCallContext.getClass()=%p\n",g_theCallContext.getScope()));
if (!generateClassMemberLink(*g_code,g_theCallContext.getScope(),yytext))
{
g_code->codify(yytext);
addToSearchIndex(yytext);
......@@ -2733,7 +2739,7 @@ RAWEND ")"[^ \t\(\)\\]{0,16}\"
if (*yytext!='[' && !g_type.isEmpty())
{
//printf("g_scopeStack.bottom()=%p\n",g_scopeStack.bottom());
if (g_scopeStack.top()!=CLASSBLOCK)
//if (g_scopeStack.top()!=CLASSBLOCK) // commented out for bug731363
{
//printf("AddVariable: '%s' '%s' context=%d\n",
// g_type.data(),g_name.data(),g_theVarContext.count());
......@@ -3008,7 +3014,7 @@ RAWEND ")"[^ \t\(\)\\]{0,16}\"
g_theVarContext.addVariable(g_type,g_name);
}
g_parmType.resize(0);g_parmName.resize(0);
g_theCallContext.setClass(0);
g_theCallContext.setScope(0);
if (*yytext==';' || g_insideBody)
{
if (!g_insideBody)
......
......@@ -3076,7 +3076,7 @@ to be found in the default search path.
<option type='string' id='DOT_FONTNAME' format='string' defval='Helvetica' depends='HAVE_DOT'>
<docs>
<![CDATA[
When you want a differently looking font n the dot files that doxygen generates
When you want a differently looking font in the dot files that doxygen generates
you can specify the font name
using \c DOT_FONTNAME. You need to make sure dot is able to find the font,
which can be done by putting it in a standard location or by setting the
......
......@@ -18,7 +18,6 @@ import re
import textwrap
from xml.dom import minidom, Node
def transformDocs(doc):
# join lines, unless it is an empty line
# remove doxygen layout constructs
......@@ -112,7 +111,7 @@ def addValues(var, node):
if (n.nodeName == "value"):
if n.nodeType == Node.ELEMENT_NODE:
name = n.getAttribute('name')
print " %s->addValue(\"%s\");" % (var, name)
print(" %s->addValue(\"%s\");" % (var, name))
def parseHeader(node,objName):
......@@ -123,15 +122,15 @@ def parseHeader(node,objName):
if (n.getAttribute('doxyfile') != "0"):
doc += parseDocs(n)
docC = transformDocs(doc)
print " %s->setHeader(" % (objName)
print(" %s->setHeader(" % (objName))
rng = len(docC)
for i in range(rng):
line = docC[i]
if i != rng - 1: # since we go from 0 to rng-1
print " \"%s\\n\"" % (line)
print(" \"%s\\n\"" % (line))
else:
print " \"%s\"" % (line)
print " );"
print(" \"%s\"" % (line))
print(" );")
def prepCDocs(node):
......@@ -201,7 +200,7 @@ def prepCDocs(node):
else:
if abspath == '1':
doc += "<br/>The file has to be specified with full path."
elif file =='image':
elif format =='image':
abspath = node.getAttribute('abspath')
if defval != '':
if abspath != '1':
......@@ -238,8 +237,8 @@ def parseOption(node):
setting = node.getAttribute('setting')
docC = prepCDocs(node);
if len(setting) > 0:
print "#if %s" % (setting)
print " //----"
print("#if %s" % (setting))
print(" //----")
if type == 'bool':
if len(adefval) > 0:
enabled = adefval
......@@ -247,108 +246,108 @@ def parseOption(node):
enabled = "TRUE"
else:
enabled = "FALSE"
print " cb = cfg->addBool("
print " \"%s\"," % (name)
print(" cb = cfg->addBool(")
print(" \"%s\"," % (name))
rng = len(docC)
for i in range(rng):
line = docC[i]
if i != rng - 1: # since we go from 0 to rng-1
print " \"%s\\n\"" % (line)
print(" \"%s\\n\"" % (line))
else:
print " \"%s\"," % (line)
print " %s" % (enabled)
print " );"
print(" \"%s\"," % (line))
print(" %s" % (enabled))
print(" );")
if depends != '':
print " cb->addDependency(\"%s\");" % (depends)
print(" cb->addDependency(\"%s\");" % (depends))
elif type == 'string':
print " cs = cfg->addString("
print " \"%s\"," % (name)
print(" cs = cfg->addString(")
print(" \"%s\"," % (name))
rng = len(docC)
for i in range(rng):
line = docC[i]
if i != rng - 1: # since we go from 0 to rng-1
print " \"%s\\n\"" % (line)
print(" \"%s\\n\"" % (line))
else:
print " \"%s\"" % (line)
print " );"
print(" \"%s\"" % (line))
print(" );")
if defval != '':
print " cs->setDefaultValue(\"%s\");" % (defval)
print(" cs->setDefaultValue(\"%s\");" % (defval))
if format == 'file':
print " cs->setWidgetType(ConfigString::File);"
print(" cs->setWidgetType(ConfigString::File);")
elif format == 'image':
print " cs->setWidgetType(ConfigString::Image);"
print(" cs->setWidgetType(ConfigString::Image);")
elif format == 'dir':
print " cs->setWidgetType(ConfigString::Dir);"
print(" cs->setWidgetType(ConfigString::Dir);")
if depends != '':
print " cs->addDependency(\"%s\");" % (depends)
print(" cs->addDependency(\"%s\");" % (depends))
elif type == 'enum':
print " ce = cfg->addEnum("
print " \"%s\"," % (name)
print(" ce = cfg->addEnum(")
print(" \"%s\"," % (name))
rng = len(docC)
for i in range(rng):
line = docC[i]
if i != rng - 1: # since we go from 0 to rng-1
print " \"%s\\n\"" % (line)
print(" \"%s\\n\"" % (line))
else:
print " \"%s\"," % (line)
print " \"%s\"" % (defval)
print " );"
print(" \"%s\"," % (line))
print(" \"%s\"" % (defval))
print(" );")
addValues("ce", node)
if depends != '':
print " ce->addDependency(\"%s\");" % (depends)
print(" ce->addDependency(\"%s\");" % (depends))
elif type == 'int':
minval = node.getAttribute('minval')
maxval = node.getAttribute('maxval')
print " ci = cfg->addInt("
print " \"%s\"," % (name)
print(" ci = cfg->addInt(")
print(" \"%s\"," % (name))
rng = len(docC)
for i in range(rng):
line = docC[i]
if i != rng - 1: # since we go from 0 to rng-1
print " \"%s\\n\"" % (line)
print(" \"%s\\n\"" % (line))
else:
print " \"%s\"," % (line)
print " %s,%s,%s" % (minval, maxval, defval)
print " );"
print(" \"%s\"," % (line))
print(" %s,%s,%s" % (minval, maxval, defval))
print(" );")
if depends != '':
print " ci->addDependency(\"%s\");" % (depends)
print(" ci->addDependency(\"%s\");" % (depends))
elif type == 'list':
print " cl = cfg->addList("
print " \"%s\"," % (name)
print(" cl = cfg->addList(")
print(" \"%s\"," % (name))
rng = len(docC)
for i in range(rng):
line = docC[i]
if i != rng - 1: # since we go from 0 to rng-1
print " \"%s\\n\"" % (line)
print(" \"%s\\n\"" % (line))
else:
print " \"%s\"" % (line)
print " );"
print(" \"%s\"" % (line))
print(" );")
addValues("cl", node)
if depends != '':
print " cl->addDependency(\"%s\");" % (depends)
print(" cl->addDependency(\"%s\");" % (depends))
if format == 'file':
print " cl->setWidgetType(ConfigList::File);"
print(" cl->setWidgetType(ConfigList::File);")
elif format == 'dir':
print " cl->setWidgetType(ConfigList::Dir);"
print(" cl->setWidgetType(ConfigList::Dir);")
elif format == 'filedir':
print " cl->setWidgetType(ConfigList::FileAndDir);"
print(" cl->setWidgetType(ConfigList::FileAndDir);")
elif type == 'obsolete':
print " cfg->addObsolete(\"%s\");" % (name)
print(" cfg->addObsolete(\"%s\");" % (name))
if len(setting) > 0:
print "#else"
print " cfg->addDisabled(\"%s\");" % (name)
print "#endif"
print("#else")
print(" cfg->addDisabled(\"%s\");" % (name))
print("#endif")
def parseGroups(node):
name = node.getAttribute('name')
doc = node.getAttribute('docs')
print "%s%s" % (" //-----------------------------------------",
"----------------------------------")
print " cfg->addInfo(\"%s\",\"%s\");" % (name, doc)
print "%s%s" % (" //-----------------------------------------",
"----------------------------------")
print
print("%s%s" % (" //-----------------------------------------",
"----------------------------------"))
print(" cfg->addInfo(\"%s\",\"%s\");" % (name, doc))
print("%s%s" % (" //-----------------------------------------",
"----------------------------------"))
print("")
for n in node.childNodes:
if n.nodeType == Node.ELEMENT_NODE:
parseOption(n)
......@@ -360,16 +359,16 @@ def parseGroupCDocs(node):
name = n.getAttribute('id')
docC = prepCDocs(n);
if type != 'obsolete':
print " doc->add("
print " \"%s\"," % (name)
print(" doc->add(")
print(" \"%s\"," % (name))
rng = len(docC)
for i in range(rng):
line = docC[i]
if i != rng - 1: # since we go from 0 to rng-1
print " \"%s\\n\"" % (line)
print(" \"%s\\n\"" % (line))
else:
print " \"%s\"" % (line)
print " );"
print(" \"%s\"" % (line))
print(" );")
def parseOptionDoc(node, first):
# Handling part for documentation
......@@ -388,52 +387,52 @@ def parseOptionDoc(node, first):
if n.nodeType == Node.ELEMENT_NODE:
doc += parseDocs(n)
if (first):
print " \\anchor cfg_%s" % (name.lower())
print "<dl>"
print ""
print "<dt>\\c %s <dd>" % (name)
print(" \\anchor cfg_%s" % (name.lower()))
print("<dl>")
print("")
print("<dt>\\c %s <dd>" % (name))
else:
print " \\anchor cfg_%s" % (name.lower())
print "<dt>\\c %s <dd>" % (name)
print " \\addindex %s" % (name)
print doc
print(" \\anchor cfg_%s" % (name.lower()))
print("<dt>\\c %s <dd>" % (name))
print(" \\addindex %s" % (name))
print(doc)
if (type == 'enum'):
values = collectValues(node)
print ""
print "Possible values are: "
print("")
print("Possible values are: ")
rng = len(values)
for i in range(rng):
val = values[i]
if i == rng - 2:
print "%s and " % (val)
print("%s and " % (val))
elif i == rng - 1:
print "%s." % (val)
print("%s." % (val))
else:
print "%s, " % (val)
print("%s, " % (val))
if (defval != ""):
print ""
print ""
print "The default value is: <code>%s</code>." % (defval)
print ""
print("")
print("")
print("The default value is: <code>%s</code>." % (defval))
print("")
elif (type == 'int'):
minval = node.getAttribute('minval')
maxval = node.getAttribute('maxval')
print ""
print ""
print "%s: %s%s%s, %s: %s%s%s, %s: %s%s%s." % (
print("")
print("")
print("%s: %s%s%s, %s: %s%s%s, %s: %s%s%s." % (
" Minimum value", "<code>", minval, "</code>",
"maximum value", "<code>", maxval, "</code>",
"default value", "<code>", defval, "</code>")
print ""
"default value", "<code>", defval, "</code>"))
print("")
elif (type == 'bool'):
print ""
print ""
print("")
print("")
if (node.hasAttribute('altdefval')):
print "The default value is: system dependent."
print("The default value is: system dependent.")
else:
print "The default value is: <code>%s</code>." % (
"YES" if (defval == "1") else "NO")
print ""
print("The default value is: <code>%s</code>." % (
"YES" if (defval == "1") else "NO"))
print("")
elif (type == 'list'):
if format == 'string':
values = collectValues(node)
......@@ -441,67 +440,67 @@ def parseOptionDoc(node, first):
for i in range(rng):
val = values[i]
if i == rng - 2:
print "%s and " % (val)
print("%s and " % (val))
elif i == rng - 1:
print "%s." % (val)
print("%s." % (val))
else:
print "%s, " % (val)
print ""
print("%s, " % (val))
print("")
elif (type == 'string'):
if format == 'dir':
if defval != '':
print ""
print "The default directory is: <code>%s</code>." % (
defval)
print("")
print("The default directory is: <code>%s</code>." % (
defval))
elif format == 'file':
abspath = node.getAttribute('abspath')
if defval != '':
print ""
print("")
if abspath != '1':
print "The default file is: <code>%s</code>." % (
defval)
print("The default file is: <code>%s</code>." % (
defval))
else:
print "%s: %s%s%s." % (
print("%s: %s%s%s." % (
"The default file (with absolute path) is",
"<code>",defval,"</code>")
"<code>",defval,"</code>"))
else:
if abspath == '1':
print ""
print "The file has to be specified with full path."
elif file =='image':
print("")
print("The file has to be specified with full path.")
elif format =='image':
abspath = node.getAttribute('abspath')
if defval != '':
print ""
print("")
if abspath != '1':
print "The default image is: <code>%s</code>." % (
defval)
print("The default image is: <code>%s</code>." % (
defval))
else:
print "%s: %s%s%s." % (
print("%s: %s%s%s." % (
"The default image (with absolute path) is",
"<code>",defval,"</code>")
"<code>",defval,"</code>"))
else:
if abspath == '1':
print ""
print "The image has to be specified with full path."
print("")
print("The image has to be specified with full path.")
else: # format == 'string':
if defval != '':
print ""
print "The default value is: <code>%s</code>." % (
defval)
print ""
print("")
print("The default value is: <code>%s</code>." % (
defval))
print("")
# depends handling
if (node.hasAttribute('depends')):
depends = node.getAttribute('depends')
print ""
print "%s \\ref cfg_%s \"%s\" is set to \\c YES." % (
"This tag requires that the tag", depends.lower(), depends.upper())
print("")
print("%s \\ref cfg_%s \"%s\" is set to \\c YES." % (
"This tag requires that the tag", depends.lower(), depends.upper()))
return False
def parseGroupsDoc(node):
name = node.getAttribute('name')
doc = node.getAttribute('docs')
print "\section config_%s %s" % (name.lower(), doc)
print("\section config_%s %s" % (name.lower(), doc))
# Start of list has been moved to the first option for better
# anchor placement
# print "<dl>"
......@@ -511,7 +510,7 @@ def parseGroupsDoc(node):
if n.nodeType == Node.ELEMENT_NODE:
first = parseOptionDoc(n, first)
if (not first):
print "</dl>"
print("</dl>")
def parseGroupsList(node, commandsList):
......@@ -542,7 +541,7 @@ def parseHeaderDoc(node):
if (n.nodeName == "docs"):
if (n.getAttribute('documentation') != "0"):
doc += parseDocs(n)
print doc
print(doc)
def parseFooterDoc(node):
......@@ -552,7 +551,7 @@ def parseFooterDoc(node):
if (n.nodeName == "docs"):
if (n.getAttribute('documentation') != "0"):
doc += parseDocs(n)
print doc
print(doc)
def main():
......@@ -561,16 +560,17 @@ def main():
try:
doc = xml.dom.minidom.parse(sys.argv[2])
except Exception as inst:
print >> sys.stderr
print >> sys.stderr, inst
print >> sys.stderr
sys.stdout = sys.stderr
print("")
print(inst)
print("")
sys.exit(1)
elem = doc.documentElement
if (sys.argv[1] == "-doc"):
print "/* WARNING: This file is generated!"
print " * Do not edit this file, but edit config.xml instead and run"
print " * python configgen.py -doc config.xml to regenerate this file!"
print " */"
print("/* WARNING: This file is generated!")
print(" * Do not edit this file, but edit config.xml instead and run")
print(" * python configgen.py -doc config.xml to regenerate this file!")
print(" */")
# process header
for n in elem.childNodes:
if n.nodeType == Node.ELEMENT_NODE:
......@@ -582,10 +582,10 @@ def main():
if n.nodeType == Node.ELEMENT_NODE:
if (n.nodeName == "group"):
commandsList = parseGroupsList(n, commandsList)
print "\\secreflist"
print("\\secreflist")
for x in sorted(commandsList):
print "\\refitem cfg_%s %s" % (x.lower(), x)
print "\\endsecreflist"
print("\\refitem cfg_%s %s" % (x.lower(), x))
print("\\endsecreflist")
# process groups and options
for n in elem.childNodes:
if n.nodeType == Node.ELEMENT_NODE:
......@@ -597,24 +597,24 @@ def main():
if (n.nodeName == "footer"):
parseFooterDoc(n)
elif (sys.argv[1] == "-cpp"):
print "/* WARNING: This file is generated!"
print " * Do not edit this file, but edit config.xml instead and run"
print " * python configgen.py -cpp config.xml to regenerate this file!"
print " */"
print ""
print "#include \"configoptions.h\""
print "#include \"config.h\""
print "#include \"portable.h\""
print "#include \"settings.h\""
print ""
print "void addConfigOptions(Config *cfg)"
print "{"
print " ConfigString *cs;"
print " ConfigEnum *ce;"
print " ConfigList *cl;"
print " ConfigInt *ci;"
print " ConfigBool *cb;"
print ""
print("/* WARNING: This file is generated!")
print(" * Do not edit this file, but edit config.xml instead and run")
print(" * python configgen.py -cpp config.xml to regenerate this file!")
print(" */")
print("")
print("#include \"configoptions.h\"")
print("#include \"config.h\"")
print("#include \"portable.h\"")
print("#include \"settings.h\"")
print("")
print("void addConfigOptions(Config *cfg)")
print("{")
print(" ConfigString *cs;")
print(" ConfigEnum *ce;")
print(" ConfigList *cl;")
print(" ConfigInt *ci;")
print(" ConfigBool *cb;")
print("")
# process header
for n in elem.childNodes:
if n.nodeType == Node.ELEMENT_NODE:
......@@ -624,17 +624,17 @@ def main():
if n.nodeType == Node.ELEMENT_NODE:
if (n.nodeName == "group"):
parseGroups(n)
print "}"
print("}")
elif (sys.argv[1] == "-wiz"):
print "/* WARNING: This file is generated!"
print " * Do not edit this file, but edit config.xml instead and run"
print " * python configgen.py -wiz config.xml to regenerate this file!"
print " */"
print "#include \"configdoc.h\""
print "#include \"docintf.h\""
print ""
print "void addConfigDocs(DocIntf *doc)"
print "{"
print("/* WARNING: This file is generated!")
print(" * Do not edit this file, but edit config.xml instead and run")
print(" * python configgen.py -wiz config.xml to regenerate this file!")
print(" */")
print("#include \"configdoc.h\"")
print("#include \"docintf.h\"")
print("")
print("void addConfigDocs(DocIntf *doc)")
print("{")
for n in elem.childNodes:
if n.nodeType == Node.ELEMENT_NODE:
if (n.nodeName == "header"):
......@@ -643,7 +643,7 @@ def main():
if n.nodeType == Node.ELEMENT_NODE:
if (n.nodeName == "group"):
parseGroupCDocs(n)
print "}"
print("}")
if __name__ == '__main__':
main()
......@@ -2,7 +2,7 @@ import sys
if (len(sys.argv) > 1):
if (sys.argv[1] == "ENONLY"):
print "#define ENGLISH_ONLY"
print("#define ENGLISH_ONLY")
else:
for x in xrange(1, len(sys.argv)):
print "#define LANG_%s"%(sys.argv[x])
for x in range(1, len(sys.argv)):
print("#define LANG_%s"%(sys.argv[x]))
......@@ -15,7 +15,7 @@ for f in files:
# generating file is lang_cfg.py
# the rules file has to output lang_cfg.h
#
print """\
print("""\
<?xml version="1.0" encoding="utf-8"?>
<VisualStudioToolFile
Name="languages"
......@@ -52,7 +52,7 @@ print """\
/>
</Values>
</EnumProperty>
"""
""")
#
# generate loop, English is mandatory (so cannot be chosen)
#
......@@ -76,7 +76,7 @@ for f in new_list:
l1 = l.replace("-","")
# capitalize first letter
l = l.title()
print """\
print("""\
<EnumProperty
Name="%s"
DisplayName="Use %s"
......@@ -96,11 +96,11 @@ for f in new_list:
/>
</Values>
</EnumProperty>
""" % (l1, l, l, l, f[1], l)
""" % (l1, l, l, l, f[1], l))
print """\
print("""\
</Properties>
</CustomBuildRule>
</Rules>
</VisualStudioToolFile>
"""
""")
......@@ -290,6 +290,7 @@ static void writeDefaultHeaderPart1(FTextStream &t)
// Load required packages
t << "% Packages required by doxygen\n"
"\\usepackage{fixltx2e}\n" // for \textsubscript
"\\usepackage{calc}\n"
"\\usepackage{doxygen}\n"
"\\usepackage{graphicx}\n"
......@@ -297,7 +298,6 @@ static void writeDefaultHeaderPart1(FTextStream &t)
"\\usepackage{makeidx}\n"
"\\usepackage{multicol}\n"
"\\usepackage{multirow}\n"
"\\usepackage{fixltx2e}\n" // for \textsubscript
"\\PassOptionsToPackage{warn}{textcomp}\n"
"\\usepackage{textcomp}\n"
"\\usepackage[nointegrals]{wasysym}\n"
......
#if defined(__APPLE__) || defined(macintosh)
// define this before including iconv.h to avoid a mapping of
// iconv_open and friends to libicon_open (done by mac ports),
// while the symbols without 'lib' are linked from /usr/lib/libiconv
#define LIBICONV_PLUG
#endif
#include <iconv.h>
// These functions are implemented in a C file, because there are different
......
......@@ -6675,6 +6675,18 @@ static void parseCompounds(Entry *rt)
current = new Entry;
gstat = FALSE;
initEntry();
// deep copy group list from parent (see bug 727732)
if (rt->groups)
{
QListIterator<Grouping> gli(*rt->groups);
Grouping *g;
for (;(g=gli.current());++gli)
{
ce->groups->append(new Grouping(*g));
}
}
int ni=ce->name.findRev("::"); if (ni==-1) ni=0; else ni+=2;
// set default protection based on the compound type
if( ce->section==Entry::CLASS_SEC ) // class
......
......@@ -1798,6 +1798,7 @@ nextChar:
)
)
{
if (c=='\t') c=' ';
if (c=='*' || c=='&' || c=='@' || c=='$')
{
//uint rl=result.length();
......@@ -1827,8 +1828,8 @@ nextChar:
}
}
}
//printf("removeRedundantWhiteSpace(`%s')=`%s'\n",s.data(),result.data());
growBuf.addChar(0);
//printf("removeRedundantWhiteSpace(`%s')=`%s'\n",s.data(),growBuf.get());
//result.resize(resultPos);
return growBuf.get();
}
......