translator.py
Go to the documentation of this file.
1 """Script to generate reports on translator classes from Doxygen sources.
2 
3  The main purpose of the script is to extract the information from sources
4  related to internationalization (the translator classes). It uses the
5  information to generate documentation (language.doc,
6  translator_report.txt) from templates (language.tpl, maintainers.txt).
7 
8  Simply run the script without parameters to get the reports and
9  documentation for all supported languages. If you want to generate the
10  translator report only for some languages, pass their codes as arguments
11  to the script. In that case, the language.doc will not be generated.
12  Example:
13 
14  python translator.py en nl cz
15 
16  Originally, the script was written in Perl and was known as translator.pl.
17  The last Perl version was dated 2002/05/21 (plus some later corrections)
18 
19  Petr Prikryl (prikryl at atlas dot cz)
20 
21  History:
22  --------
23  2002/05/21 - This was the last Perl version.
24  2003/05/16 - List of language marks can be passed as arguments.
25  2004/01/24 - Total reimplementation started: classes TrManager, and Transl.
26  2004/02/05 - First version that produces translator report. No language.doc yet.
27  2004/02/10 - First fully functional version that generates both the translator
28  report and the documentation. It is a bit slower than the
29  Perl version, but is much less tricky and much more flexible.
30  It also solves some problems that were not solved by the Perl
31  version. The translator report content should be more useful
32  for developers.
33  2004/02/11 - Some tuning-up to provide more useful information.
34  2004/04/16 - Added new tokens to the tokenizer (to remove some warnings).
35  2004/05/25 - Added from __future__ import generators not to force Python 2.3.
36  2004/06/03 - Removed dependency on textwrap module.
37  2004/07/07 - Fixed the bug in the fill() function.
38  2004/07/21 - Better e-mail mangling for HTML part of language.doc.
39  - Plural not used for reporting a single missing method.
40  - Removal of not used translator adapters is suggested only
41  when the report is not restricted to selected languages
42  explicitly via script arguments.
43  2004/07/26 - Better reporting of not-needed adapters.
44  2004/10/04 - Reporting of not called translator methods added.
45  2004/10/05 - Modified to check only doxygen/src sources for the previous report.
46  2005/02/28 - Slight modification to generate "mailto.txt" auxiliary file.
47  2005/08/15 - Doxygen's root directory determined primarily from DOXYGEN
48  environment variable. When not found, then relatively to the script.
49  2007/03/20 - The "translate me!" searched in comments and reported if found.
50  2008/06/09 - Warning when the MAX_DOT_GRAPH_HEIGHT is still part of trLegendDocs().
51  2009/05/09 - Changed HTML output to fit it with XHTML DTD
52  2009/09/02 - Added percentage info to the report (implemented / to be implemented).
53  2010/02/09 - Added checking/suggestion 'Reimplementation using UTF-8 suggested.
54  2010/03/03 - Added [unreachable] prefix used in maintainers.txt.
55  2010/05/28 - BOM skipped; minor code cleaning.
56  2010/05/31 - e-mail mangled already in maintainers.txt
57 2010/08/20 - maintainers.txt to UTF-8, related processing of unicode strings
58  - [any mark] introduced instead of [unreachable] only
59 - marks highlighted in HTML
60  2010/08/30 - Highlighting in what will be the table in langhowto.html modified.
61  2010/09/27 - The underscore in \latexonly part of the generated language.doc
62  was prefixed by backslash (was LaTeX related error).
63  2013/02/19 - Better diagnostics when translator_xx.h is too crippled.
64  2013/06/25 - TranslatorDecoder checks removed after removing the class.
65  2013/09/04 - Coloured status in langhowto. *ALMOST up-to-date* category
66  of translators introduced.
67  2014/06/16 - unified for Python 2.6+ and 3.0+
68  """
69 
70 from __future__ import print_function
71 
72 import os
73 import platform
74 import re
75 import sys
76 import textwrap
77 
78 
def xopen(fname, mode='r', encoding='utf-8-sig'):
    '''Unified file opening for Python 2 and Python 3.

    Python 2 does not have the encoding argument. Python 3 has one, and
    the default 'utf-8-sig' is used (skips the BOM automatically).

    Note: the interpreter version is taken from sys.version_info instead
    of parsing platform.python_version_tuple() -- the latter can contain
    a non-numeric patch level (e.g. '0rc1') that would crash int().
    '''

    if sys.version_info[0] == 2:
        return open(fname, mode=mode)                     # Python 2 without encoding
    else:
        return open(fname, mode=mode, encoding=encoding)  # Python 3 with encoding
92 
def fill(s):
    """Return *s* reflowed as a wrapped multi-line paragraph string.

    All runs of whitespace are first collapsed to single spaces and the
    text is stripped; the words are then greedily packed into lines of
    at most 70 characters (the joining space is what brings a 69-char
    sum up to the 70-char limit)."""

    # Normalize whitespace and break the text into words.
    words = re.sub(r'\s+', ' ', s).strip().split()
    if not words:
        return ''

    # Greedily pack the words into lines; the first word of a line never
    # gets a separating space in front of it.
    lines = []
    current = words[0]
    for word in words[1:]:
        if len(current) + len(word) < 70:
            current = current + ' ' + word   # still fits on this line
        else:
            lines.append(current)            # line is full
            current = word                   # start the next line
    lines.append(current)                    # the last (partial) line
    return '\n'.join(lines)
117 
118 
119 class Transl:
120  """One instance is built for each translator.
121 
122  The abbreviation of the source file--part after 'translator_'--is used as
123  the identification of the object. The empty string is used for the
124  abstract Translator class from translator.h. The other information is
125  extracted from inside the source file."""
126 
127  def __init__(self, fname, manager):
128  """Bind to the manager and initialize."""
129 
130  # Store the filename and the reference to the manager object.
131  self.fname = fname
132  self.manager = manager
133 
134  # The instance is responsible for loading the source file, so it checks
135  # for its existence and quits if something goes wrong.
136  if not os.path.isfile(fname):
137  sys.stderr.write("\a\nFile '%s' not found!\n" % fname)
138  sys.exit(1)
139 
140  # Initialize the other collected information.
141  self.classId = None
142  self.baseClassId = None
143  self.readableStatus = None # 'up-to-date', '1.2.3', '1.3', etc.
144  self.status = None # '', '1.2.03', '1.3.00', etc.
145  self.lang = None # like 'Brasilian'
146  self.langReadable = None # like 'Brasilian Portuguese'
147  self.note = None # like 'should be cleaned up'
148  self.prototypeDic = {} # uniPrototype -> prototype
149  self.translateMeText = 'translate me!'
150  self.translateMeFlag = False # comments with "translate me!" found
151  self.txtMAX_DOT_GRAPH_HEIGHT_flag = False # found in string in trLegendDocs()
152  self.obsoleteMethods = None # list of prototypes to be removed
153  self.missingMethods = None # list of prototypes to be implemented
154  self.implementedMethods = None # list of implemented required methods
155  self.adaptMinClass = None # The newest adapter class that can be used
156 
157  def __tokenGenerator(self):
158  """Generator that reads the file and yields tokens as 4-tuples.
159 
160  The tokens have the form (tokenId, tokenString, lineNo). The
161  last returned token has the form ('eof', None, None). When trying
162  to access next token afer that, the exception would be raised."""
163 
164  # Set the dictionary for recognizing tokenId for keywords, separators
165  # and the similar categories. The key is the string to be recognized,
166  # the value says its token identification.
167  tokenDic = { 'class': 'class',
168  'const': 'const',
169  'public': 'public',
170  'protected': 'protected',
171  'private': 'private',
172  'static': 'static',
173  'virtual': 'virtual',
174  ':': 'colon',
175  ';': 'semic',
176  ',': 'comma',
177  '[': 'lsqbra',
178  ']': 'rsqbra',
179  '(': 'lpar',
180  ')': 'rpar',
181  '{': 'lcurly',
182  '}': 'rcurly',
183  '=': 'assign',
184  '*': 'star',
185  '&': 'amp',
186  '+': 'plus',
187  '-': 'minus',
188  '!': 'excl',
189  '?': 'qmark',
190  '<': 'lt',
191  '>': 'gt',
192  "'": 'quot',
193  '"': 'dquot',
194  '.': 'dot',
195  '%': 'perc',
196  '~': 'tilde',
197  '^': 'caret',
198  }
199 
200  # Regular expression for recognizing identifiers.
201  rexId = re.compile(r'^[a-zA-Z]\w*$')
202 
203  # Open the file for reading and extracting tokens until the eof.
204  # Initialize the finite automaton.
205  f = xopen(self.fname)
206  lineNo = 0
207  line = '' # init -- see the pos initialization below
208  linelen = 0 # init
209  pos = 100 # init -- pos after the end of line
210  status = 0
211 
212  tokenId = None # init
213  tokenStr = '' # init -- the characters will be appended.
214  tokenLineNo = 0
215 
216  while status != 777:
217 
218  # Get the next character. Read next line first, if necessary.
219  if pos < linelen:
220  c = line[pos]
221  else:
222  lineNo += 1
223  line = f.readline()
224  linelen = len(line)
225  pos = 0
226  if line == '': # eof
227  status = 777
228  else:
229  c = line[pos]
230 
231  # Consume the character based on the status
232 
233  if status == 0: # basic status
234 
235  # This is the initial status. If tokenId is set, yield the
236  # token here and only here (except when eof is found).
237  # Initialize the token variables after the yield.
238  if tokenId:
239  # If it is an unknown item, it can still be recognized
240  # here. Keywords and separators are the example.
241  if tokenId == 'unknown':
242  if tokenStr in tokenDic:
243  tokenId = tokenDic[tokenStr]
244  elif tokenStr.isdigit():
245  tokenId = 'num'
246  elif rexId.match(tokenStr):
247  tokenId = 'id'
248  else:
249  msg = '\aWarning: unknown token "' + tokenStr + '"'
250  msg += '\tfound on line %d' % tokenLineNo
251  msg += ' in "' + self.fname + '".\n'
252  sys.stderr.write(msg)
253 
254  yield (tokenId, tokenStr, tokenLineNo)
255 
256  # If it is a comment that contains the self.translateMeText
257  # string, set the flag -- the situation will be reported.
258  if tokenId == 'comment' and tokenStr.find(self.translateMeText) >= 0:
259  self.translateMeFlag = True
260 
261  tokenId = None
262  tokenStr = ''
263  tokenLineNo = 0
264 
265  # Now process the character. When we just skip it (spaces),
266  # stay in this status. All characters that will be part of
267  # some token cause moving to the specific status. And only
268  # when moving to the status == 0 (or the final state 777),
269  # the token is yielded. With respect to that the automaton
270  # behaves as Moore's one (output bound to status). When
271  # collecting tokens, the automaton is the Mealy's one
272  # (actions bound to transitions).
273  if c.isspace():
274  pass # just skip whitespace characters
275  elif c == '/': # Possibly comment starts here, but
276  tokenId = 'unknown' # it could be only a slash in code.
277  tokenStr = c
278  tokenLineNo = lineNo
279  status = 1
280  elif c == '#':
281  tokenId = 'preproc' # preprocessor directive
282  tokenStr = c
283  tokenLineNo = lineNo
284  status = 5
285  elif c == '"': # string starts here
286  tokenId = 'string'
287  tokenStr = c
288  tokenLineNo = lineNo
289  status = 6
290  elif c == "'": # char literal starts here
291  tokenId = 'charlit'
292  tokenStr = c
293  tokenLineNo = lineNo
294  status = 8
295  elif c in tokenDic: # known one-char token
296  tokenId = tokenDic[c]
297  tokenStr = c
298  tokenLineNo = lineNo
299  # stay in this state to yield token immediately
300  else:
301  tokenId = 'unknown' # totally unknown
302  tokenStr = c
303  tokenLineNo = lineNo
304  status = 333
305 
306  pos += 1 # move position in any case
307 
308  elif status == 1: # possibly a comment
309  if c == '/': # ... definitely the C++ comment
310  tokenId = 'comment'
311  tokenStr += c
312  pos += 1
313  status = 2
314  elif c == '*': # ... definitely the C comment
315  tokenId = 'comment'
316  tokenStr += c
317  pos += 1
318  status = 3
319  else:
320  status = 0 # unrecognized, don't move pos
321 
322  elif status == 2: # inside the C++ comment
323  if c == '\n': # the end of C++ comment
324  status = 0 # yield the token
325  else:
326  tokenStr += c # collect the C++ comment
327  pos += 1
328 
329  elif status == 3: # inside the C comment
330  if c == '*': # possibly the end of the C comment
331  tokenStr += c
332  status = 4
333  else:
334  tokenStr += c # collect the C comment
335  pos += 1
336 
337  elif status == 4: # possibly the end of the C comment
338  if c == '/': # definitely the end of the C comment
339  tokenStr += c
340  status = 0 # yield the token
341  elif c == '*': # more stars inside the comment
342  tokenStr += c
343  else:
344  tokenStr += c # this cannot be the end of comment
345  status = 3
346  pos += 1
347 
348  elif status == 5: # inside the preprocessor directive
349  if c == '\n': # the end of the preproc. command
350  status = 0 # yield the token
351  else:
352  tokenStr += c # collect the preproc
353  pos += 1
354 
355  elif status == 6: # inside the string
356  if c == '\\': # escaped char inside the string
357  tokenStr += c
358  status = 7
359  elif c == '"': # end of the string
360  tokenStr += c
361  status = 0
362  else:
363  tokenStr += c # collect the chars of the string
364  pos += 1
365 
366  elif status == 7: # escaped char inside the string
367  tokenStr += c # collect the char of the string
368  status = 6
369  pos += 1
370 
371  elif status == 8: # inside the char literal
372  tokenStr += c # collect the char of the literal
373  status = 9
374  pos += 1
375 
376  elif status == 9: # end of char literal expected
377  if c == "'": # ... and found
378  tokenStr += c
379  status = 0
380  pos += 1
381  else:
382  tokenId = 'error' # end of literal was expected
383  tokenStr += c
384  status = 0
385 
386  elif status == 333: # start of the unknown token
387  if c.isspace():
388  pos += 1
389  status = 0 # tokenId may be determined later
390  elif c in tokenDic: # separator, don't move pos
391  status = 0
392  else:
393  tokenStr += c # collect
394  pos += 1
395 
396  # We should have finished in the final status. If some token
397  # have been extracted, yield it first.
398  assert(status == 777)
399  if tokenId:
400  yield (tokenId, tokenStr, tokenLineNo)
401  tokenId = None
402  tokenStr = ''
403  tokenLineNo = 0
404 
405  # The file content is processed. Close the file. Then always yield
406  # the eof token.
407  f.close()
408  yield ('eof', None, None)
409 
410 
411  def __collectClassInfo(self, tokenIterator):
412  """Collect the information about the class and base class.
413 
414  The tokens including the opening left curly brace of the class are
415  consumed."""
416 
417  status = 0 # initial state
418 
419  while status != 777: # final state
420 
421  # Always assume that the previous tokens were processed. Get
422  # the next one.
423  tokenId, tokenStr, tokenLineNo = next(tokenIterator)
424 
425  # Process the token and never return back.
426  if status == 0: # waiting for the 'class' keyword.
427  if tokenId == 'class':
428  status = 1
429 
430  elif status == 1: # expecting the class identification
431  if tokenId == 'id':
432  self.classId = tokenStr
433  status = 2
434  else:
435  self.__unexpectedToken(status, tokenId, tokenLineNo)
436 
437  elif status == 2: # expecting the curly brace or base class info
438  if tokenId == 'lcurly':
439  status = 777 # correctly finished
440  elif tokenId == 'colon':
441  status = 3
442  else:
443  self.__unexpectedToken(status, tokenId, tokenLineNo)
444 
445  elif status == 3: # expecting the 'public' in front of base class id
446  if tokenId == 'public':
447  status = 4
448  else:
449  self.__unexpectedToken(status, tokenId, tokenLineNo)
450 
451  elif status == 4: # expecting the base class id
452  if tokenId == 'id':
453  self.baseClassId = tokenStr
454  status = 5
455  else:
456  self.__unexpectedToken(status, tokenId, tokenLineNo)
457 
458  elif status == 5: # expecting the curly brace and quitting
459  if tokenId == 'lcurly':
460  status = 777 # correctly finished
461  elif tokenId == 'comment':
462  pass
463  else:
464  self.__unexpectedToken(status, tokenId, tokenLineNo)
465 
466  # Extract the status of the TranslatorXxxx class. The readable form
467  # will be used in reports the status form is a string that can be
468  # compared lexically (unified length, padding with zeros, etc.).
469  if self.baseClassId:
470  lst = self.baseClassId.split('_')
471  if lst[0] == 'Translator':
472  self.readableStatus = 'up-to-date'
473  self.status = ''
474  elif lst[0] == 'TranslatorAdapter':
475  self.status = lst[1] + '.' + lst[2]
476  self.readableStatus = self.status
477  if len(lst) > 3: # add the last part of the number
478  self.status += '.' + ('%02d' % int(lst[3]))
479  self.readableStatus += '.' + lst[3]
480  else:
481  self.status += '.00'
482  elif lst[0] == 'TranslatorEnglish':
483  # Obsolete or Based on English.
484  if self.classId[-2:] == 'En':
485  self.readableStatus = 'English based'
486  self.status = 'En'
487  else:
488  self.readableStatus = 'obsolete'
489  self.status = '0.0.00'
490 
491  # Check whether status was set, or set 'strange'.
492  if self.status == None:
493  self.status = 'strange'
494  if not self.readableStatus:
495  self.readableStatus = 'strange'
496 
497  # Extract the name of the language and the readable form.
498  self.lang = self.classId[10:] # without 'Translator'
499  if self.lang == 'Brazilian':
500  self.langReadable = 'Brazilian Portuguese'
501  elif self.lang == 'Chinesetraditional':
502  self.langReadable = 'Chinese Traditional'
503  else:
504  self.langReadable = self.lang
505 
506 
507  def __unexpectedToken(self, status, tokenId, tokenLineNo):
508  """Reports unexpected token and quits with exit code 1."""
509 
510  import inspect
511  calledFrom = inspect.stack()[1][3]
512  msg = "\a\nUnexpected token '%s' on the line %d in '%s'.\n"
513  msg = msg % (tokenId, tokenLineNo, self.fname)
514  msg += 'status = %d in %s()\n' % (status, calledFrom)
515  sys.stderr.write(msg)
516  sys.exit(1)
517 
518 
520  """Returns dictionary 'unified prototype' -> 'full prototype'.
521 
522  The method is expected to be called only for the translator.h. It
523  extracts only the pure virtual method and build the dictionary where
524  key is the unified prototype without argument identifiers."""
525 
526  # Prepare empty dictionary that will be returned.
527  resultDic = {}
528 
529  # Start the token generator which parses the class source file.
530  tokenIterator = self.__tokenGenerator()
531 
532  # Collect the class and the base class identifiers.
533  self.__collectClassInfo(tokenIterator)
534  assert(self.classId == 'Translator')
535 
536  # Let's collect readable form of the public virtual pure method
537  # prototypes in the readable form -- as defined in translator.h.
538  # Let's collect also unified form of the same prototype that omits
539  # everything that can be omitted, namely 'virtual' and argument
540  # identifiers.
541  prototype = '' # readable prototype (with everything)
542  uniPrototype = '' # unified prototype (without arg. identifiers)
543 
544  # Collect the pure virtual method prototypes. Stop on the closing
545  # curly brace followed by the semicolon (end of class).
546  status = 0
547  curlyCnt = 0 # counter for the level of curly braces
548 
549  # Loop until the final state 777 is reached. The errors are processed
550  # immediately. In this implementation, it always quits the application.
551  while status != 777:
552 
553  # Get the next token.
554  tokenId, tokenStr, tokenLineNo = next(tokenIterator)
555 
556  if status == 0: # waiting for 'public:'
557  if tokenId == 'public':
558  status = 1
559 
560  elif status == 1: # colon after the 'public'
561  if tokenId == 'colon':
562  status = 2
563  else:
564  self.__unexpectedToken(status, tokenId, tokenLineNo)
565 
566  elif status == 2: # waiting for 'virtual'
567  if tokenId == 'virtual':
568  prototype = tokenStr # but not to unified prototype
569  status = 3
570  elif tokenId == 'comment':
571  pass
572  elif tokenId == 'rcurly':
573  status = 11 # expected end of class
574  else:
575  self.__unexpectedToken(status, tokenId, tokenLineNo)
576 
577  elif status == 3: # return type of the method expected
578  if tokenId == 'id':
579  prototype += ' ' + tokenStr
580  uniPrototype = tokenStr # start collecting the unified prototype
581  status = 4
582  elif tokenId == 'tilde':
583  status = 4
584  else:
585  self.__unexpectedToken(status, tokenId, tokenLineNo)
586 
587  elif status == 4: # method identifier expected
588  if tokenId == 'id':
589  prototype += ' ' + tokenStr
590  uniPrototype += ' ' + tokenStr
591  status = 5
592  else:
593  self.__unexpectedToken(status, tokenId, tokenLineNo)
594 
595  elif status == 5: # left bracket of the argument list expected
596  if tokenId == 'lpar':
597  prototype += tokenStr
598  uniPrototype += tokenStr
599  status = 6
600  else:
601  self.__unexpectedToken(status, tokenId, tokenLineNo)
602 
603  elif status == 6: # collecting arguments of the method
604  if tokenId == 'rpar':
605  prototype += tokenStr
606  uniPrototype += tokenStr
607  status = 7
608  elif tokenId == 'const':
609  prototype += tokenStr
610  uniPrototype += tokenStr
611  status = 12
612  elif tokenId == 'id': # type identifier
613  prototype += tokenStr
614  uniPrototype += tokenStr
615  status = 13
616  else:
617  self.__unexpectedToken(status, tokenId, tokenLineNo)
618 
619  elif status == 7: # assignment expected or left curly brace
620  if tokenId == 'assign':
621  status = 8
622  elif tokenId == 'lcurly':
623  curlyCnt = 1 # method body entered
624  status = 10
625  else:
626  self.__unexpectedToken(status, tokenId, tokenLineNo)
627 
628  elif status == 8: # zero expected
629  if tokenId == 'num' and tokenStr == '0':
630  status = 9
631  else:
632  self.__unexpectedToken(status, tokenId, tokenLineNo)
633 
634  elif status == 9: # after semicolon, produce the dic item
635  if tokenId == 'semic':
636  assert(uniPrototype not in resultDic)
637  resultDic[uniPrototype] = prototype
638  status = 2
639  else:
640  self.__unexpectedToken(status, tokenId, tokenLineNo)
641 
642  elif status == 10: # consuming the body of the method
643  if tokenId == 'rcurly':
644  curlyCnt -= 1
645  if curlyCnt == 0:
646  status = 2 # body consumed
647  elif tokenId == 'lcurly':
648  curlyCnt += 1
649 
650  elif status == 11: # probably the end of class
651  if tokenId == 'semic':
652  status = 777
653  else:
654  self.__unexpectedToken(status, tokenId, tokenLineNo)
655 
656  elif status == 12: # type id for argument expected
657  if tokenId == 'id':
658  prototype += ' ' + tokenStr
659  uniPrototype += ' ' + tokenStr
660  status = 13
661  else:
662  self.__unexpectedToken(status, tokenId, tokenLineNo)
663 
664  elif status == 13: # namespace qualification or * or & expected
665  if tokenId == 'colon': # was namespace id
666  prototype += tokenStr
667  uniPrototype += tokenStr
668  status = 14
669  elif tokenId == 'star' or tokenId == 'amp': # pointer or reference
670  prototype += ' ' + tokenStr
671  uniPrototype += ' ' + tokenStr
672  status = 16
673  elif tokenId == 'id': # argument identifier
674  prototype += ' ' + tokenStr
675  # don't put this into unified prototype
676  status = 17
677  else:
678  self.__unexpectedToken(status, tokenId, tokenLineNo)
679 
680  elif status == 14: # second colon for namespace:: expected
681  if tokenId == 'colon':
682  prototype += tokenStr
683  uniPrototype += tokenStr
684  status = 15
685  else:
686  self.__unexpectedToken(status, tokenId, tokenLineNo)
687 
688  elif status == 15: # type after namespace:: expected
689  if tokenId == 'id':
690  prototype += tokenStr
691  uniPrototype += tokenStr
692  status = 13
693  else:
694  self.__unexpectedToken(status, tokenId, tokenLineNo)
695 
696  elif status == 16: # argument identifier expected
697  if tokenId == 'id':
698  prototype += ' ' + tokenStr
699  # don't put this into unified prototype
700  status = 17
701  else:
702  self.__unexpectedToken(status, tokenId, tokenLineNo)
703 
704  elif status == 17: # comma or ')' after argument identifier expected
705  if tokenId == 'comma':
706  prototype += ', '
707  uniPrototype += ', '
708  status = 6
709  elif tokenId == 'rpar':
710  prototype += tokenStr
711  uniPrototype += tokenStr
712  status = 7
713  else:
714  self.__unexpectedToken(status, tokenId, tokenLineNo)
715 
716  # Eat the rest of the source to cause closing the file.
717  while tokenId != 'eof':
718  tokenId, tokenStr, tokenLineNo = next(tokenIterator)
719 
720  # Return the resulting dictionary with 'uniPrototype -> prototype'.
721  return resultDic
722 
723 
724  def __collectPublicMethodPrototypes(self, tokenIterator):
725  """Collects prototypes of public methods and fills self.prototypeDic.
726 
727  The dictionary is filled by items: uniPrototype -> prototype.
728  The method is expected to be called only for TranslatorXxxx classes,
729  i.e. for the classes that implement translation to some language.
730  It assumes that the openning curly brace of the class was already
731  consumed. The source is consumed until the end of the class.
732  The caller should consume the source until the eof to cause closing
733  the source file."""
734 
735  assert(self.classId != 'Translator')
736  assert(self.baseClassId != None)
737 
738  # The following finite automaton slightly differs from the one
739  # inside self.collectPureVirtualPrototypes(). It produces the
740  # dictionary item just after consuming the body of the method
741  # (transition from from state 10 to state 2). It also does not allow
742  # definitions of public pure virtual methods, except for
743  # TranslatorAdapterBase (states 8 and 9). Argument identifier inside
744  # method argument lists can be omitted or commented.
745  #
746  # Let's collect readable form of all public method prototypes in
747  # the readable form -- as defined in the source file.
748  # Let's collect also unified form of the same prototype that omits
749  # everything that can be omitted, namely 'virtual' and argument
750  # identifiers.
751  prototype = '' # readable prototype (with everything)
752  uniPrototype = '' # unified prototype (without arg. identifiers)
753  warning = '' # warning message -- if something special detected
754  methodId = None # processed method id
755 
756  # Collect the method prototypes. Stop on the closing
757  # curly brace followed by the semicolon (end of class).
758  status = 0
759  curlyCnt = 0 # counter for the level of curly braces
760 
761  # Loop until the final state 777 is reached. The errors are processed
762  # immediately. In this implementation, it always quits the application.
763  while status != 777:
764 
765  # Get the next token.
766  tokenId, tokenStr, tokenLineNo = next(tokenIterator)
767 
768  if status == 0: # waiting for 'public:'
769  if tokenId == 'public':
770  status = 1
771  elif tokenId == 'eof': # non-public things until the eof
772  status = 777
773 
774  elif status == 1: # colon after the 'public'
775  if tokenId == 'colon':
776  status = 2
777  else:
778  self.__unexpectedToken(status, tokenId, tokenLineNo)
779 
780  elif status == 2: # waiting for 'virtual' (can be omitted)
781  if tokenId == 'virtual':
782  prototype = tokenStr # but not to unified prototype
783  status = 3
784  elif tokenId == 'id': # 'virtual' was omitted
785  prototype = tokenStr
786  uniPrototype = tokenStr # start collecting the unified prototype
787  status = 4
788  elif tokenId == 'comment':
789  pass
790  elif tokenId == 'protected' or tokenId == 'private':
791  status = 0
792  elif tokenId == 'rcurly':
793  status = 11 # expected end of class
794  else:
795  self.__unexpectedToken(status, tokenId, tokenLineNo)
796 
797  elif status == 3: # return type of the method expected
798  if tokenId == 'id':
799  prototype += ' ' + tokenStr
800  uniPrototype = tokenStr # start collecting the unified prototype
801  status = 4
802  else:
803  self.__unexpectedToken(status, tokenId, tokenLineNo)
804 
805  elif status == 4: # method identifier expected
806  if tokenId == 'id':
807  prototype += ' ' + tokenStr
808  uniPrototype += ' ' + tokenStr
809  methodId = tokenStr # for reporting
810  status = 5
811  else:
812  self.__unexpectedToken(status, tokenId, tokenLineNo)
813 
814  elif status == 5: # left bracket of the argument list expected
815  if tokenId == 'lpar':
816  prototype += tokenStr
817  uniPrototype += tokenStr
818  status = 6
819  else:
820  self.__unexpectedToken(status, tokenId, tokenLineNo)
821 
822  elif status == 6: # collecting arguments of the method
823  if tokenId == 'rpar':
824  prototype += tokenStr
825  uniPrototype += tokenStr
826  status = 7
827  elif tokenId == 'const':
828  prototype += tokenStr
829  uniPrototype += tokenStr
830  status = 12
831  elif tokenId == 'id': # type identifier
832  prototype += tokenStr
833  uniPrototype += tokenStr
834  status = 13
835  else:
836  self.__unexpectedToken(status, tokenId, tokenLineNo)
837 
838  elif status == 7: # left curly brace expected
839  if tokenId == 'lcurly':
840  curlyCnt = 1 # method body entered
841  status = 10
842  elif tokenId == 'comment':
843  pass
844  elif tokenId == 'assign': # allowed only for TranslatorAdapterBase
845  assert(self.classId == 'TranslatorAdapterBase')
846  status = 8
847  else:
848  self.__unexpectedToken(status, tokenId, tokenLineNo)
849 
850  elif status == 8: # zero expected (TranslatorAdapterBase)
851  assert(self.classId == 'TranslatorAdapterBase')
852  if tokenId == 'num' and tokenStr == '0':
853  status = 9
854  else:
855  self.__unexpectedToken(status, tokenId, tokenLineNo)
856 
857  elif status == 9: # after semicolon (TranslatorAdapterBase)
858  assert(self.classId == 'TranslatorAdapterBase')
859  if tokenId == 'semic':
860  status = 2
861  else:
862  self.__unexpectedToken(status, tokenId, tokenLineNo)
863 
864  elif status == 10: # consuming the body of the method, then dic item
865  if tokenId == 'rcurly':
866  curlyCnt -= 1
867  if curlyCnt == 0:
868  # Check for possible copy/paste error when name
869  # of the method was not corrected (i.e. the same
870  # name already exists).
871  if uniPrototype in self.prototypeDic:
872  msg = "'%s' prototype found again (duplicity)\n"
873  msg += "in '%s'.\n" % self.fname
874  msg = msg % uniPrototype
875  sys.stderr.write(msg)
876  assert False
877 
878  assert(uniPrototype not in self.prototypeDic)
879  # Insert new dictionary item.
880  self.prototypeDic[uniPrototype] = prototype
881  status = 2 # body consumed
882  methodId = None # outside of any method
883  elif tokenId == 'lcurly':
884  curlyCnt += 1
885 
886  # Warn in special case.
887  elif methodId == 'trLegendDocs' and tokenId == 'string' \
888  and tokenStr.find('MAX_DOT_GRAPH_HEIGHT') >= 0:
889  self.txtMAX_DOT_GRAPH_HEIGHT_flag = True
890 
891 
892  elif status == 11: # probably the end of class
893  if tokenId == 'semic':
894  status = 777
895  else:
896  self.__unexpectedToken(status, tokenId, tokenLineNo)
897 
898  elif status == 12: # type id for argument expected
899  if tokenId == 'id':
900  prototype += ' ' + tokenStr
901  uniPrototype += ' ' + tokenStr
902  status = 13
903  else:
904  self.__unexpectedToken(status, tokenId, tokenLineNo)
905 
906  elif status == 13: # :: or * or & or id or ) expected
907  if tokenId == 'colon': # was namespace id
908  prototype += tokenStr
909  uniPrototype += tokenStr
910  status = 14
911  elif tokenId == 'star' or tokenId == 'amp': # pointer or reference
912  prototype += ' ' + tokenStr
913  uniPrototype += ' ' + tokenStr
914  status = 16
915  elif tokenId == 'id': # argument identifier
916  prototype += ' ' + tokenStr
917  # don't put this into unified prototype
918  status = 17
919  elif tokenId == 'comment': # probably commented-out identifier
920  prototype += tokenStr
921  elif tokenId == 'rpar':
922  prototype += tokenStr
923  uniPrototype += tokenStr
924  status = 7
925  elif tokenId == 'comma':
926  prototype += ', '
927  uniPrototype += ', '
928  status = 6
929  else:
930  self.__unexpectedToken(status, tokenId, tokenLineNo)
931 
932  elif status == 14: # second colon for namespace:: expected
933  if tokenId == 'colon':
934  prototype += tokenStr
935  uniPrototype += tokenStr
936  status = 15
937  else:
938  self.__unexpectedToken(status, tokenId, tokenLineNo)
939 
940  elif status == 15: # type after namespace:: expected
941  if tokenId == 'id':
942  prototype += tokenStr
943  uniPrototype += tokenStr
944  status = 13
945  else:
946  self.__unexpectedToken(status, tokenId, tokenLineNo)
947 
948  elif status == 16: # argument identifier or ) expected
949  if tokenId == 'id':
950  prototype += ' ' + tokenStr
951  # don't put this into unified prototype
952  status = 17
953  elif tokenId == 'rpar':
954  prototype += tokenStr
955  uniPrototype += tokenStr
956  status = 7
957  elif tokenId == 'comment':
958  prototype += tokenStr
959  else:
960  self.__unexpectedToken(status, tokenId, tokenLineNo)
961 
962  elif status == 17: # comma or ')' after argument identifier expected
963  if tokenId == 'comma':
964  prototype += ', '
965  uniPrototype += ', '
966  status = 6
967  elif tokenId == 'rpar':
968  prototype += tokenStr
969  uniPrototype += tokenStr
970  status = 7
971  else:
972  self.__unexpectedToken(status, tokenId, tokenLineNo)
973 
974 
975 
977  """Returns the dictionary of prototypes implemented by adapters.
978 
979  It is created to process the translator_adapter.h. The returned
980  dictionary has the form: unifiedPrototype -> (version, classId)
981  thus by looking for the prototype, we get the information what is
982  the newest (least adapting) adapter that is sufficient for
983  implementing the method."""
984 
985  # Start the token generator which parses the class source file.
986  assert(os.path.split(self.fname)[1] == 'translator_adapter.h')
987  tokenIterator = self.__tokenGenerator()
988 
989  # Get the references to the involved dictionaries.
990  reqDic = self.manager.requiredMethodsDic
991 
992  # Create the empty dictionary that will be returned.
993  adaptDic = {}
994 
995 
996  # Loop through the source of the adapter file until no other adapter
997  # class is found.
998  while True:
999  try:
1000  # Collect the class and the base class identifiers.
1001  self.__collectClassInfo(tokenIterator)
1002 
1003  # Extract the comparable version of the adapter class.
1004  # Note: The self.status as set by self.__collectClassInfo()
1005  # contains similar version, but is related to the base class,
1006  # not to the class itself.
1007  lst = self.classId.split('_')
1008  version = ''
1009  if lst[0] == 'TranslatorAdapter': # TranslatorAdapterBase otherwise
1010  version = lst[1] + '.' + lst[2]
1011  if len(lst) > 3: # add the last part of the number
1012  version += '.' + ('%02d' % int(lst[3]))
1013  else:
1014  version += '.00'
1015 
1016  # Collect the prototypes of implemented public methods.
1017  self.__collectPublicMethodPrototypes(tokenIterator)
1018 
1019  # For the required methods, update the dictionary of methods
1020  # implemented by the adapter.
1021  for protoUni in self.prototypeDic:
1022  if protoUni in reqDic:
1023  # This required method will be marked as implemented
1024  # by this adapter class. This implementation assumes
1025  # that newer adapters do not reimplement any required
1026  # methods already implemented by older adapters.
1027  assert(protoUni not in adaptDic)
1028  adaptDic[protoUni] = (version, self.classId)
1029 
1030  # Clear the dictionary object and the information related
1031  # to the class as the next adapter class is to be processed.
1032  self.prototypeDic.clear()
1033  self.classId = None
1034  self.baseClassId = None
1035 
1036  except StopIteration:
1037  break
1038 
1039  # Return the result dictionary.
1040  return adaptDic
1041 
1042 
1043  def processing(self):
1044  """Processing of the source file -- only for TranslatorXxxx classes."""
1045 
1046  # Start the token generator which parses the class source file.
1047  tokenIterator = self.__tokenGenerator()
1048 
1049  # Collect the class and the base class identifiers.
1050  self.__collectClassInfo(tokenIterator)
1051  assert(self.classId != 'Translator')
1052  assert(self.classId[:17] != 'TranslatorAdapter')
1053 
1054  # Collect the prototypes of implemented public methods.
1055  self.__collectPublicMethodPrototypes(tokenIterator)
1056 
1057  # Eat the rest of the source to cause closing the file.
1058  while True:
1059  try:
1060  t = next(tokenIterator)
1061  except StopIteration:
1062  break
1063 
1064  # Shorthands for the used dictionaries.
1065  reqDic = self.manager.requiredMethodsDic
1066  adaptDic = self.manager.adaptMethodsDic
1067  myDic = self.prototypeDic
1068 
1069  # Build the list of obsolete methods.
1070  self.obsoleteMethods = []
1071  for p in myDic:
1072  if p not in reqDic:
1073  self.obsoleteMethods.append(p)
1074  self.obsoleteMethods.sort()
1075 
1076  # Build the list of missing methods and the list of implemented
1077  # required methods.
1078  self.missingMethods = []
1079  self.implementedMethods = []
1080  for p in reqDic:
1081  if p in myDic:
1082  self.implementedMethods.append(p)
1083  else:
1084  self.missingMethods.append(p)
1085  self.missingMethods.sort()
1086  self.implementedMethods.sort()
1087 
1088  # Check whether adapter must be used or suggest the newest one.
1089  # Change the status and set the note accordingly.
1090  if self.baseClassId != 'Translator':
1091  if not self.missingMethods:
1092  self.note = 'Change the base class to Translator.'
1093  self.status = ''
1094  self.readableStatus = 'almost up-to-date'
1095  elif self.baseClassId != 'TranslatorEnglish':
1096  # The translator uses some of the adapters.
1097  # Look at the missing methods and check what adapter
1098  # implements them. Remember the one with the lowest version.
1099  adaptMinVersion = '9.9.99'
1100  adaptMinClass = 'TranslatorAdapter_9_9_99'
1101  for uniProto in self.missingMethods:
1102  if uniProto in adaptDic:
1103  version, cls = adaptDic[uniProto]
1104  if version < adaptMinVersion:
1105  adaptMinVersion = version
1106  adaptMinClass = cls
1107 
1108  # Test against the current status -- preserve the self.status.
1109  # Possibly, the translator implements enough methods to
1110  # use some newer adapter.
1111  status = self.status
1112 
1113  # If the version of the used adapter is smaller than
1114  # the required, set the note and update the status as if
1115  # the newer adapter was used.
1116  if adaptMinVersion > status:
1117  self.note = 'Change the base class to %s.' % adaptMinClass
1118  self.status = adaptMinVersion
1119  self.adaptMinClass = adaptMinClass
1120  self.readableStatus = adaptMinVersion # simplified
1121 
1122  # If everything seems OK, some explicit warning flags still could
1123  # be set.
1124  if not self.note and self.status == '' and \
1126  self.note = ''
1127  if self.translateMeFlag:
1128  self.note += 'The "%s" found in a comment.' % self.translateMeText
1129  if self.note != '':
1130  self.note += '\n\t\t'
1132  self.note += 'The MAX_DOT_GRAPH_HEIGHT found in trLegendDocs()'
1133 
1134  # If everything seems OK, but there are obsolete methods, set
1135  # the note to clean-up source. This note will be used only when
1136  # the previous code did not set another note (priority).
1137  if not self.note and self.status == '' and self.obsoleteMethods:
1138  self.note = 'Remove the obsolete methods (never used).'
1139 
1140  # If there is at least some note but the status suggests it is
1141  # otherwise up-to-date, mark is as ALMOST up-to-date.
1142  if self.note and self.status == '':
1143  self.readableStatus = 'almost up-to-date'
1144 
1145 
1146  def report(self, fout):
1147  """Returns the report part for the source as a multiline string.
1148 
1149  No output for up-to-date translators without problem."""
1150 
1151  # If there is nothing to report, return immediately.
1152  if self.status == '' and not self.note:
1153  return
1154 
1155  # Report the number of not implemented methods.
1156  fout.write('\n\n\n')
1157  fout.write(self.classId + ' (' + self.baseClassId + ')')
1158  percentImplemented = 100 # init
1159  allNum = len(self.manager.requiredMethodsDic)
1160  if self.missingMethods:
1161  num = len(self.missingMethods)
1162  percentImplemented = 100 * (allNum - num) / allNum
1163  fout.write(' %d' % num)
1164  fout.write(' method')
1165  if num > 1:
1166  fout.write('s')
1167  fout.write(' to implement (%d %%)' % (100 * num / allNum))
1168  fout.write('\n' + '-' * len(self.classId))
1169 
1170  # Write the info about the implemented required methods.
1171  fout.write('\n\n Implements %d' % len(self.implementedMethods))
1172  fout.write(' of the required methods (%d %%).' % percentImplemented)
1173 
1174  # Report the missing method, but only when it is not English-based
1175  # translator.
1176  if self.missingMethods and self.status != 'En':
1177  fout.write('\n\n Missing methods (should be implemented):\n')
1178  reqDic = self.manager.requiredMethodsDic
1179  for p in self.missingMethods:
1180  fout.write('\n ' + reqDic[p])
1181 
1182  # Always report obsolete methods.
1183  if self.obsoleteMethods:
1184  fout.write('\n\n Obsolete methods (should be removed, never used):\n')
1185  myDic = self.prototypeDic
1186  for p in self.obsoleteMethods:
1187  fout.write('\n ' + myDic[p])
1188 
1189  # For English-based translator, report the implemented methods.
1190  if self.status == 'En' and self.implementedMethods:
1191  fout.write('\n\n This English-based translator implements ')
1192  fout.write('the following methods:\n')
1193  reqDic = self.manager.requiredMethodsDic
1194  for p in self.implementedMethods:
1195  fout.write('\n ' + reqDic[p])
1196 
1197 
1198  def getmtime(self):
1199  """Returns the last modification time of the source file."""
1200  assert(os.path.isfile(self.fname))
1201  return os.path.getmtime(self.fname)
1202 
1203 
1205  """Collects basic info and builds subordinate Transl objects."""
1206 
1207  def __init__(self):
1208  """Determines paths, creates and initializes structures.
1209 
1210  The arguments of the script may explicitly say what languages should
1211  be processed. Write the two letter identifications that are used
1212  for composing the source filenames, so...
1213 
1214  python translator.py cz
1215 
1216  this will process only translator_cz.h source.
1217  """
1218 
1219  # Determine the path to the script and its name.
1220  self.script = os.path.abspath(sys.argv[0])
1221  self.script_path, self.script_name = os.path.split(self.script)
1222  self.script_path = os.path.abspath(self.script_path)
1223 
1224  # Determine the absolute path to the Doxygen's root subdirectory.
1225  # If DOXYGEN environment variable is not found, the directory is
1226  # determined from the path of the script.
1227  doxy_default = os.path.join(self.script_path, '..')
1228  self.doxy_path = os.path.abspath(os.getenv('DOXYGEN', doxy_default))
1229 
1230  # Get the explicit arguments of the script.
1231  self.script_argLst = sys.argv[1:]
1232 
1233  # Build the path names based on the Doxygen's root knowledge.
1234  self.doc_path = os.path.join(self.doxy_path, 'doc')
1235  self.src_path = os.path.join(self.doxy_path, 'src')
1236 
1237  # Create the empty dictionary for Transl object identitied by the
1238  # class identifier of the translator.
1239  self.__translDic = {}
1240 
1241  # Create the None dictionary of required methods. The key is the
1242  # unified prototype, the value is the full prototype. Set inside
1243  # the self.__build().
1245 
1246  # Create the empty dictionary that says what method is implemented
1247  # by what adapter.
1249 
1250  # The last modification time will capture the modification of this
1251  # script, of the translator.h, of the translator_adapter.h (see the
1252  # self.__build() for the last two) of all the translator_xx.h files
1253  # and of the template for generating the documentation. So, this
1254  # time can be compared with modification time of the generated
1255  # documentation to decide, whether the doc should be re-generated.
1256  self.lastModificationTime = os.path.getmtime(self.script)
1257 
1258  # Set the names of the translator report text file, of the template
1259  # for generating "Internationalization" document, for the generated
1260  # file itself, and for the maintainers list.
1261  self.translatorReportFileName = 'translator_report.txt'
1262  self.maintainersFileName = 'maintainers.txt'
1263  self.languageTplFileName = 'language.tpl'
1264  self.languageDocFileName = 'language.doc'
1265 
1266  # The information about the maintainers will be stored
1267  # in the dictionary with the following name.
1268  self.__maintainersDic = None
1269 
1270  # Define the other used structures and variables for information.
1271  self.langLst = None # including English based
1272  self.supportedLangReadableStr = None # coupled En-based as a note
1273  self.numLang = None # excluding coupled En-based
1274  self.doxVersion = None # Doxygen version
1275 
1276  # Build objects where each one is responsible for one translator.
1277  self.__build()
1278 
1279 
1280  def __build(self):
1281  """Find the translator files and build the objects for translators."""
1282 
1283  # The translator.h must exist (the Transl object will check it),
1284  # create the object for it and let it build the dictionary of
1285  # required methods.
1286  tr = Transl(os.path.join(self.src_path, 'translator.h'), self)
1287  self.requiredMethodsDic = tr.collectPureVirtualPrototypes()
1288  tim = tr.getmtime()
1289  if tim > self.lastModificationTime:
1290  self.lastModificationTime = tim
1291 
1292  # The translator_adapter.h must exist (the Transl object will check it),
1293  # create the object for it and store the reference in the dictionary.
1294  tr = Transl(os.path.join(self.src_path, 'translator_adapter.h'), self)
1295  self.adaptMethodsDic = tr.collectAdapterPrototypes()
1296  tim = tr.getmtime()
1297  if tim > self.lastModificationTime:
1298  self.lastModificationTime = tim
1299 
1300  # Create the list of the filenames with language translator sources.
1301  # If the explicit arguments of the script were typed, process only
1302  # those files.
1303  if self.script_argLst:
1304  lst = ['translator_' + x + '.h' for x in self.script_argLst]
1305  for fname in lst:
1306  if not os.path.isfile(os.path.join(self.src_path, fname)):
1307  sys.stderr.write("\a\nFile '%s' not found!\n" % fname)
1308  sys.exit(1)
1309  else:
1310  lst = os.listdir(self.src_path)
1311  lst = [x for x in lst if x[:11] == 'translator_'
1312  and x[-2:] == '.h'
1313  and x != 'translator_adapter.h']
1314 
1315  # Build the object for the translator_xx.h files, and process the
1316  # content of the file. Then insert the object to the dictionary
1317  # accessed via classId.
1318  for fname in lst:
1319  fullname = os.path.join(self.src_path, fname)
1320  tr = Transl(fullname, self)
1321  tr.processing()
1322  assert(tr.classId != 'Translator')
1323  self.__translDic[tr.classId] = tr
1324 
1325  # Extract the global information of the processed info.
1326  self.__extractProcessedInfo()
1327 
1328 
1330  """Build lists and strings of the processed info."""
1331 
1332  # Build the auxiliary list with strings compound of the status,
1333  # readable form of the language, and classId.
1334  statLst = []
1335  for obj in list(self.__translDic.values()):
1336  assert(obj.classId != 'Translator')
1337  s = obj.status + '|' + obj.langReadable + '|' + obj.classId
1338  statLst.append(s)
1339 
1340  # Sort the list and extract the object identifiers (classId's) for
1341  # the up-to-date translators and English-based translators.
1342  statLst.sort()
1343  self.upToDateIdLst = [x.split('|')[2] for x in statLst if x[0] == '|']
1344  self.EnBasedIdLst = [x.split('|')[2] for x in statLst if x[:2] == 'En']
1345 
1346  # Reverse the list and extract the TranslatorAdapter based translators.
1347  statLst.reverse()
1348  self.adaptIdLst = [x.split('|')[2] for x in statLst if x[0].isdigit()]
1349 
1350  # Build the list of tuples that contain (langReadable, obj).
1351  # Sort it by readable name.
1352  self.langLst = []
1353  for obj in list(self.__translDic.values()):
1354  self.langLst.append((obj.langReadable, obj))
1355 
1356  self.langLst.sort(key=lambda x: x[0])
1357 
1358  # Create the list with readable language names. If the language has
1359  # also the English-based version, modify the item by appending
1360  # the note. Number of the supported languages is equal to the length
1361  # of the list.
1362  langReadableLst = []
1363  for name, obj in self.langLst:
1364  if obj.status == 'En': continue
1365 
1366  # Append the 'En' to the classId to possibly obtain the classId
1367  # of the English-based object. If the object exists, modify the
1368  # name for the readable list of supported languages.
1369  classIdEn = obj.classId + 'En'
1370  if classIdEn in self.__translDic:
1371  name += ' (+En)'
1372 
1373  # Append the result name of the language, possibly with note.
1374  langReadableLst.append(name)
1375 
1376  # Create the multiline string of readable language names,
1377  # with punctuation, wrapped to paragraph.
1378  if len(langReadableLst) == 1:
1379  s = langReadableLst[0]
1380  elif len(langReadableLst) == 2:
1381  s = ' and '.join(langReadableLst)
1382  else:
1383  s = ', '.join(langReadableLst[:-1]) + ', and '
1384  s += langReadableLst[-1]
1385 
1386  self.supportedLangReadableStr = fill(s + '.')
1387 
1388  # Find the number of the supported languages. The English based
1389  # languages are not counted if the non-English based also exists.
1390  self.numLang = len(self.langLst)
1391  for name, obj in self.langLst:
1392  if obj.status == 'En':
1393  classId = obj.classId[:-2]
1394  if classId in self.__translDic:
1395  self.numLang -= 1 # the couple will be counted as one
1396 
1397  # Extract the version of Doxygen.
1398  f = xopen(os.path.join(self.doxy_path, 'VERSION'))
1399  self.doxVersion = f.readline().strip()
1400  f.close()
1401 
1402  # Update the last modification time.
1403  for tr in list(self.__translDic.values()):
1404  tim = tr.getmtime()
1405  if tim > self.lastModificationTime:
1406  self.lastModificationTime = tim
1407 
1408 
1410  """Returns the list of sources to be checked.
1411 
1412  All .cpp files and also .h files that do not declare or define
1413  the translator methods are included in the list. The file names
1414  are searched in doxygen/src directory.
1415  """
1416  files = []
1417  for item in os.listdir(self.src_path):
1418  # Split the bare name to get the extension.
1419  name, ext = os.path.splitext(item)
1420  ext = ext.lower()
1421 
1422  # Include only .cpp and .h files (case independent) and exclude
1423  # the files where the checked identifiers are defined.
1424  if ext == '.cpp' or (ext == '.h' and name.find('translator') == -1):
1425  fname = os.path.join(self.src_path, item)
1426  assert os.path.isfile(fname) # assumes no directory with the ext
1427  files.append(fname) # full name
1428  return files
1429 
1430 
1431  def __removeUsedInFiles(self, fname, dic):
1432  """Removes items for method identifiers that are found in fname.
1433 
1434  The method reads the content of the file as one string and searches
1435  for all identifiers from dic. The identifiers that were found in
1436  the file are removed from the dictionary.
1437 
1438  Note: If more files is to be checked, the files where most items are
1439  probably used should be checked first and the resulting reduced
1440  dictionary should be used for checking the next files (speed up).
1441  """
1442  lst_in = list(dic.keys()) # identifiers to be searched for
1443 
1444  # Read content of the file as one string.
1445  assert os.path.isfile(fname)
1446  f = xopen(fname)
1447  cont = f.read()
1448  f.close()
1449 
1450  # Remove the items for identifiers that were found in the file.
1451  while lst_in:
1452  item = lst_in.pop(0)
1453  if cont.find(item) != -1:
1454  del dic[item]
1455 
1456 
1458  """Returns the dictionary of not used translator methods.
1459 
1460  The method can be called only after self.requiredMethodsDic has been
1461  built. The stripped prototypes are the values, the method identifiers
1462  are the keys.
1463  """
1464  # Build the dictionary of the required method prototypes with
1465  # method identifiers used as keys.
1466  trdic = {}
1467  for prototype in list(self.requiredMethodsDic.keys()):
1468  ri = prototype.split('(')[0]
1469  identifier = ri.split()[1].strip()
1470  trdic[identifier] = prototype
1471 
1472  # Build the list of source files where translator method identifiers
1473  # can be used.
1474  files = self.__getNoTrSourceFilesLst()
1475 
1476  # Loop through the files and reduce the dictionary of id -> proto.
1477  for fname in files:
1478  self.__removeUsedInFiles(fname, trdic)
1479 
1480  # Return the dictionary of not used translator methods.
1481  return trdic
1482 
1483 
1484  def __emails(self, classId):
1485  """Returns the list of maintainer emails.
1486 
1487  The method returns the list of e-mail adresses for the translator
1488  class, but only the addresses that were not marked as [xxx]."""
1489  lst = []
1490  for m in self.__maintainersDic[classId]:
1491  if not m[1].startswith('['):
1492  email = m[1]
1493  email = email.replace(' at ', '@') # Unmangle the mangled e-mail
1494  email = email.replace(' dot ', '.')
1495  lst.append(email)
1496  return lst
1497 
1498 
1499  def getBgcolorByReadableStatus(self, readableStatus):
1500  if readableStatus == 'up-to-date':
1501  color = '#ccffcc' # green
1502  elif readableStatus.startswith('almost'):
1503  color = '#ffffff' # white
1504  elif readableStatus.startswith('English'):
1505  color = '#ccffcc' # green
1506  elif readableStatus.startswith('1.8'):
1507  color = '#ffffcc' # yellow
1508  elif readableStatus.startswith('1.7'):
1509  color = '#ffcccc' # pink
1510  elif readableStatus.startswith('1.6'):
1511  color = '#ffcccc' # pink
1512  else:
1513  color = '#ff5555' # red
1514  return color
1515 
1516 
1518  """Generates the translator report."""
1519 
1520  output = os.path.join(self.doc_path, self.translatorReportFileName)
1521 
1522  # Open the textual report file for the output.
1523  f = xopen(output, 'w')
1524 
1525  # Output the information about the version.
1526  f.write('(' + self.doxVersion + ')\n\n')
1527 
1528  # Output the information about the number of the supported languages
1529  # and the list of the languages, or only the note about the explicitly
1530  # given languages to process.
1531  if self.script_argLst:
1532  f.write('The report was generated for the following, explicitly')
1533  f.write(' identified languages:\n\n')
1534  f.write(self.supportedLangReadableStr + '\n\n')
1535  else:
1536  f.write('Doxygen supports the following ')
1537  f.write(str(self.numLang))
1538  f.write(' languages (sorted alphabetically):\n\n')
1539  f.write(self.supportedLangReadableStr + '\n\n')
1540 
1541  # Write the summary about the status of language translators (how
1542  # many translators) are up-to-date, etc.
1543  s = 'Of them, %d translators are up-to-date, ' % len(self.upToDateIdLst)
1544  s += '%d translators are based on some adapter class, ' % len(self.adaptIdLst)
1545  s += 'and %d are English based.' % len(self.EnBasedIdLst)
1546  f.write(fill(s) + '\n\n')
1547 
1548  # The e-mail addresses of the maintainers will be collected to
1549  # the auxiliary file in the order of translator classes listed
1550  # in the translator report.
1551  fmail = xopen('mailto.txt', 'w')
1552 
1553  # Write the list of "up-to-date" translator classes.
1554  if self.upToDateIdLst:
1555  s = '''The following translator classes are up-to-date (sorted
1556  alphabetically). This means that they derive from the
1557  Translator class, they implement all %d of the required
1558  methods, and even minor problems were not spotted by the script:'''
1559  s = s % len(self.requiredMethodsDic)
1560  f.write('-' * 70 + '\n')
1561  f.write(fill(s) + '\n\n')
1562 
1563  mailtoLst = []
1564  for x in self.upToDateIdLst:
1565  obj = self.__translDic[x]
1566  if obj.note is None:
1567  f.write(' ' + obj.classId + '\n')
1568  mailtoLst.extend(self.__emails(obj.classId))
1569 
1570  fmail.write('up-to-date\n')
1571  fmail.write('; '.join(mailtoLst))
1572 
1573 
1574  # Write separately the list of "ALMOST up-to-date" translator classes.
1575  s = '''The following translator classes are ALMOST up-to-date (sorted
1576  alphabetically). This means that they derive from the
1577  Translator class, but there still may be some minor problems
1578  listed for them:'''
1579  f.write('\n' + ('-' * 70) + '\n')
1580  f.write(fill(s) + '\n\n')
1581  mailtoLst = []
1582  for x in self.upToDateIdLst:
1583  obj = self.__translDic[x]
1584  if obj.note is not None:
1585  f.write(' ' + obj.classId + '\t-- ' + obj.note + '\n')
1586  mailtoLst.extend(self.__emails(obj.classId))
1587 
1588  fmail.write('\n\nalmost up-to-date\n')
1589  fmail.write('; '.join(mailtoLst))
1590 
1591  # Write the list of the adapter based classes. The very obsolete
1592  # translators that derive from TranslatorEnglish are included.
1593  if self.adaptIdLst:
1594  s = '''The following translator classes need maintenance
1595  (the most obsolete at the end). The other info shows the
1596  estimation of Doxygen version when the class was last
1597  updated and number of methods that must be implemented to
1598  become up-to-date:'''
1599  f.write('\n' + '-' * 70 + '\n')
1600  f.write(fill(s) + '\n\n')
1601 
1602  # Find also whether some adapter classes may be removed.
1603  adaptMinVersion = '9.9.99'
1604 
1605  mailtoLst = []
1606  numRequired = len(self.requiredMethodsDic)
1607  for x in self.adaptIdLst:
1608  obj = self.__translDic[x]
1609  f.write(' %-30s' % obj.classId)
1610  f.write(' %-6s' % obj.readableStatus)
1611  numimpl = len(obj.missingMethods)
1612  pluralS = ''
1613  if numimpl > 1: pluralS = 's'
1614  percent = 100 * numimpl / numRequired
1615  f.write('\t%2d method%s to implement (%d %%)' % (
1616  numimpl, pluralS, percent))
1617  if obj.note:
1618  f.write('\n\tNote: ' + obj.note + '\n')
1619  f.write('\n')
1620  mailtoLst.extend(self.__emails(obj.classId)) # to maintainer
1621 
1622  # Check the level of required adapter classes.
1623  if obj.status != '0.0.00' and obj.status < adaptMinVersion:
1624  adaptMinVersion = obj.status
1625 
1626  fmail.write('\n\ntranslator based\n')
1627  fmail.write('; '.join(mailtoLst))
1628 
1629  # Set the note if some old translator adapters are not needed
1630  # any more. Do it only when the script is called without arguments,
1631  # i.e. all languages were checked against the needed translator
1632  # adapters.
1633  if not self.script_argLst:
1634  to_remove = {}
1635  for version, adaptClassId in list(self.adaptMethodsDic.values()):
1636  if version < adaptMinVersion:
1637  to_remove[adaptClassId] = True
1638 
1639  if to_remove:
1640  lst = list(to_remove.keys())
1641  lst.sort()
1642  plural = len(lst) > 1
1643  note = 'Note: The adapter class'
1644  if plural: note += 'es'
1645  note += ' ' + ', '.join(lst)
1646  if not plural:
1647  note += ' is'
1648  else:
1649  note += ' are'
1650  note += ' not used and can be removed.'
1651  f.write('\n' + fill(note) + '\n')
1652 
1653  # Write the list of the English-based classes.
1654  if self.EnBasedIdLst:
1655  s = '''The following translator classes derive directly from the
1656  TranslatorEnglish. The class identifier has the suffix 'En'
1657  that says that this is intentional. Usually, there is also
1658  a non-English based version of the translator for
1659  the language:'''
1660  f.write('\n' + '-' * 70 + '\n')
1661  f.write(fill(s) + '\n\n')
1662 
1663  for x in self.EnBasedIdLst:
1664  obj = self.__translDic[x]
1665  f.write(' ' + obj.classId)
1666  f.write('\timplements %d methods' % len(obj.implementedMethods))
1667  if obj.note:
1668  f.write(' -- ' + obj.note)
1669  f.write('\n')
1670 
1671  # Check for not used translator methods and generate warning if found.
1672  # The check is rather time consuming, so it is not done when report
1673  # is restricted to explicitly given language identifiers.
1674  if not self.script_argLst:
1675  dic = self.__checkForNotUsedTrMethods()
1676  if dic:
1677  s = '''WARNING: The following translator methods are declared
1678  in the Translator class but their identifiers do not appear
1679  in source files. The situation should be checked. The .cpp
1680  files and .h files excluding the '*translator*' files
1681  in doxygen/src directory were simply searched for occurrence
1682  of the method identifiers:'''
1683  f.write('\n' + '=' * 70 + '\n')
1684  f.write(fill(s) + '\n\n')
1685 
1686  keys = list(dic.keys())
1687  keys.sort()
1688  for key in keys:
1689  f.write(' ' + dic[key] + '\n')
1690  f.write('\n')
1691 
1692  # Write the details for the translators.
1693  f.write('\n' + '=' * 70)
1694  f.write('\nDetails for translators (classes sorted alphabetically):\n')
1695 
1696  cls = list(self.__translDic.keys())
1697  cls.sort()
1698 
1699  for c in cls:
1700  obj = self.__translDic[c]
1701  assert(obj.classId != 'Translator')
1702  obj.report(f)
1703 
1704  # Close the report file and the auxiliary file with e-mails.
1705  f.close()
1706  fmail.close()
1707 
1708 
1710  """Load and process the file with the maintainers.
1711 
1712  Fills the dictionary classId -> [(name, e-mail), ...]."""
1713 
1714  fname = os.path.join(self.doc_path, self.maintainersFileName)
1715 
1716  # Include the maintainers file to the group of files checked with
1717  # respect to the modification time.
1718  tim = os.path.getmtime(fname)
1719  if tim > self.lastModificationTime:
1720  self.lastModificationTime = tim
1721 
1722  # Process the content of the maintainers file.
1723  f = xopen(fname)
1724  inside = False # inside the record for the language
1725  lineReady = True
1726  classId = None
1727  maintainersLst = None
1728  self.__maintainersDic = {}
1729  while lineReady:
1730  line = f.readline() # next line
1731  lineReady = line != '' # when eof, then line == ''
1732 
1733  line = line.strip() # eof should also behave as separator
1734  if line != '' and line[0] == '%': # skip the comment line
1735  continue
1736 
1737  if not inside: # if outside of the record
1738  if line != '': # should be language identifier
1739  classId = line
1740  maintainersLst = []
1741  inside = True
1742  # Otherwise skip empty line that do not act as separator.
1743 
1744  else: # if inside the record
1745  if line == '': # separator found
1746  inside = False
1747  else:
1748  # If it is the first maintainer, create the empty list.
1749  if classId not in self.__maintainersDic:
1750  self.__maintainersDic[classId] = []
1751 
1752  # Split the information about the maintainer and append
1753  # the tuple. The address may be prefixed '[unreachable]'
1754  # or whatever '[xxx]'. This will be processed later.
1755  lst = line.split(':', 1)
1756  assert(len(lst) == 2)
1757  t = (lst[0].strip(), lst[1].strip())
1758  self.__maintainersDic[classId].append(t)
1759  f.close()
1760 
1761 
1763  """Checks the modtime of files and generates language.doc."""
1764  self.__loadMaintainers()
1765 
1766  # Check the last modification time of the template file. It is the
1767  # last file from the group that decide whether the documentation
1768  # should or should not be generated.
1769  fTplName = os.path.join(self.doc_path, self.languageTplFileName)
1770  tim = os.path.getmtime(fTplName)
1771  if tim > self.lastModificationTime:
1772  self.lastModificationTime = tim
1773 
1774  # If the generated documentation exists and is newer than any of
1775  # the source files from the group, do not generate it and quit
1776  # quietly.
1777  fDocName = os.path.join(self.doc_path, self.languageDocFileName)
1778  if os.path.isfile(fDocName):
1779  if os.path.getmtime(fDocName) > self.lastModificationTime:
1780  return
1781 
1782  # The document or does not exist or is older than some of the
1783  # sources. It must be generated again.
1784  #
1785  # Read the template of the documentation, and remove the first
1786  # attention lines.
1787  f = xopen(fTplName)
1788  doctpl = f.read()
1789  f.close()
1790 
1791  pos = doctpl.find('/***')
1792  assert pos != -1
1793  doctpl = doctpl[pos:]
1794 
1795  # Fill the tplDic by symbols that will be inserted into the
1796  # document template.
1797  tplDic = {}
1798 
1799  s = ('Do not edit this file. It was generated by the %s script.\n' +\
1800  ' * Edit the %s and %s files instead.') % (
1802  tplDic['editnote'] = s
1803 
1804  tplDic['doxVersion'] = self.doxVersion
1805  tplDic['supportedLangReadableStr'] = self.supportedLangReadableStr
1806  tplDic['translatorReportFileName'] = self.translatorReportFileName
1807 
1808  ahref = '<a href="../doc/' + self.translatorReportFileName
1809  ahref += '"\n><code>doxygen/doc/' + self.translatorReportFileName
1810  ahref += '</code></a>'
1811  tplDic['translatorReportLink'] = ahref
1812  tplDic['numLangStr'] = str(self.numLang)
1813 
1814  # Define templates for HTML table parts of the documentation.
1815  htmlTableTpl = '''\
1816  \\htmlonly
1817  </p>
1818  <table align="center" cellspacing="0" cellpadding="0" border="0">
1819  <tr bgcolor="#000000">
1820  <td>
1821  <table cellspacing="1" cellpadding="2" border="0">
1822  <tr bgcolor="#4040c0">
1823  <td ><b><font size="+1" color="#ffffff"> Language </font></b></td>
1824  <td ><b><font size="+1" color="#ffffff"> Maintainer </font></b></td>
1825  <td ><b><font size="+1" color="#ffffff"> Contact address </font>
1826  <font size="-2" color="#ffffff">(replace the at and dot)</font></b></td>
1827  <td ><b><font size="+1" color="#ffffff"> Status </font></b></td>
1828  </tr>
1829  <!-- table content begin -->
1830  %s
1831  <!-- table content end -->
1832  </table>
1833  </td>
1834  </tr>
1835  </table>
1836  <p>
1837  \\endhtmlonly
1838  '''
1839  htmlTableTpl = textwrap.dedent(htmlTableTpl)
1840  htmlTrTpl = '\n <tr bgcolor="#ffffff">%s\n </tr>'
1841  htmlTdTpl = '\n <td>%s</td>'
1842  htmlTdStatusColorTpl = '\n <td bgcolor="%s">%s</td>'
1843 
1844  # Loop through transl objects in the order of sorted readable names
1845  # and add generate the content of the HTML table.
1846  trlst = []
1847  for name, obj in self.langLst:
1848  # Fill the table data elements for one row. The first element
1849  # contains the readable name of the language. Only the oldest
1850  # translator are colour marked in the language columnt. Less
1851  # "heavy" color is used (when compared with the Status column).
1852  if obj.readableStatus.startswith('1.4'):
1853  bkcolor = self.getBgcolorByReadableStatus('1.4')
1854  else:
1855  bkcolor = '#ffffff'
1856 
1857  lst = [ htmlTdStatusColorTpl % (bkcolor, obj.langReadable) ]
1858 
1859  # The next two elements contain the list of maintainers
1860  # and the list of their mangled e-mails. For English-based
1861  # translators that are coupled with the non-English based,
1862  # insert the 'see' note.
1863  mm = None # init -- maintainer
1864  ee = None # init -- e-mail address
1865  if obj.status == 'En':
1866  # Check whether there is the coupled non-English.
1867  classId = obj.classId[:-2]
1868  if classId in self.__translDic:
1869  lang = self.__translDic[classId].langReadable
1870  mm = 'see the %s language' % lang
1871  ee = '&nbsp;'
1872 
1873  if not mm and obj.classId in self.__maintainersDic:
1874  # Build a string of names separated by the HTML break element.
1875  # Special notes used instead of names are highlighted.
1876  lm = []
1877  for maintainer in self.__maintainersDic[obj.classId]:
1878  name = maintainer[0]
1879  if name.startswith('--'):
1880  name = '<span style="color: red; background-color: yellow">'\
1881  + name + '</span>'
1882  lm.append(name)
1883  mm = '<br/>'.join(lm)
1884 
1885  # The marked adresses (they start with the mark '[unreachable]',
1886  # '[resigned]', whatever '[xxx]') will not be displayed at all.
1887  # Only the mark will be used instead.
1888  rexMark = re.compile('(?P<mark>\\[.*?\\])')
1889  le = []
1890  for maintainer in self.__maintainersDic[obj.classId]:
1891  address = maintainer[1]
1892  m = rexMark.search(address)
1893  if m is not None:
1894  address = '<span style="color: brown">'\
1895  + m.group('mark') + '</span>'
1896  le.append(address)
1897  ee = '<br/>'.join(le)
1898 
1899  # Append the maintainer and e-mail elements.
1900  lst.append(htmlTdTpl % mm)
1901  lst.append(htmlTdTpl % ee)
1902 
1903  # The last element contains the readable form of the status.
1904  bgcolor = self.getBgcolorByReadableStatus(obj.readableStatus)
1905  lst.append(htmlTdStatusColorTpl % (bgcolor, obj.readableStatus))
1906 
1907  # Join the table data to one table row.
1908  trlst.append(htmlTrTpl % (''.join(lst)))
1909 
1910  # Join the table rows and insert into the template.
1911  htmlTable = htmlTableTpl % (''.join(trlst))
1912 
1913  # Define templates for LaTeX table parts of the documentation.
1914  latexTableTpl = r'''
1915  \latexonly
1916  \footnotesize
1917  \begin{longtable}{|l|l|l|l|}
1918  \hline
1919  {\bf Language} & {\bf Maintainer} & {\bf Contact address} & {\bf Status} \\
1920  \hline
1921  %s
1922  \hline
1923  \end{longtable}
1924  \normalsize
1925  \endlatexonly
1926  '''
1927  latexTableTpl = textwrap.dedent(latexTableTpl)
1928  latexLineTpl = '\n' + r' %s & %s & {\tt\tiny %s} & %s \\'
1929 
1930  # Loop through transl objects in the order of sorted readable names
1931  # and add generate the content of the LaTeX table.
1932  trlst = []
1933  for name, obj in self.langLst:
1934  # For LaTeX, more maintainers for the same language are
1935  # placed on separate rows in the table. The line separator
1936  # in the table is placed explicitly above the first
1937  # maintainer. Prepare the arguments for the LaTeX row template.
1938  maintainers = []
1939  if obj.classId in self.__maintainersDic:
1940  maintainers = self.__maintainersDic[obj.classId]
1941 
1942  lang = obj.langReadable
1943  maintainer = None # init
1944  email = None # init
1945  if obj.status == 'En':
1946  # Check whether there is the coupled non-English.
1947  classId = obj.classId[:-2]
1948  if classId in self.__translDic:
1949  langNE = self.__translDic[classId].langReadable
1950  maintainer = 'see the %s language' % langNE
1951  email = '~'
1952 
1953  if not maintainer and (obj.classId in self.__maintainersDic):
1954  lm = [ m[0] for m in self.__maintainersDic[obj.classId] ]
1955  maintainer = maintainers[0][0]
1956  email = maintainers[0][1]
1957 
1958  status = obj.readableStatus
1959 
1960  # Use the template to produce the line of the table and insert
1961  # the hline plus the constructed line into the table content.
1962  # The underscore character must be escaped.
1963  trlst.append('\n \\hline')
1964  s = latexLineTpl % (lang, maintainer, email, status)
1965  s = s.replace('_', '\\_')
1966  trlst.append(s)
1967 
1968  # List the other maintainers for the language. Do not set
1969  # lang and status for them.
1970  lang = '~'
1971  status = '~'
1972  for m in maintainers[1:]:
1973  maintainer = m[0]
1974  email = m[1]
1975  s = latexLineTpl % (lang, maintainer, email, status)
1976  s = s.replace('_', '\\_')
1977  trlst.append(s)
1978 
1979  # Join the table lines and insert into the template.
1980  latexTable = latexTableTpl % (''.join(trlst))
1981 
1982  # Put the HTML and LaTeX parts together and define the dic item.
1983  tplDic['informationTable'] = htmlTable + '\n' + latexTable
1984 
1985  # Insert the symbols into the document template and write it down.
1986  f = xopen(fDocName, 'w')
1987  f.write(doctpl % tplDic)
1988  f.close()
1989 
if __name__ == '__main__':

    # Python 2.6+ or any Python 3 is required, as the printed message says.
    # The original condition contained the dead branch
    # '(major == 3 and minor < 0)', which can never be true; 'major < 2'
    # also rejects ancient Python 1.x interpreters.
    major, minor, patch = (int(e) for e in platform.python_version_tuple())
    if major < 2 or (major == 2 and minor < 6):
        print('Python 2.6+ or Python 3.0+ are required for the script')
        sys.exit(1)

    # The translator manager builds the Transl objects, parses the related
    # sources, and keeps them in memory.
    trMan = TrManager()

    # Process the Transl objects and generate the output files.
    trMan.generateLanguageDoc()
    trMan.generateTranslatorReport()
def generateLanguageDoc(self)
Definition: translator.py:1762
def xopen(fname, mode='r', encoding='utf-8-sig')
Definition: translator.py:79
def __getNoTrSourceFilesLst(self)
Definition: translator.py:1409
def __tokenGenerator(self)
Definition: translator.py:157
int open(const char *, int)
Opens a file descriptor.
def report(self, fout)
Definition: translator.py:1146
def __checkForNotUsedTrMethods(self)
Definition: translator.py:1457
def __emails(self, classId)
Definition: translator.py:1484
def collectAdapterPrototypes(self)
Definition: translator.py:976
def getmtime(self)
Definition: translator.py:1198
def __unexpectedToken(self, status, tokenId, tokenLineNo)
Definition: translator.py:507
def generateTranslatorReport(self)
Definition: translator.py:1517
def __collectPublicMethodPrototypes(self, tokenIterator)
Definition: translator.py:724
def __loadMaintainers(self)
Definition: translator.py:1709
def __extractProcessedInfo(self)
Definition: translator.py:1329
def __removeUsedInFiles(self, fname, dic)
Definition: translator.py:1431
def collectPureVirtualPrototypes(self)
Definition: translator.py:519
def fill(s)
Definition: translator.py:93
def __collectClassInfo(self, tokenIterator)
Definition: translator.py:411
void split(std::string const &s, char c, OutIter dest)
Definition: split.h:35
def processing(self)
Definition: translator.py:1043
static QCString str
def getBgcolorByReadableStatus(self, readableStatus)
Definition: translator.py:1499
def __init__(self, fname, manager)
Definition: translator.py:127