fix up for newer dump formats
eliben committed Feb 24, 2015
1 parent d3fb1a1 · commit dada3dc
Showing 1 changed file with 11 additions and 4 deletions.

tools/htmlize-ast-dump.py
@@ -324,13 +324,17 @@ def analyze_line(tokens):
     """
     assert(len(tokens) > 2)
 
-    # The nesting level is always the first token
-    nesting = tokens[1].text
+    # The top-level TranslationUnitDecl node has no nesting
+    if tokens[1].text.startswith('Translation'):
+        nesting = ''
+        itok = 1
+    else:
+        nesting = tokens[1].text
+        itok = 2
 
     # The name is a concat of the following non-empty tokens, until something
     # that looks like the ID is encountered, or the line ends.
     name_parts = []
-    itok = 2
     while itok < len(tokens):
         t = tokens[itok].text.strip()
         if len(t) > 0:
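
Aside, not part of the commit: a sketch of why the special case above is needed. The sample lines mimic typical newer clang -ast-dump output, and split_nesting is a hypothetical helper, not a function from this tool. Every node except the top-level TranslationUnitDecl carries a tree-drawing nesting prefix such as '|-' or '`-', so code that unconditionally reads the nesting from tokens[1] breaks on the root line.

# Typical newer clang -ast-dump output; only the root lacks a nesting prefix:
#
#   TranslationUnitDecl 0x1f829d0 <<invalid sloc>> <invalid sloc>
#   |-TypedefDecl 0x1f82f10 <<invalid sloc>> <invalid sloc> implicit __int128_t
#   `-FunctionDecl 0x1f83450 <line:1:1, line:3:1> line:1:5 main 'int ()'

def split_nesting(line):
    """Split one dump line into (nesting_prefix, rest). Hypothetical helper."""
    i = 0
    while i < len(line) and line[i] in '|`- ':
        i += 1
    return line[:i], line[i:]

assert split_nesting('TranslationUnitDecl 0x1f829d0')[0] == ''
assert split_nesting('|-TypedefDecl 0x1f82f10')[0] == '|-'
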
@@ -412,7 +416,8 @@ def new_data_entry(line_entry):
     # Finally, add 'users' fields to all entries. This is an inversion of 'uses'
     for id, entry in nav_data.items():
         for used_id in entry['uses']:
-            nav_data[used_id]['users'].append(id)
+            if used_id in nav_data:
+                nav_data[used_id]['users'].append(id)
 
     return nav_data
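
Aside, not part of the commit: a minimal standalone sketch of the membership guard added above, using made-up IDs rather than the tool's real nav_data. The guard implies that a newer dump can reference a node ID that never appears as a node of its own; without it, nav_data[used_id] would raise KeyError on such a dangling reference.

nav_data = {
    '0x10': {'uses': ['0x20', '0xdead'], 'users': []},  # '0xdead' has no entry
    '0x20': {'uses': [], 'users': []},
}

for node_id, entry in nav_data.items():
    for used_id in entry['uses']:
        if used_id in nav_data:  # the guard added in this commit
            nav_data[used_id]['users'].append(node_id)

assert nav_data['0x20']['users'] == ['0x10']  # dangling '0xdead' is skipped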

@@ -465,6 +470,8 @@ def main():
         input_stream = (open(sys.argv[1], 'rb') if args.dump_file != '-' else
                         io.BufferedReader(sys.stdin.buffer))
         print(htmlize(input_stream))
+        #tokens = list(tokenize_line(l) for l in input_stream)
+        #print(list(tokens[0]))
     finally:
         input_stream.close()
