diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index c8d23b56..62c2026a 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -30,7 +30,7 @@ jobs: steps: # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 # Unfortunately, asciidoctor-pdf gets pathname-specific errors # building under the usual $GITHUB_WORKSPACE (/__w). As a workaround, @@ -40,8 +40,11 @@ jobs: run: | cd adoc make OUTDIR=/tmp/out QUIET= html pdf + - name: Verify reflow conformance + run: | + ./adoc/scripts/verify_reflow_conformance.sh - name: Archive generated files - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: spec-outputs path: | diff --git a/adoc/scripts/apiconventions.py b/adoc/scripts/apiconventions.py new file mode 100644 index 00000000..a1a1161a --- /dev/null +++ b/adoc/scripts/apiconventions.py @@ -0,0 +1,14 @@ +#!/usr/bin/python3 -i +# +# Copyright 2021-2023 The Khronos Group Inc. +# SPDX-License-Identifier: Apache-2.0 + +# Generic alias for working group-specific API conventions interface. + +# This import should be changed at the repository / working group level to +# specify the correct API's conventions. + + +import os + +from syclconventions import SYCLConventions as APIConventions diff --git a/adoc/scripts/doctransformer.py b/adoc/scripts/doctransformer.py new file mode 100644 index 00000000..05f6600f --- /dev/null +++ b/adoc/scripts/doctransformer.py @@ -0,0 +1,449 @@ +# Copyright 2023 The Khronos Group Inc. +# +# SPDX-License-Identifier: Apache-2.0 + +"""Utilities for automatic transformation of spec sources. Most of the logic +has to do with detecting asciidoc markup or block types that should not be +transformed (tables, code) and ignoring them. It is very likely there are many +asciidoc constructs not yet accounted for in the script, our usage of asciidoc +markup is intentionally somewhat limited. 
+""" + +import re +import sys +from reflib import logDiag, logWarn + +# Vulkan-specific - will consolidate into scripts/ like OpenXR soon +sys.path.insert(0, 'xml') + +from apiconventions import APIConventions +conventions = APIConventions() + +# Start of an asciidoctor conditional +# ifdef:: +# ifndef:: +conditionalStart = re.compile(r'^(ifdef|ifndef)::') + +# Markup that always ends a paragraph +# empty line or whitespace +# [block options] +# [[anchor]] +# // comment +# <<<< page break +# :attribute-setting +# macro-directive::terms +# + standalone list item continuation +# label:: labelled list - label must be standalone +endPara = re.compile(r'^( *|\[.*\]|//.*|<<<<|:.*|[a-z]+::.*|\+|.*::)$') + +# Special case of markup ending a paragraph, used to track the current +# command/structure. This allows for either OpenXR or Vulkan API path +# conventions. Nominally it should use the file suffix defined by the API +# conventions (conventions.file_suffix), except that XR uses '.txt' for +# generated API include files, not '.adoc' like its other includes. 
+includePat = re.compile( + r'include::(?P((../){1,4}|\{generated\}/)(generated/)?)(?P[\w]+)/(?P\w+)/(?P[^./]+).adoc[\[][\]]') + +# Markup that is OK in a contiguous paragraph but otherwise passed through +# .anything (except .., which indicates a literal block) +# === Section Titles +# image::path_to_image[attributes] (apparently a single colon is OK but less idiomatic) +endParaContinue = re.compile(r'^(\.[^.].*|=+ .*|image:.*\[.*\])$') + +# Markup for block delimiters whose contents *should* be reformatted +# -- (exactly two) (open block) +# **** (4 or more) (sidebar block) +# ==== (4 or more) (example block) +# ____ (4 or more) (quote block) +blockTransform = re.compile(r'^(--|[*=_]{4,})$') + +# Fake block delimiters for "common" VU statements +blockCommonTransform = '// Common Valid Usage\n' + +# Markup for block delimiters whose contents should *not* be transformed +# |=== (3 or more) (table) +# ``` (3 or more) (listing block) +# //// (4 or more) (comment block) +# ---- (4 or more) (listing block) +# .... (4 or more) (literal block) +# ++++ (4 or more) (passthrough block) +blockPassthrough = re.compile(r'^(\|={3,}|[`]{3}|[\-+./]{4,})$') + +# Markup for introducing lists (hanging paragraphs) +# * bullet +# ** bullet +# -- bullet +# . bullet +# :: bullet (no longer supported by asciidoctor 2) +# {empty}:: bullet +# 1. list item +# <1> source listing callout +beginBullet = re.compile(r'^ *([-*.]+|\{empty\}::|::|[0-9]+[.]|<([0-9]+)>) ') + +class TransformState: + """State machine for transforming documents. + + Represents the state of the transform operation""" + def __init__(self): + self.blockStack = [ None ] + """The last element is a line with the asciidoc block delimiter that is + currently in effect, such as '--', '----', '****', '====', or '++++'. 
+ This affects whether or not the block contents should be transformed.""" + self.transformStack = [ True ] + """The last element is True or False if the current blockStack contents + should be transformed.""" + self.vuStack = [ False ] + """the last element is True or False if the current blockStack contents + are an explicit Valid Usage block.""" + + self.para = [] + """list of lines in the paragraph being accumulated. + When this is non-empty, there is a current paragraph.""" + + self.lastTitle = False + """true if the previous line was a document title line + (e.g. :leveloffset: 0 - no attempt to track changes to this is made).""" + + self.leadIndent = 0 + """indent level (in spaces) of the first line of a paragraph.""" + + self.hangIndent = 0 + """indent level of the remaining lines of a paragraph.""" + + self.lineNumber = 0 + """line number being read from the input file.""" + + self.defaultApiName = '{refpage}' + self.apiName = self.defaultApiName + """String name of an API structure or command for VUID tag generation, + or {refpage} if one has not been included in this file yet.""" + + def incrLineNumber(self): + self.lineNumber = self.lineNumber + 1 + + def isOpenBlockDelimiter(self, line): + """Returns True if line is an open block delimiter. + This does not and should not match the listing block delimiter, + which is used inside refpage blocks both as a listing block and, + via an extension, as a nested open block.""" + return line.rstrip() == '--' + + def resetPara(self): + """Reset the paragraph, including its indentation level""" + self.para = [] + self.leadIndent = 0 + self.hangIndent = 0 + + def endBlock(self, line, transform, vuBlock): + """If beginning a block, tag whether or not to transform the contents. 
+ + vuBlock is True if the previous line indicates this is a Valid Usage + block.""" + if self.blockStack[-1] == line: + logDiag('endBlock line', self.lineNumber, + ': popping block end depth:', len(self.blockStack), + ':', line, end='') + + # Reset apiName at the end of an open block. + # Open blocks cannot be nested (at present), so this is safe. + if self.isOpenBlockDelimiter(line): + logDiag('reset apiName to empty at line', self.lineNumber) + self.apiName = self.defaultApiName + else: + logDiag('NOT resetting apiName to default at line', + self.lineNumber) + + self.blockStack.pop() + self.transformStack.pop() + self.vuStack.pop() + else: + # Start a block + self.blockStack.append(line) + self.transformStack.append(transform) + self.vuStack.append(vuBlock) + + logDiag('endBlock transform =', transform, ' line', self.lineNumber, + ': pushing block start depth', len(self.blockStack), + ':', line, end='') + + def addLine(self, line, indent): + """Add a line to the current paragraph""" + if self.para == []: + # Begin a new paragraph + self.para = [line] + self.leadIndent = indent + self.hangIndent = indent + else: + # Add a line to a paragraph. Increase the hanging indentation + # level - once. 
+ if self.hangIndent == self.leadIndent: + self.hangIndent = indent + self.para.append(line) + + +class TransformCallbackState: + """State given to the transformer callback object, derived from + TransformState.""" + def __init__(self, state): + self.isVU = state.vuStack[-1] if len(state.vuStack) > 0 else False + """Whether this paragraph is a VU.""" + + self.apiName = state.apiName + """String name of an API structure or command this paragraph belongs + to.""" + + self.leadIndent = state.leadIndent + """indent level (in spaces) of the first line of a paragraph.""" + + self.hangIndent = state.hangIndent + """indent level of the remaining lines of a paragraph.""" + + self.lineNumber = state.lineNumber + """line number being read from the input file.""" + + +class DocTransformer: + """A transformer that recursively goes over all spec files under a path. + + The transformer goes over all spec files under a path and does some basic + parsing. In particular, it tracks which section the current text belongs + to, whether it references a VU, etc and processes them in 'paragraph' + granularity. + The transformer takes a callback object with the following methods: + + - transformParagraph: Called when a paragraph is parsed. The paragraph + along with some information (such as whether it is a VU) is passed. The + function may transform the paragraph as necessary. + - onEmbeddedVUConditional: Called when an embedded VU conditional is + encountered. 
+ """ + def __init__(self, + filename, + outfile, + callback): + self.filename = filename + """base name of file being read from.""" + + self.outfile = outfile + """file handle to write to.""" + + self.state = TransformState() + """State of transformation""" + + self.callback = callback + """The transformation callback object""" + + def printLines(self, lines): + """Print an array of lines with newlines already present""" + if len(lines) > 0: + logDiag(':: printLines:', len(lines), 'lines: ', lines[0], end='') + + if self.outfile is not None: + for line in lines: + print(line, file=self.outfile, end='') + + def emitPara(self): + """Emit a paragraph, possibly transforming it depending on the block + context. + + Resets the paragraph accumulator.""" + if self.state.para != []: + transformedPara = self.state.para + + if self.state.transformStack[-1]: + callbackState = TransformCallbackState(self.state) + + transformedPara = self.callback.transformParagraph( + self.state.para, + callbackState) + + self.printLines(transformedPara) + + self.state.resetPara() + + def endPara(self, line): + """'line' ends a paragraph and should itself be emitted. + line may be None to indicate EOF or other exception.""" + logDiag('endPara line', self.state.lineNumber, ': emitting paragraph') + + # Emit current paragraph, this line, and reset tracker + self.emitPara() + + if line: + self.printLines([line]) + + def endParaContinue(self, line): + """'line' ends a paragraph (unless there is already a paragraph being + accumulated, e.g. len(para) > 0 - currently not implemented)""" + self.endPara(line) + + def endBlock(self, line, transform = False, vuBlock = False): + """'line' begins or ends a block. + + If beginning a block, tag whether or not to transform the contents. 
+ + vuBlock is True if the previous line indicates this is a Valid Usage + block.""" + self.endPara(line) + self.state.endBlock(line, transform, vuBlock) + + def endParaBlockTransform(self, line, vuBlock): + """'line' begins or ends a block. The paragraphs in the block *should* be + reformatted (e.g. a NOTE).""" + self.endBlock(line, transform = True, vuBlock = vuBlock) + + def endParaBlockPassthrough(self, line): + """'line' begins or ends a block. The paragraphs in the block should + *not* be reformatted (e.g. a code listing).""" + self.endBlock(line, transform = False) + + def addLine(self, line): + """'line' starts or continues a paragraph. + + Paragraphs may have "hanging indent", e.g. + + ``` + * Bullet point... + ... continued + ``` + + In this case, when the higher indentation level ends, so does the + paragraph.""" + logDiag('addLine line', self.state.lineNumber, ':', line, end='') + + # See https://stackoverflow.com/questions/13648813/what-is-the-pythonic-way-to-count-the-leading-spaces-in-a-string + indent = len(line) - len(line.lstrip()) + + # A hanging paragraph ends due to a less-indented line. + if self.state.para != [] and indent < self.state.hangIndent: + logDiag('addLine: line reduces indentation, emit paragraph') + self.emitPara() + + # A bullet point (or something that looks like one) always ends the + # current paragraph. + if beginBullet.match(line): + logDiag('addLine: line matches beginBullet, emit paragraph') + self.emitPara() + + self.state.addLine(line, indent) + + def apiMatch(self, oldname, newname): + """Returns whether oldname and newname match, up to an API suffix. 
+ This should use the API map instead of this heuristic, since aliases + like VkPhysicalDeviceVariablePointerFeaturesKHR -> + VkPhysicalDeviceVariablePointersFeatures are not recognized.""" + upper = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' + return oldname.rstrip(upper) == newname.rstrip(upper) + + def transformFile(self, lines): + """Transform lines, and possibly output to to the given file.""" + + for line in lines: + self.state.incrLineNumber() + + # Is this a title line (leading '= ' followed by text)? + thisTitle = False + + # The logic here is broken. If we are in a non-transformable block and + # this line *does not* end the block, it should always be + # accumulated. + + # Test for a blockCommonTransform delimiter comment first, to avoid + # treating it solely as a end-Paragraph marker comment. + if line == blockCommonTransform: + # Starting or ending a pseudo-block for "common" VU statements. + self.endParaBlockTransform(line, vuBlock = True) + + elif blockTransform.match(line): + # Starting or ending a block whose contents may be transformed. + # Blocks cannot be nested. + + # Is this is an explicit Valid Usage block? + vuBlock = (self.state.lineNumber > 1 and + lines[self.state.lineNumber-2] == '.Valid Usage\n') + + self.endParaBlockTransform(line, vuBlock) + + elif endPara.match(line): + # Ending a paragraph. Emit the current paragraph, if any, and + # prepare to begin a new paragraph. + + self.endPara(line) + + # If this is an include:: line starting the definition of a + # structure or command, track that for use in VUID generation. + + matches = includePat.search(line) + if matches is not None: + generated_type = matches.group('generated_type') + include_type = matches.group('category') + if generated_type == 'api' and include_type in ('protos', 'structs', 'funcpointers'): + apiName = matches.group('entity_name') + if self.state.apiName != self.state.defaultApiName: + # This happens when there are multiple API include + # lines in a single block. 
The style guideline is to + # always place the API which others are promoted to + # first. In virtually all cases, the promoted API + # will differ solely in the vendor suffix (or + # absence of it), which is benign. + if not self.apiMatch(self.state.apiName, apiName): + logDiag(f'Promoted API name mismatch at line {self.state.lineNumber}: {apiName} does not match self.state.apiName (this is OK if it is just a spelling alias)') + else: + self.state.apiName = apiName + + elif endParaContinue.match(line): + # For now, always just end the paragraph. + # Could check see if len(para) > 0 to accumulate. + + self.endParaContinue(line) + + # If it is a title line, track that + if line[0:2] == '= ': + thisTitle = True + + elif blockPassthrough.match(line): + # Starting or ending a block whose contents must not be + # transformed. These are tables, etc. Blocks cannot be nested. + # Note that the use of a listing block masquerading as an + # open block, via an extension, will not be formatted even + # though it should be. + # Fixing this would require looking at the previous line + # state for the '[open]' tag, and there are so few cases of + # this in the spec markup that it is not worth the trouble. + + self.endParaBlockPassthrough(line) + elif self.state.lastTitle: + # The previous line was a document title line. This line + # is the author / credits line and must not be transformed. + + self.endPara(line) + else: + # Just accumulate a line to the current paragraph. Watch out for + # hanging indents / bullet-points and track that indent level. + + self.addLine(line) + + # Commented out now that VU extractor supports this, but may + # need to refactor through a conventions object enable if + # OpenXR still needs this. + + # This test looks for disallowed conditionals inside Valid Usage + # blocks, by checking if (a) this line does not start a new VU + # (bullet point) and (b) the previous line starts an asciidoctor + # conditional (ifdef:: or ifndef::). 
+ # if (self.state.vuStack[-1] + # and not beginBullet.match(line) + # and conditionalStart.match(lines[self.state.lineNumber-2])): + # self.callback.onEmbeddedVUConditional(self.state) + + self.state.lastTitle = thisTitle + + # Cleanup at end of file + self.endPara(None) + + # Check for sensible block nesting + if len(self.state.blockStack) > 1: + logWarn('file', self.filename, + 'mismatched asciidoc block delimiters at EOF:', + self.state.blockStack[-1]) + diff --git a/adoc/scripts/reflib.py b/adoc/scripts/reflib.py index a8ce802f..db9353de 100644 --- a/adoc/scripts/reflib.py +++ b/adoc/scripts/reflib.py @@ -1,13 +1,15 @@ #!/usr/bin/python3 # -# Copyright (c) 2011-2023 The Khronos Group, Inc. +# Copyright 2016-2023 The Khronos Group Inc. +# # SPDX-License-Identifier: Apache-2.0 -# Utility functions for automatic ref page generation +# Utility functions for automatic ref page generation and other script stuff import io import re import sys +import subprocess # global errFile, warnFile, diagFile @@ -98,26 +100,26 @@ def logErr(*args, **kwargs): if file is not None: file.write(strfile.getvalue()) - sys.exit(1) + raise UserWarning(strfile.getvalue()) def isempty(s): """Return True if s is nothing but white space, False otherwise""" return len(''.join(s.split())) == 0 class pageInfo: - """Information about a ref page relative to the file it's extracted from.""" + """Information about a ref page relative to the file it is extracted from.""" def __init__(self): self.extractPage = True """True if page should be extracted""" self.Warning = None - """string warning if page is suboptimal or can't be generated""" + """string warning if page is suboptimal or cannot be generated""" self.embed = False """False or the name of the ref page this include is embedded within""" self.type = None - """'structs', 'protos', 'funcpointers', 'flags', 'enums'""" + """refpage type attribute - 'structs', 'protos', 'freeform', etc.""" self.name = None """struct/proto/enumerant/etc. 
name""" @@ -234,23 +236,27 @@ def lookupPage(pageMap, name): return pi def loadFile(filename): - """Load a file into a list of strings. Return the list or None on failure""" + """Load a file into a list of strings. Return the (list, newline_string) or (None, None) on failure""" + newline_string = "\n" try: - fp = open(filename, 'r', encoding='utf-8') + with open(filename, 'rb') as fp: + contents = fp.read() + if contents.count(b"\r\n") > 1: + newline_string = "\r\n" + + with open(filename, 'r', encoding='utf-8') as fp: + lines = fp.readlines() except: logWarn('Cannot open file', filename, ':', sys.exc_info()[0]) - return None - - file = fp.readlines() - fp.close() + return None, None - return file + return lines, newline_string def clampToBlock(line, minline, maxline): """Clamp a line number to be in the range [minline,maxline]. If the line number is None, just return it. - If minline is None, don't clamp to that value.""" + If minline is None, do not clamp to that value.""" if line is None: return line if minline and line < minline: @@ -278,8 +284,8 @@ def fixupRefs(pageMap, specFile, file): # # line to the include line, so autogeneration can at least # # pull the include out, but mark it not to be extracted. # # Examples include the host sync table includes in - # # chapters/fundamentals.txt and the table of Vk*Flag types in - # # appendices/boilerplate.txt. + # # chapters/fundamentals.adoc and the table of Vk*Flag types in + # # appendices/boilerplate.adoc. # if pi.begin is None and pi.validity is None and pi.end is None: # pi.begin = pi.include # pi.extractPage = False @@ -287,7 +293,7 @@ def fixupRefs(pageMap, specFile, file): # continue # Using open block delimiters, ref pages must *always* have a - # defined begin and end. If either is undefined, that's fatal. + # defined begin and end. If either is undefined, that is fatal. 
if pi.begin is None: pi.extractPage = False pi.Warning = 'Can\'t identify begin of ref page open block' @@ -298,7 +304,7 @@ def fixupRefs(pageMap, specFile, file): pi.Warning = 'Can\'t identify end of ref page open block' continue - # If there's no description of the page, infer one from the type + # If there is no description of the page, infer one from the type if pi.desc is None: if pi.type is not None: # pi.desc = pi.type[0:len(pi.type)-1] + ' (no short description available)' @@ -312,6 +318,9 @@ def fixupRefs(pageMap, specFile, file): # begin. funcpointer, proto, and struct pages infer the location of # the parameter and body sections. Other pages infer the location of # the body, but have no parameter sections. + # + # Probably some other types infer this as well - refer to list of + # all page types in genRef.py:emitPage() if pi.include is not None: if pi.type in ['funcpointers', 'protos', 'structs']: pi.param = nextPara(file, pi.include) @@ -323,13 +332,13 @@ def fixupRefs(pageMap, specFile, file): else: pi.Warning = 'Page does not have an API definition include::' - # It's possible for the inferred param and body lines to run past + # It is possible for the inferred param and body lines to run past # the end of block, if, for example, there is no parameter section. pi.param = clampToBlock(pi.param, pi.include, pi.end) pi.body = clampToBlock(pi.body, pi.param, pi.end) # We can get to this point with .include, .param, and .validity - # all being None, indicating those sections weren't found. + # all being None, indicating those sections were not found. logDiag('fixupRefs: after processing,', pi.name, 'looks like:') printPageInfo(pi, file) @@ -338,7 +347,7 @@ def fixupRefs(pageMap, specFile, file): # inferences about invalid pages. 
# # If a reference without a .end is entirely inside a valid reference, - # then it's intentionally embedded - may want to create an indirect + # then it is intentionally embedded - may want to create an indirect # page that links into the embedding page. This is done by a very # inefficient double loop, but the loop depth is small. for name in sorted(pageMap.keys()): @@ -348,7 +357,7 @@ def fixupRefs(pageMap, specFile, file): for embedName in sorted(pageMap.keys()): logDiag('fixupRefs: comparing', pi.name, 'to', embedName) embed = pageMap[embedName] - # Don't check embeddings which are themselves invalid + # Do not check embeddings which are themselves invalid if not embed.extractPage: logDiag('Skipping check for embedding in:', embed.name) continue @@ -373,9 +382,20 @@ def fixupRefs(pageMap, specFile, file): 'at line', pi.include) +def compatiblePageTypes(refpage_type, pagemap_type): + """Returns whether two refpage 'types' (categories) are compatible - + this is only true for 'consts' and 'enums' types.""" + + constsEnums = [ 'consts', 'enums' ] + + if refpage_type == pagemap_type: + return True + if refpage_type in constsEnums and pagemap_type in constsEnums: + return True + return False + # Patterns used to recognize interesting lines in an asciidoc source file. # These patterns are only compiled once. -INCSVAR_DEF = re.compile(r':INCS-VAR: (?P.*)') endifPat = re.compile(r'^endif::(?P[\w_+,]+)\[\]') beginPat = re.compile(r'^\[open,(?Prefpage=.*)\]') # attribute key/value pairs of an open block @@ -390,8 +410,7 @@ def fixupRefs(pageMap, specFile, file): # (category), and API name (entity_name). It could be put into the API # conventions object. 
INCLUDE = re.compile( - r'include::(?P((../){1,4}|\{INCS-VAR\}/|\{generated\}/)(generated/)?)(?P[\w]+)/(?P\w+)/(?P[^./]+).txt[\[][\]]') - + r'include::(?P((../){1,4}|\{generated\}/)(generated/)?)(?P[\w]+)/(?P\w+)/(?P[^./]+).adoc[\[][\]]') def findRefs(file, filename): """Identify reference pages in a list of strings, returning a dictionary of @@ -403,7 +422,7 @@ def findRefs(file, filename): # first detect the '[open,refpage=...]' markup delimiting the block; # skip past the '--' block delimiter on the next line; and identify the # '--' block delimiter closing the page. - # This can't be done solely with pattern matching, and requires state to + # This cannot be done solely with pattern matching, and requires state to # track 'inside/outside block'. # When looking for open blocks, possible states are: # 'outside' - outside a block @@ -420,26 +439,10 @@ def findRefs(file, filename): # Track the pageInfo object corresponding to the current open block pi = None - incsvar = None while (line < numLines): setLogLine(line) - # Look for a file-wide definition - matches = INCSVAR_DEF.match(file[line]) - if matches: - incsvar = matches.group('value') - logDiag('Matched INCS-VAR definition:', incsvar) - - line = line + 1 - continue - - # Perform INCS-VAR substitution immediately. - if incsvar and '{INCS-VAR}' in file[line]: - newLine = file[line].replace('{INCS-VAR}', incsvar) - logDiag('PERFORMING SUBSTITUTION', file[line], '->', newLine) - file[line] = newLine - # Only one of the patterns can possibly match. Add it to # the dictionary for that name. 
@@ -449,7 +452,7 @@ def findRefs(file, filename): logDiag('Matched open block pattern') attribs = matches.group('attribs') - # If the previous open block wasn't closed, raise an error + # If the previous open block was not closed, raise an error if openBlockState != 'outside': logErr('Nested open block starting at line', line, 'of', filename) @@ -551,7 +554,7 @@ def findRefs(file, filename): if gen_type == 'validity': logDiag('Matched validity pattern') if pi is not None: - if pi.type and refpage_type != pi.type: + if pi.type and not compatiblePageTypes(refpage_type, pi.type): logWarn('ERROR: pageMap[' + name + '] type:', pi.type, 'does not match type:', refpage_type) pi.type = refpage_type @@ -568,7 +571,7 @@ def findRefs(file, filename): if pi is not None: if pi.include is not None: logDiag('found multiple includes for this block') - if pi.type and refpage_type != pi.type: + if pi.type and not compatiblePageTypes(refpage_type, pi.type): logWarn('ERROR: pageMap[' + name + '] type:', pi.type, 'does not match type:', refpage_type) pi.type = refpage_type @@ -635,3 +638,27 @@ def findRefs(file, filename): setLogLine(None) return pageMap + + +def getBranch(): + """Determine current git branch + + Returns (branch name, ''), or (None, stderr output) if the branch name + cannot be determined""" + + command = [ 'git', 'symbolic-ref', '--short', 'HEAD' ] + results = subprocess.run(command, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + # git command failed + if len(results.stderr) > 0: + return (None, results.stderr) + + # Remove newline from output and convert to a string + branch = results.stdout.rstrip().decode() + if len(branch) > 0: + # Strip trailing newline + branch = results.stdout.decode()[0:-1] + + return (branch, '') diff --git a/adoc/scripts/reflow.py b/adoc/scripts/reflow.py index 7dda62e0..86f89fab 100755 --- a/adoc/scripts/reflow.py +++ b/adoc/scripts/reflow.py @@ -1,26 +1,27 @@ #!/usr/bin/python3 # -# Copyright (c) 2011-2023 The Khronos Group, Inc. 
+# Copyright 2016-2023 The Khronos Group Inc. +# # SPDX-License-Identifier: Apache-2.0 """Used for automatic reflow of spec sources to satisfy the agreed layout to -minimize git churn. Most of the logic has to do with detecting asciidoc -markup or block types that *shouldn't* be reflowed (tables, code) and -ignoring them. It's very likely there are many asciidoc constructs not yet -accounted for in the script, our usage of asciidoc markup is intentionally -somewhat limited. - -Also used to insert identifying tags on explicit Valid Usage statements. +minimize git churn. Also used to insert identifying tags on explicit Valid +Usage statements. Usage: `reflow.py [-noflow] [-tagvu] [-nextvu #] [-overwrite] [-out dir] [-suffix str] files` - `-noflow` acts as a passthrough, instead of reflowing text. Other processing may occur. - `-tagvu` generates explicit VUID tag for Valid Usage statements which - don't already have them. + do not already have them. - `-nextvu #` starts VUID tag generation at the specified # instead of the value wired into the `reflow.py` script. - `-overwrite` updates in place (can be risky, make sure there are backups) +- `-check FAIL|WARN` runs some consistency checks on markup. If the checks + fail and the WARN option is given, the script will simply print a warning + message. If the checks fail and the FAIL option is given, the script will + exit with an error code. FAIL is for use with continuous integration + scripts enforcing the checks. - `-out` specifies directory to create output file in, default 'out' - `-suffix` specifies suffix to add to output files, default '' - `files` are asciidoc source files from the spec to reflow. 
@@ -29,139 +30,73 @@ import argparse import os import re -import subprocess import sys -from reflib import loadFile, logDiag, logWarn, logErr, setLogFile -from vuidCounts import vuidCounts +from reflib import loadFile, logDiag, logWarn, logErr, setLogFile, getBranch +from pathlib import Path +import doctransformer # Vulkan-specific - will consolidate into scripts/ like OpenXR soon sys.path.insert(0, 'xml') -from vkconventions import VulkanConventions as APIConventions +from apiconventions import APIConventions conventions = APIConventions() -# Markup that always ends a paragraph -# empty line or whitespace -# [block options] -# [[anchor]] -# // comment -# <<<< page break -# :attribute-setting -# macro-directive::terms -# + standalone list item continuation -# label:: labelled list - label must be standalone -endPara = re.compile(r'^( *|\[.*\]|//.*|<<<<|:.*|[a-z]+::.*|\+|.*::)$') - -# Special case of markup ending a paragraph, used to track the current -# command/structure. This allows for either OpenXR or Vulkan API path -# conventions. Nominally it should use the file suffix defined by the API -# conventions (conventions.file_suffix), except that XR uses '.txt' for -# generated API include files, not '.adoc' like its other includes. -includePat = re.compile( - r'include::(?P((../){1,4}|\{INCS-VAR\}/|\{generated\}/)(generated/)?)(?P[\w]+)/(?P\w+)/(?P[^./]+).txt[\[][\]]') - -# Find the first pname: pattern in a Valid Usage statement -pnamePat = re.compile(r'pname:(?P\w+)') - -# Markup that's OK in a contiguous paragraph but otherwise passed through -# .anything -# === Section Titles -endParaContinue = re.compile(r'^(\..*|=+ .*)$') - -# Markup for block delimiters whose contents *should* be reformatted -# -- (exactly two) (open block) -# **** (4 or more) (sidebar block - why do we have these?!) 
-# ==== (4 or more) (example block) -# ____ (4 or more) (quote block) -blockReflow = re.compile(r'^(--|[*=_]{4,})$') - -# Fake block delimiters for "common" VU statements -blockCommonReflow = '// Common Valid Usage\n' - -# Markup for block delimiters whose contents should *not* be reformatted -# |=== (3 or more) (table) -# ++++ (4 or more) (passthrough block) -# .... (4 or more) (literal block) -# //// (4 or more) (comment block) -# ---- (4 or more) (listing block) -# ``` (3 or more) (listing block) -# **** (4 or more) (sidebar block) -blockPassthrough = re.compile(r'^(\|={3,}|[`]{3}|[-+./]{4,})$') - -# Markup for introducing lists (hanging paragraphs) -# * bullet -# ** bullet -# -- bullet -# . bullet -# :: bullet (no longer supported by asciidoctor 2) -# {empty}:: bullet -# 1. list item -beginBullet = re.compile(r'^ *([*-.]+|\{empty\}::|::|[0-9]+[.]) ') +# Patterns used to recognize interesting lines in an asciidoc source file. +# These patterns are only compiled once. + +# Find the pname: or code: patterns in a Valid Usage statement +pnamePat = re.compile(r'pname:(?P\{?\w+\}?)') +codePat = re.compile(r'code:(?P\w+)') # Text that (may) not end sentences # A single letter followed by a period, typically a middle initial. endInitial = re.compile(r'^[A-Z]\.$') -# An abbreviation, which doesn't (usually) end a line. -endAbbrev = re.compile(r'(e\.g|i\.e|c\.f|vs)\.$', re.IGNORECASE) +# An abbreviation, which does not (usually) end a line. +endAbbrev = re.compile(r'(e\.g|i\.e|c\.f|vs|co|ltd)\.$', re.IGNORECASE) + +# Explicit Valid Usage list item with one or more leading asterisks +# The re.DOTALL is needed to prevent vuPat.search() from stripping +# the trailing newline. +vuPat = re.compile(r'^(?P [*]+)( *)(?P.*)', re.DOTALL) + +# VUID with the numeric portion captured in the match object +vuidPat = re.compile(r'VUID-[^-]+-[^-]+-(?P[0-9]+)') -class ReflowState: - """State machine for reflowing. 
+# Pattern matching leading nested bullet points +global nestedVuPat +nestedVuPat = re.compile(r'^ \*\*') - Represents the state of the reflow operation""" +class ReflowCallbacks: + """State and arguments for reflowing. + + Used with DocTransformer to reflow a file.""" def __init__(self, filename, - margin = 76, - file = sys.stdout, + vuidDict, + margin = 80, breakPeriod = True, reflow = True, nextvu = None, - maxvu = None): + maxvu = None, + check = True): - self.blockStack = [ None ] - """The last element is a line with the asciidoc block delimiter that's currently in effect, - such as '--', '----', '****', '======', or '+++++++++'. - This affects whether or not the block contents should be formatted.""" + self.filename = filename + """base name of file being read from.""" - self.reflowStack = [ True ] - """The last element is True or False if the current blockStack contents - should be reflowed.""" - self.vuStack = [ False ] - """the last element is True or False if the current blockStack contents - are an explicit Valid Usage block.""" + self.check = check + """Whether consistency checks must be performed.""" self.margin = margin """margin to reflow text to.""" - self.para = [] - """list of lines in the paragraph being accumulated. - When this is non-empty, there is a current paragraph.""" - - self.lastTitle = False - """true if the previous line was a document title line - (e.g. 
:leveloffset: 0 - no attempt to track changes to this is made).""" - - self.leadIndent = 0 - """indent level (in spaces) of the first line of a paragraph.""" - - self.hangIndent = 0 - """indent level of the remaining lines of a paragraph.""" - - self.file = file - """file pointer to write to.""" - - self.filename = filename - """base name of file being read from.""" - - self.lineNumber = 0 - """line number being read from the input file.""" - self.breakPeriod = breakPeriod - """True if justification should break to a new line after the end of a sentence.""" + """True if justification should break to a new line after the end of a + sentence.""" self.breakInitial = True - """True if justification should break to a new line after - something that appears to be an initial in someone's name. **TBD**""" + """True if justification should break to a new line after something + that appears to be an initial in someone's name. **TBD**""" self.reflow = reflow """True if text should be reflowed, False to pass through unchanged.""" @@ -171,7 +106,8 @@ def __init__(self, self.vuFormat = '{0}-{1}-{2}-{3:0>5d}' """Format string for generating Valid Usage tags. - First argument is vuPrefix, second is command/struct name, third is parameter name, fourth is the tag number.""" + First argument is vuPrefix, second is command/struct name, third is + parameter name, fourth is the tag number.""" self.nextvu = nextvu """Integer to start tagging un-numbered Valid Usage statements with, @@ -181,23 +117,18 @@ def __init__(self, """Maximum tag to use for Valid Usage statements, or None if no tagging should be done.""" - self.apiName = '' - """String name of a Vulkan structure or command for VUID tag generation, - or None if one hasn't been included in this file yet.""" - - def incrLineNumber(self): - self.lineNumber = self.lineNumber + 1 + self.vuidDict = vuidDict + """Dictionary of VUID numbers found, containing a list of (file, VUID) + on which that number was found. 
This is used to warn on duplicate + VUIDs.""" - def printLines(self, lines): - """Print an array of lines with newlines already present""" - logDiag(':: printLines:', len(lines), 'lines: ', lines[0], end='') - for line in lines: - print(line, file=self.file, end='') + self.warnCount = 0 + """Count of markup check warnings encountered.""" def endSentence(self, word): """Return True if word ends with a sentence-period, False otherwise. - Allows for contraction cases which won't end a line: + Allows for contraction cases which will not end a line: - A single letter (if breakInitial is True) - Abbreviations: 'c.f.', 'e.g.', 'i.e.' (or mixed-case versions)""" @@ -212,12 +143,142 @@ def vuidAnchor(self, word): """Return True if word is a Valid Usage ID Tag anchor.""" return (word[0:7] == '[[VUID-') - def isOpenBlockDelimiter(self, line): - """Returns True if line is an open block delimiter.""" - return line[0:2] == '--' + def visitVUID(self, vuid, line): + if vuid not in self.vuidDict: + self.vuidDict[vuid] = [] + self.vuidDict[vuid].append([self.filename, line]) - def reflowPara(self): - """Reflow the current paragraph, respecting the paragraph lead and + def gatherVUIDs(self, para): + """Gather VUID tags and add them to vuidDict. Used to verify no-duplicate VUIDs""" + for line in para: + line = line.rstrip() + + matches = vuidPat.search(line) + if matches is not None: + vuid = matches.group('vuid') + self.visitVUID(vuid, line) + + def addVUID(self, para, state): + hangIndent = state.hangIndent + + """Generate and add VUID if necessary.""" + if not state.isVU or self.nextvu is None: + return para, hangIndent + + # If: + # - this paragraph is in a Valid Usage block, + # - VUID tags are being assigned, + # Try to assign VUIDs + + if nestedVuPat.search(para[0]): + # Do not assign VUIDs to nested bullet points. + # These are now allowed VU markup syntax, but will never + # themselves be VUs, just subsidiary points. 
+ return para, hangIndent + + # Skip if there is already a VUID assigned + if self.vuPrefix in para[0]: + return para, hangIndent + + # If: + # - a tag is not already present, and + # - the paragraph is a properly marked-up list item + # Then add a VUID tag starting with the next free ID. + + # Split the first line after the bullet point + matches = vuPat.search(para[0]) + if matches is None: + # There are only a few cases of this, and they are all + # legitimate. Leave detecting this case to another tool + # or hand inspection. + # logWarn(self.filename + ': Unexpected non-bullet item in VU block (harmless if following an ifdef):', + # para[0]) + return para, hangIndent + + outPara = para + + logDiag('addVUID: Matched vuPat on line:', para[0], end='') + head = matches.group('head') + tail = matches.group('tail') + + # Find pname: or code: tags in the paragraph for the purposes of VUID + # tag generation. pname:{attribute}s are prioritized to make sure + # commonvalidity VUIDs end up being unique. Otherwise, the first pname: + # or code: tag in the paragraph is used, which may not always be + # correct, but should be highly reliable. + pnameMatches = re.findall(pnamePat, ' '.join(para)) + codeMatches = re.findall(codePat, ' '.join(para)) + + # Prioritize {attribute}s, but not the ones in the exception list + # below. These have complex expressions including ., ->, or [index] + # which makes them unsuitable for VUID tags. Ideally these would be + # automatically discovered. 
+ attributeExceptionList = ['maxinstancecheck', 'regionsparam', + 'rayGenShaderBindingTableAddress', + 'rayGenShaderBindingTableStride', + 'missShaderBindingTableAddress', + 'missShaderBindingTableStride', + 'hitShaderBindingTableAddress', + 'hitShaderBindingTableStride', + 'callableShaderBindingTableAddress', + 'callableShaderBindingTableStride', + ] + attributeMatches = [match for match in pnameMatches if + match[0] == '{' and + match[1:-1] not in attributeExceptionList] + nonattributeMatches = [match for match in pnameMatches if + match[0] != '{'] + + if len(attributeMatches) > 0: + paramName = attributeMatches[0] + elif len(nonattributeMatches) > 0: + paramName = nonattributeMatches[0] + elif len(codeMatches) > 0: + paramName = codeMatches[0] + else: + paramName = 'None' + logWarn(self.filename, + 'No param name found for VUID tag on line:', + para[0]) + + # Transform: + # + # * VU first line + # + # To: + # + # * [[VUID]] + # VU first line + # + tagLine = (head + ' [[' + + self.vuFormat.format(self.vuPrefix, + state.apiName, + paramName, + self.nextvu) + ']]\n') + self.visitVUID(str(self.nextvu), tagLine) + + newLines = [tagLine] + if tail.strip() != '': + logDiag('transformParagraph first line matches bullet point -' + 'single line, assuming hangIndent @ input line', + state.lineNumber) + hangIndent = len(head) + 1 + newLines.append(''.ljust(hangIndent) + tail) + + logDiag('Assigning', self.vuPrefix, state.apiName, self.nextvu, + ' on line:\n' + para[0], '->\n' + newLines[0] + 'END', '\n' + newLines[1] if len(newLines) > 1 else '') + + # Do not actually assign the VUID unless it is in the reserved range + if self.nextvu <= self.maxvu: + if self.nextvu == self.maxvu: + logWarn('Skipping VUID assignment, no more VUIDs available') + outPara = newLines + para[1:] + self.nextvu = self.nextvu + 1 + + return outPara, hangIndent + + def transformParagraph(self, para, state): + """Reflow a given paragraph, respecting the paragraph lead and hanging indentation levels. 
The algorithm also respects trailing '+' signs that indicate embedded newlines, @@ -225,12 +286,18 @@ def reflowPara(self): Just return the paragraph unchanged if the -noflow argument was given.""" + + self.gatherVUIDs(para) + + # If this is a VU that is missing a VUID, add it to the paragraph now. + para, hangIndent = self.addVUID(para, state) + if not self.reflow: - return self.para + return para - logDiag('reflowPara lead indent = ', self.leadIndent, - 'hangIndent =', self.hangIndent, - 'para:', self.para[0], end='') + logDiag('transformParagraph lead indent = ', state.leadIndent, + 'hangIndent =', state.hangIndent, + 'para:', para[0], end='') # Total words processed (we care about the *first* word vs. others) wordCount = 0 @@ -238,13 +305,15 @@ def reflowPara(self): # Tracks the *previous* word processed. It must not be empty. prevWord = ' ' - #import pdb; pdb.set_trace() + # Track the previous line and paragraph being indented, if any + outLine = None + outPara = [] - for line in self.para: + for line in para: line = line.rstrip() words = line.split() - # logDiag('reflowPara: input line =', line) + # logDiag('transformParagraph: input line =', line) numWords = len(words) - 1 for i in range(0, numWords + 1): @@ -253,13 +322,15 @@ def reflowPara(self): wordCount += 1 endEscape = False - if i == numWords and word == '+': - # Trailing ' +' must stay on the same line + if i == numWords and word in ('+', '-'): + # Trailing ' +' or ' -' must stay on the same line endEscape = word - # logDiag('reflowPara last word of line =', word, 'prevWord =', prevWord, 'endEscape =', endEscape) + # logDiag('transformParagraph last word of line =', word, + # 'prevWord =', prevWord, 'endEscape =', endEscape) else: + # logDiag('transformParagraph wordCount =', wordCount, + # 'word =', word, 'prevWord =', prevWord) pass - # logDiag('reflowPara wordCount =', wordCount, 'word =', word, 'prevWord =', prevWord) if wordCount == 1: # The first word of the paragraph is treated specially. 
@@ -267,23 +338,22 @@ def reflowPara(self): # done prior to looping over lines and words, so all the # setup logic is done here. - outPara = [] - outLine = ''.ljust(self.leadIndent) + word - outLineLen = self.leadIndent + wordLen + outLine = ''.ljust(state.leadIndent) + word + outLineLen = state.leadIndent + wordLen # If the paragraph begins with a bullet point, generate - # a hanging indent level if there isn't one already. - if beginBullet.match(self.para[0]): + # a hanging indent level if there is not one already. + if doctransformer.beginBullet.match(para[0]): bulletPoint = True - if len(self.para) > 1: - logDiag('reflowPara first line matches bullet point', + if len(para) > 1: + logDiag('transformParagraph first line matches bullet point', 'but indent already hanging @ input line', - self.lineNumber) + state.lineNumber) else: - logDiag('reflowPara first line matches bullet point -' + logDiag('transformParagraph first line matches bullet point -' 'single line, assuming hangIndent @ input line', - self.lineNumber) - self.hangIndent = outLineLen + 1 + state.lineNumber) + hangIndent = outLineLen + 1 else: bulletPoint = False else: @@ -312,14 +382,22 @@ def reflowPara(self): elif self.vuidAnchor(word): # If the new word is a Valid Usage anchor, break the # line afterwards. Note that this should only happen - # immediately after a bullet point, but we don't + # immediately after a bullet point, but we do not # currently check for this. (addWord, closeLine, startLine) = (True, True, False) + elif newLen > self.margin: if firstBullet: # If the word follows a bullet point, add it to # the current line no matter its length. + (addWord, closeLine, startLine) = (True, True, False) + elif doctransformer.beginBullet.match(word + ' '): + # If the word *is* a bullet point, add it to + # the current line no matter its length. + # This avoids an innocent inline '-' or '*' + # turning into a bogus bullet point. 
+ (addWord, closeLine, startLine) = (True, True, False) else: # The word overflows, so add it to a new line. @@ -342,12 +420,12 @@ def reflowPara(self): outLine += ' ' + word outLineLen = newLen else: - # Fall through to startLine case if there's no + # Fall through to startLine case if there is no # current line yet. startLine = True # Add current line to the output paragraph. Force - # starting a new line, although we don't yet know if it + # starting a new line, although we do not yet know if it # will ever have contents. if closeLine: if outLine: @@ -356,334 +434,77 @@ def reflowPara(self): # Start a new line and add a word to it if startLine: - outLine = ''.ljust(self.hangIndent) + word - outLineLen = self.hangIndent + wordLen + outLine = ''.ljust(hangIndent) + word + outLineLen = hangIndent + wordLen # Track the previous word, for use in breaking at end of # a sentence prevWord = word - # Add this line to the output paragraph. + # Add last line to the output paragraph. if outLine: outPara.append(outLine + '\n') return outPara - def emitPara(self): - """Emit a paragraph, possibly reflowing it depending on the block context. - - Resets the paragraph accumulator.""" - if self.para != []: - if self.vuStack[-1] and self.nextvu is not None: - # If: - # - this paragraph is in a Valid Usage block, - # - VUID tags are being assigned, - # Try to assign VUIDs - - if nestedVuPat.search(self.para[0]): - # Check for nested bullet points. These should not be - # assigned VUIDs, nor present at all, because they break - # the VU extractor. - logWarn(self.filename + ': Invalid nested bullet point in VU block:', self.para[0]) - elif self.vuPrefix not in self.para[0]: - # If: - # - a tag is not already present, and - # - the paragraph is a properly marked-up list item - # Then add a VUID tag starting with the next free ID. 
- - # Split the first line after the bullet point - matches = vuPat.search(self.para[0]) - if matches is not None: - logDiag('findRefs: Matched vuPat on line:', self.para[0], end='') - head = matches.group('head') - tail = matches.group('tail') - - # Use the first pname: statement in the paragraph as - # the parameter name in the VUID tag. This won't always - # be correct, but should be highly reliable. - for vuLine in self.para: - matches = pnamePat.search(vuLine) - if matches is not None: - break - - if matches is not None: - paramName = matches.group('param') - else: - paramName = 'None' - logWarn(self.filename, - 'No param name found for VUID tag on line:', - self.para[0]) - - newline = (head + ' [[' + - self.vuFormat.format(self.vuPrefix, - self.apiName, - paramName, - self.nextvu) + ']] ' + tail) - - logDiag('Assigning', self.vuPrefix, self.apiName, self.nextvu, - ' on line:', self.para[0], '->', newline, 'END') - - # Don't actually assign the VUID unless it's in the reserved range - if self.nextvu <= self.maxvu: - if self.nextvu == self.maxvu: - logWarn('Skipping VUID assignment, no more VUIDs available') - self.para[0] = newline - self.nextvu = self.nextvu + 1 - # else: - # There are only a few cases of this, and they're all - # legitimate. Leave detecting this case to another tool - # or hand inspection. - # logWarn(self.filename + ': Unexpected non-bullet item in VU block (harmless if following an ifdef):', - # self.para[0]) - - if self.reflowStack[-1]: - self.printLines(self.reflowPara()) - else: - self.printLines(self.para) - - # Reset the paragraph, including its indentation level - self.para = [] - self.leadIndent = 0 - self.hangIndent = 0 - - def endPara(self, line): - """'line' ends a paragraph and should itself be emitted. 
- line may be None to indicate EOF or other exception.""" - logDiag('endPara line', self.lineNumber, ': emitting paragraph') - - # Emit current paragraph, this line, and reset tracker - self.emitPara() - - if line: - self.printLines( [ line ] ) - - def endParaContinue(self, line): - """'line' ends a paragraph (unless there's already a paragraph being - accumulated, e.g. len(para) > 0 - currently not implemented)""" - self.endPara(line) - - def endBlock(self, line, reflow = False, vuBlock = False): - """'line' begins or ends a block. - - If beginning a block, tag whether or not to reflow the contents. - - vuBlock is True if the previous line indicates this is a Valid Usage block.""" - self.endPara(line) - - if self.blockStack[-1] == line: - logDiag('endBlock line', self.lineNumber, - ': popping block end depth:', len(self.blockStack), - ':', line, end='') - - # Reset apiName at the end of an open block. - # Open blocks cannot be nested, so this is safe. - if self.isOpenBlockDelimiter(line): - logDiag('reset apiName to empty at line', self.lineNumber) - self.apiName = '' - else: - logDiag('NOT resetting apiName to empty at line', self.lineNumber) - - self.blockStack.pop() - self.reflowStack.pop() - self.vuStack.pop() - else: - # Start a block - self.blockStack.append(line) - self.reflowStack.append(reflow) - self.vuStack.append(vuBlock) - - logDiag('endBlock reflow =', reflow, ' line', self.lineNumber, - ': pushing block start depth', len(self.blockStack), - ':', line, end='') - - def endParaBlockReflow(self, line, vuBlock): - """'line' begins or ends a block. The paragraphs in the block *should* be - reformatted (e.g. a NOTE).""" - self.endBlock(line, reflow = True, vuBlock = vuBlock) - - def endParaBlockPassthrough(self, line): - """'line' begins or ends a block. The paragraphs in the block should - *not* be reformatted (e.g. a code listing).""" - self.endBlock(line, reflow = False) - - def addLine(self, line): - """'line' starts or continues a paragraph. 
- - Paragraphs may have "hanging indent", e.g. - - ``` - * Bullet point... - ... continued - ``` - - In this case, when the higher indentation level ends, so does the - paragraph.""" - logDiag('addLine line', self.lineNumber, ':', line, end='') - - # See https://stackoverflow.com/questions/13648813/what-is-the-pythonic-way-to-count-the-leading-spaces-in-a-string - indent = len(line) - len(line.lstrip()) - - # A hanging paragraph ends due to a less-indented line. - if self.para != [] and indent < self.hangIndent: - logDiag('addLine: line reduces indentation, emit paragraph') - self.emitPara() - - # A bullet point (or something that looks like one) always ends the - # current paragraph. - if beginBullet.match(line): - logDiag('addLine: line matches beginBullet, emit paragraph') - self.emitPara() - - if self.para == []: - # Begin a new paragraph - self.para = [ line ] - self.leadIndent = indent - self.hangIndent = indent - else: - # Add a line to a paragraph. Increase the hanging indentation - # level - once. - if self.hangIndent == self.leadIndent: - self.hangIndent = indent - self.para.append(line) - -def apiMatch(oldname, newname): - """Returns whether oldname and newname match, up to an API suffix.""" - upper = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' - return oldname.rstrip(upper) == newname.rstrip(upper) + def onEmbeddedVUConditional(self, state): + if self.check: + logWarn('Detected embedded Valid Usage conditional: {}:{}'.format( + self.filename, state.lineNumber - 1)) + # Keep track of warning check count + self.warnCount = self.warnCount + 1 def reflowFile(filename, args): logDiag('reflow: filename', filename) - lines = loadFile(filename) - if lines is None: - return - # Output file handle and reflow object for this file. There are no race - # conditions on overwriting the input, but it's not recommended unless + # conditions on overwriting the input, but it is not recommended unless # you have backing store such as git. 
+ lines, newline_string = loadFile(filename) + if lines is None: + return + if args.overwrite: outFilename = filename else: - outFilename = args.outDir + '/' + os.path.basename(filename) + args.suffix - - try: - fp = open(outFilename, 'w', encoding='utf8') - except: - logWarn('Cannot open output file', filename, ':', sys.exc_info()[0]) - return - - state = ReflowState(filename, - file = fp, - reflow = not args.noflow, - nextvu = args.nextvu, - maxvu = args.maxvu) - - for line in lines: - state.incrLineNumber() - - # Is this a title line (leading '= ' followed by text)? - thisTitle = False - - # The logic here is broken. If we're in a non-reflowable block and - # this line *doesn't* end the block, it should always be - # accumulated. - - # Test for a blockCommonReflow delimiter comment first, to avoid - # treating it solely as a end-Paragraph marker comment. - if line == blockCommonReflow: - # Starting or ending a pseudo-block for "common" VU statements. - - # Common VU statements use an Asciidoc variable as the apiName, - # instead of inferring it from the most recent API include. - state.apiName = '{refpage}' - state.endParaBlockReflow(line, vuBlock = True) - - elif blockReflow.match(line): - # Starting or ending a block whose contents may be reflowed. - # Blocks cannot be nested. - - # Is this is an explicit Valid Usage block? - vuBlock = (state.lineNumber > 1 and - lines[state.lineNumber-2] == '.Valid Usage\n') - - state.endParaBlockReflow(line, vuBlock) - - elif endPara.match(line): - # Ending a paragraph. Emit the current paragraph, if any, and - # prepare to begin a new paragraph. - - state.endPara(line) - - # If this is an include:: line starting the definition of a - # structure or command, track that for use in VUID generation. 
- - matches = includePat.search(line) - if matches is not None: - generated_type = matches.group('generated_type') - include_type = matches.group('category') - if generated_type == 'api' and include_type in ('protos', 'structs'): - apiName = matches.group('entity_name') - if state.apiName != '': - # This happens when there are multiple API include - # lines in a single block. The style guideline is to - # always place the API which others are promoted to - # first. In virtually all cases, the promoted API - # will differ solely in the vendor suffix (or - # absence of it), which is benign. - if not apiMatch(state.apiName, apiName): - logWarn('Promoted API name mismatch at line', - state.lineNumber, - ':', - 'apiName:', apiName, - 'does not match state.apiName:', - state.apiName) - else: - state.apiName = apiName - - elif endParaContinue.match(line): - # For now, always just end the paragraph. - # Could check see if len(para) > 0 to accumulate. - - state.endParaContinue(line) - - # If it's a title line, track that - if line[0:2] == '= ': - thisTitle = True + outDir = Path(args.outDir).resolve() + outDir.mkdir(parents=True, exist_ok=True) - elif blockPassthrough.match(line): - # Starting or ending a block whose contents must not be reflowed. - # These are tables, etc. Blocks cannot be nested. + outFilename = str(outDir / (os.path.basename(filename) + args.suffix)) - state.endParaBlockPassthrough(line) - elif state.lastTitle: - # The previous line was a document title line. This line - # is the author / credits line and must not be reflowed. - - state.endPara(line) - else: - # Just accumulate a line to the current paragraph. Watch out for - # hanging indents / bullet-points and track that indent level. 
- - state.addLine(line) + if args.nowrite: + fp = None + else: + try: + fp = open(outFilename, 'w', encoding='utf8', newline=newline_string) + except: + logWarn('Cannot open output file', outFilename, ':', sys.exc_info()[0]) + return - state.lastTitle = thisTitle + callback = ReflowCallbacks(filename, + args.vuidDict, + margin = args.margin, + reflow = not args.noflow, + nextvu = args.nextvu, + maxvu = args.maxvu, + check = args.check) - # Cleanup at end of file - state.endPara(None) + transformer = doctransformer.DocTransformer(filename, + outfile = fp, + callback = callback) - # Sanity check on block nesting - if len(state.blockStack) > 1: - logWarn('file', filename, - 'mismatched asciidoc block delimiters at EOF:', - state.blockStack[-1]) + transformer.transformFile(lines) - fp.close() + if fp is not None: + fp.close() # Update the 'nextvu' value - if args.nextvu != state.nextvu: - logWarn('Updated nextvu to', state.nextvu, 'after file', filename) - args.nextvu = state.nextvu + if args.nextvu != callback.nextvu: + logWarn('Updated nextvu to', callback.nextvu, 'after file', filename) + args.nextvu = callback.nextvu + + args.warnCount += callback.warnCount def reflowAllAdocFiles(folder_to_reflow, args): for root, subdirs, files in os.walk(folder_to_reflow): @@ -700,18 +521,6 @@ def reflowAllAdocFiles(folder_to_reflow, args): else: print(' Skipping = %s' % sub_folder) -# Patterns used to recognize interesting lines in an asciidoc source file. -# These patterns are only compiled once. - -# Explicit Valid Usage list item with one or more leading asterisks -# The re.DOTALL is needed to prevent vuPat.search() from stripping -# the trailing newline. 
-vuPat = re.compile(r'^(?P [*]+)( *)(?P.*)', re.DOTALL) - -# Pattern matching leading nested bullet points -global nestedVuPat -nestedVuPat = re.compile(r'^ \*\*') - if __name__ == '__main__': parser = argparse.ArgumentParser() @@ -726,18 +535,27 @@ def reflowAllAdocFiles(folder_to_reflow, args): parser.add_argument('-out', action='store', dest='outDir', default='out', help='Set the output directory in which updated files are generated (default: out)') + parser.add_argument('-nowrite', action='store_true', + help='Do not write output files, for use with -check') + parser.add_argument('-check', action='store', dest='check', + help='Run markup checks and warn if WARN option is given, error exit if FAIL option is given') + parser.add_argument('-checkVUID', action='store', dest='checkVUID', + help='Detect duplicated VUID numbers and warn if WARN option is given, error exit if FAIL option is given') parser.add_argument('-tagvu', action='store_true', help='Tag un-tagged Valid Usage statements starting at the value wired into reflow.py') parser.add_argument('-nextvu', action='store', dest='nextvu', type=int, default=None, - help='Specify start VUID to use instead of the value wired into vuidCounts.py') + help='Tag un-tagged Valid Usage statements starting at the specified base VUID instead of the value wired into reflow.py') parser.add_argument('-maxvu', action='store', dest='maxvu', type=int, default=None, help='Specify maximum VUID instead of the value wired into vuidCounts.py') parser.add_argument('-branch', action='store', dest='branch', - help='Specify branch to assign VUIDs for.') + help='Specify branch to assign VUIDs for') parser.add_argument('-noflow', action='store_true', dest='noflow', - help='Do not reflow text. Other actions may apply.') + help='Do not reflow text. 
Other actions may apply') + parser.add_argument('-margin', action='store', type=int, dest='margin', + default='80', + help='Width to reflow text, defaults to 80 characters') parser.add_argument('-suffix', action='store', dest='suffix', default='', help='Set the suffix added to updated file names (default: none)') @@ -754,23 +572,19 @@ def reflowAllAdocFiles(folder_to_reflow, args): if args.overwrite: logWarn("reflow.py: will overwrite all input files") + errors = '' + if args.branch is None: + (args.branch, errors) = getBranch() if args.branch is None: - # Determine current git branch - command = [ 'git', 'symbolic-ref', '--short', 'HEAD' ] - results = subprocess.run(command, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - if len(results.stderr) > 0: - logErr('Cannot determine current git branch:', results.stderr) - - # Remove newline from output and convert to a string - branch = results.stdout.rstrip().decode() - if len(branch) > 0: - # Strip trailing newline - branch = results.stdout.decode()[0:-1] - args.branch = branch + # This is not fatal unless VUID assignment is required + if args.tagvu: + logErr('Cannot determine current git branch, so cannot assign VUIDs:', errors) if args.tagvu and args.nextvu is None: + # Moved here since vuidCounts is only needed in the internal + # repository + from vuidCounts import vuidCounts + if args.branch not in vuidCounts: logErr('Branch', args.branch, 'not in vuidCounts, cannot continue') maxVUID = vuidCounts[args.branch][1] @@ -781,6 +595,15 @@ def reflowAllAdocFiles(folder_to_reflow, args): if args.nextvu is not None: logWarn('Tagging untagged Valid Usage statements starting at', args.nextvu) + # Count of markup check warnings encountered + # This is added to the argparse structure + args.warnCount = 0 + + # Dictionary of VUID numbers found, containing a list of (file, VUID) on + # which that number was found + # This is added to the argparse structure + args.vuidDict = {} + # If no files are specified, reflow the 
entire specification chapters folder if not args.files: folder_to_reflow = conventions.spec_reflow_path @@ -790,6 +613,35 @@ def reflowAllAdocFiles(folder_to_reflow, args): for file in args.files: reflowFile(file, args) + if args.warnCount > 0: + if args.check == 'FAIL': + logErr('Failed with', args.warnCount, 'markup errors detected.\n' + + 'To fix these, you can take actions such as:\n' + + ' * Moving conditionals outside VU start / end without changing VU meaning\n' + + ' * Refactor conditional text using terminology defined conditionally outside the VU itself\n' + + ' * Remove the conditional (allowable when this just affects command / structure / enum names)\n') + else: + logWarn('Total warning count for markup issues is', args.warnCount) + + # Look for duplicated VUID numbers + if args.checkVUID: + dupVUIDs = 0 + for vuid in sorted(args.vuidDict): + found = args.vuidDict[vuid] + if len(found) > 1: + logWarn('Duplicate VUID number {} found in files:'.format(vuid)) + for (file, vuidLine) in found: + logWarn(' {}: {}'.format(file, vuidLine)) + dupVUIDs = dupVUIDs + 1 + + if dupVUIDs > 0: + if args.checkVUID == 'FAIL': + logErr('Failed with', dupVUIDs, 'duplicated VUID numbers found.\n' + + 'To fix this, either convert these to commonvalidity VUs if possible, or strip\n' + + 'the VUIDs from all but one of the duplicates and regenerate new ones.') + else: + logWarn('Total number of duplicated VUID numbers is', dupVUIDs) + if args.nextvu is not None and args.nextvu != startVUID: # Update next free VUID to assign vuidCounts[args.branch][2] = args.nextvu @@ -797,16 +649,14 @@ def reflowAllAdocFiles(folder_to_reflow, args): reflow_count_file_path = os.path.dirname(os.path.realpath(__file__)) reflow_count_file_path += '/vuidCounts.py' reflow_count_file = open(reflow_count_file_path, 'w', encoding='utf8') - print('# Do not edit this file!', file=reflow_count_file) + print('# Do not edit this file, unless reserving a new VUID range', file=reflow_count_file) print('# VUID 
ranges reserved for branches', file=reflow_count_file) print('# Key is branch name, value is [ start, end, nextfree ]', file=reflow_count_file) + print('# New reservations must be made by MR to main branch', file=reflow_count_file) print('vuidCounts = {', file=reflow_count_file) - for key in sorted(vuidCounts): - print(" '{}': [ {}, {}, {} ],".format( - key, - vuidCounts[key][0], - vuidCounts[key][1], - vuidCounts[key][2]), + for key in sorted(vuidCounts.keys(), key=lambda k: vuidCounts[k][0]): + counts = vuidCounts[key] + print(f" '{key}': [ {counts[0]}, {counts[1]}, {counts[2]} ],", file=reflow_count_file) print('}', file=reflow_count_file) reflow_count_file.close() diff --git a/adoc/scripts/spec_tools/__init__.py b/adoc/scripts/spec_tools/__init__.py new file mode 100644 index 00000000..34c01f39 --- /dev/null +++ b/adoc/scripts/spec_tools/__init__.py @@ -0,0 +1,7 @@ +#!/usr/bin/python3 -i +# +# Copyright (c) 2018-2019 Collabora, Ltd. +# +# SPDX-License-Identifier: Apache-2.0 +# +# Author(s): Ryan Pavlik diff --git a/adoc/scripts/spec_tools/conventions.py b/adoc/scripts/spec_tools/conventions.py new file mode 100644 index 00000000..faca3a27 --- /dev/null +++ b/adoc/scripts/spec_tools/conventions.py @@ -0,0 +1,454 @@ +#!/usr/bin/python3 -i +# +# Copyright 2013-2023 The Khronos Group Inc. +# +# SPDX-License-Identifier: Apache-2.0 + +# Base class for working-group-specific style conventions, +# used in generation. 
+ +from enum import Enum +import abc +import re + +# Type categories that respond "False" to isStructAlwaysValid +# basetype is home to typedefs like ..Bool32 +CATEGORIES_REQUIRING_VALIDATION = set(('handle', + 'enum', + 'bitmask', + 'basetype', + None)) + +# These are basic C types pulled in via openxr_platform_defines.h +TYPES_KNOWN_ALWAYS_VALID = set(('char', + 'float', + 'int8_t', 'uint8_t', + 'int16_t', 'uint16_t', + 'int32_t', 'uint32_t', + 'int64_t', 'uint64_t', + 'size_t', + 'intptr_t', 'uintptr_t', + 'int', + )) + +# Split an extension name into vendor ID and name portions +EXT_NAME_DECOMPOSE_RE = re.compile(r'[A-Z]+_(?P[A-Z]+)_(?P[\w_]+)') + +# Match an API version name. +# This could be refined further for specific APIs. +API_VERSION_NAME_RE = re.compile(r'[A-Z]+_VERSION_[0-9]') + + +class ProseListFormats(Enum): + """A connective, possibly with a quantifier.""" + AND = 0 + EACH_AND = 1 + OR = 2 + ANY_OR = 3 + + @classmethod + def from_string(cls, s): + if s == 'or': + return cls.OR + if s == 'and': + return cls.AND + raise RuntimeError("Unrecognized string connective: " + s) + + @property + def connective(self): + if self in (ProseListFormats.OR, ProseListFormats.ANY_OR): + return 'or' + return 'and' + + def quantifier(self, n): + """Return the desired quantifier for a list of a given length.""" + if self == ProseListFormats.ANY_OR: + if n > 1: + return 'any of ' + elif self == ProseListFormats.EACH_AND: + if n > 2: + return 'each of ' + if n == 2: + return 'both of ' + return '' + + +class ConventionsBase(abc.ABC): + """WG-specific conventions.""" + + def __init__(self): + self._command_prefix = None + self._type_prefix = None + + def formatExtension(self, name): + """Mark up an extension name as a link the spec.""" + return '`<<{}>>`'.format(name) + + @property + @abc.abstractmethod + def null(self): + """Preferred spelling of NULL.""" + raise NotImplementedError + + def makeProseList(self, elements, fmt=ProseListFormats.AND, with_verb=False, *args, 
**kwargs): + """Make a (comma-separated) list for use in prose. + + Adds a connective (by default, 'and') + before the last element if there are more than 1. + + Adds the right one of "is" or "are" to the end if with_verb is true. + + Optionally adds a quantifier (like 'any') before a list of 2 or more, + if specified by fmt. + + Override with a different method or different call to + _implMakeProseList if you want to add a comma for two elements, + or not use a serial comma. + """ + return self._implMakeProseList(elements, fmt, with_verb, *args, **kwargs) + + @property + def struct_macro(self): + """Get the appropriate format macro for a structure. + + May override. + """ + return 'slink:' + + @property + def external_macro(self): + """Get the appropriate format macro for an external type like uint32_t. + + May override. + """ + return 'code:' + + @property + @abc.abstractmethod + def structtype_member_name(self): + """Return name of the structure type member. + + Must implement. + """ + raise NotImplementedError() + + @property + @abc.abstractmethod + def nextpointer_member_name(self): + """Return name of the structure pointer chain member. + + Must implement. + """ + raise NotImplementedError() + + @property + @abc.abstractmethod + def xml_api_name(self): + """Return the name used in the default API XML registry for the default API""" + raise NotImplementedError() + + @abc.abstractmethod + def generate_structure_type_from_name(self, structname): + """Generate a structure type name, like XR_TYPE_CREATE_INSTANCE_INFO. + + Must implement. + """ + raise NotImplementedError() + + def makeStructName(self, name): + """Prepend the appropriate format macro for a structure to a structure type name. + + Uses struct_macro, so just override that if you want to change behavior. + """ + return self.struct_macro + name + + def makeExternalTypeName(self, name): + """Prepend the appropriate format macro for an external type like uint32_t to a type name. 
+ + Uses external_macro, so just override that if you want to change behavior. + """ + return self.external_macro + name + + def _implMakeProseList(self, elements, fmt, with_verb, comma_for_two_elts=False, serial_comma=True): + """Internal-use implementation to make a (comma-separated) list for use in prose. + + Adds a connective (by default, 'and') + before the last element if there are more than 1, + and only includes commas if there are more than 2 + (if comma_for_two_elts is False). + + Adds the right one of "is" or "are" to the end if with_verb is true. + + Optionally adds a quantifier (like 'any') before a list of 2 or more, + if specified by fmt. + + Do not edit these defaults, override self.makeProseList(). + """ + assert(serial_comma) # did not implement what we did not need + if isinstance(fmt, str): + fmt = ProseListFormats.from_string(fmt) + + my_elts = list(elements) + if len(my_elts) > 1: + my_elts[-1] = '{} {}'.format(fmt.connective, my_elts[-1]) + + if not comma_for_two_elts and len(my_elts) <= 2: + prose = ' '.join(my_elts) + else: + prose = ', '.join(my_elts) + + quantifier = fmt.quantifier(len(my_elts)) + + parts = [quantifier, prose] + + if with_verb: + if len(my_elts) > 1: + parts.append(' are') + else: + parts.append(' is') + return ''.join(parts) + + @property + @abc.abstractmethod + def file_suffix(self): + """Return suffix of generated Asciidoctor files""" + raise NotImplementedError + + @abc.abstractmethod + def api_name(self, spectype=None): + """Return API or specification name for citations in ref pages. + + spectype is the spec this refpage is for. + 'api' (the default value) is the main API Specification. + If an unrecognized spectype is given, returns None. + + Must implement.""" + raise NotImplementedError + + def should_insert_may_alias_macro(self, genOpts): + """Return true if we should insert a "may alias" macro in this file. 
+ + Only used by OpenXR right now.""" + return False + + @property + def command_prefix(self): + """Return the expected prefix of commands/functions. + + Implemented in terms of api_prefix.""" + if not self._command_prefix: + self._command_prefix = self.api_prefix[:].replace('_', '').lower() + return self._command_prefix + + @property + def type_prefix(self): + """Return the expected prefix of type names. + + Implemented in terms of command_prefix (and in turn, api_prefix).""" + if not self._type_prefix: + self._type_prefix = ''.join( + (self.command_prefix[0:1].upper(), self.command_prefix[1:])) + return self._type_prefix + + @property + @abc.abstractmethod + def api_prefix(self): + """Return API token prefix. + + Typically two uppercase letters followed by an underscore. + + Must implement.""" + raise NotImplementedError + + @property + def api_version_prefix(self): + """Return API core version token prefix. + + Implemented in terms of api_prefix. + + May override.""" + return self.api_prefix + 'VERSION_' + + @property + def KHR_prefix(self): + """Return extension name prefix for KHR extensions. + + Implemented in terms of api_prefix. + + May override.""" + return self.api_prefix + 'KHR_' + + @property + def EXT_prefix(self): + """Return extension name prefix for EXT extensions. + + Implemented in terms of api_prefix. + + May override.""" + return self.api_prefix + 'EXT_' + + def writeFeature(self, featureExtraProtect, filename): + """Return True if OutputGenerator.endFeature should write this feature. + + Defaults to always True. + Used in COutputGenerator. + + May override.""" + return True + + def requires_error_validation(self, return_type): + """Return True if the return_type element is an API result code + requiring error validation. + + Defaults to always False. + + May override.""" + return False + + @property + def required_errors(self): + """Return a list of required error codes for validation. + + Defaults to an empty list. 
+
+        May override."""
+        return []
+
+    def is_voidpointer_alias(self, tag, text, tail):
+        """Return True if the declaration components (tag,text,tail) of an
+        element represents a void * type.
+
+        Defaults to a reasonable implementation.
+
+        May override."""
+        return tag == 'type' and text == 'void' and tail.startswith('*')
+
+    def make_voidpointer_alias(self, tail):
+        """Reformat a void * declaration to include the API alias macro.
+
+        Defaults to a no-op.
+
+        Must override if you actually want to use this feature in your project."""
+        return tail
+
+    def category_requires_validation(self, category):
+        """Return True if the given type 'category' always requires validation.
+
+        Defaults to a reasonable implementation.
+
+        May override."""
+        return category in CATEGORIES_REQUIRING_VALIDATION
+
+    def type_always_valid(self, typename):
+        """Return True if the given type name is always valid (never requires validation).
+
+        This is for things like integers.
+
+        Defaults to a reasonable implementation.
+
+        May override."""
+        return typename in TYPES_KNOWN_ALWAYS_VALID
+
+    @property
+    def should_skip_checking_codes(self):
+        """Return True if more than the basic validation of return codes should
+        be skipped for a command."""
+
+        return False
+
+    @property
+    def generate_index_terms(self):
+        """Return True if asciidoctor index terms should be generated as part
+        of an API interface from the docgenerator."""
+
+        return False
+
+    @property
+    def generate_enum_table(self):
+        """Return True if asciidoctor tables describing enumerants in a
+        group should be generated as part of group generation."""
+        return False
+
+    @property
+    def generate_max_enum_in_docs(self):
+        """Return True if MAX_ENUM tokens should be generated in
+        documentation includes."""
+        return False
+
+    @abc.abstractmethod
+    def extension_file_path(self, name):
+        """Return file path to an extension appendix relative to a directory
+        containing all such appendices.
+ - name - extension name + + Must implement.""" + raise NotImplementedError + + def extension_include_string(self, name): + """Return format string for include:: line for an extension appendix + file. + - name - extension name""" + + return 'include::{{appendices}}/{}[]'.format( + self.extension_file_path(name)) + + @property + def provisional_extension_warning(self): + """Return True if a warning should be included in extension + appendices for provisional extensions.""" + return True + + @property + def generated_include_path(self): + """Return path relative to the generated reference pages, to the + generated API include files.""" + + return '{generated}' + + @property + def include_extension_appendix_in_refpage(self): + """Return True if generating extension refpages by embedding + extension appendix content (default), False otherwise + (OpenXR).""" + + return True + + def valid_flag_bit(self, bitpos): + """Return True if bitpos is an allowed numeric bit position for + an API flag. + + Behavior depends on the data type used for flags (which may be 32 + or 64 bits), and may depend on assumptions about compiler + handling of sign bits in enumerated types, as well.""" + return True + + @property + def duplicate_aliased_structs(self): + """ + Should aliased structs have the original struct definition listed in the + generated docs snippet? 
+ """ + return False + + @property + def protectProtoComment(self): + """Return True if generated #endif should have a comment matching + the protection symbol used in the opening #ifdef/#ifndef.""" + return False + + @property + def extra_refpage_headers(self): + """Return any extra headers (preceding the title) for generated + reference pages.""" + return '' + + @property + def extra_refpage_body(self): + """Return any extra text (following the title) for generated + reference pages.""" + return '' + + def is_api_version_name(self, name): + """Return True if name is an API version name.""" + + return API_VERSION_NAME_RE.match(name) is not None diff --git a/adoc/scripts/syclconventions.py b/adoc/scripts/syclconventions.py new file mode 100755 index 00000000..472c660a --- /dev/null +++ b/adoc/scripts/syclconventions.py @@ -0,0 +1,67 @@ +#!/usr/bin/python3 -i +# +# Copyright 2013-2023 The Khronos Group Inc. +# +# SPDX-License-Identifier: Apache-2.0 + +# Working-group-specific style conventions, +# used in generation. 
+
+import re
+import os
+
+from spec_tools.conventions import ConventionsBase
+
+class SYCLConventions(ConventionsBase):
+    @property
+    def null(self):
+        """Preferred spelling of NULL."""
+        return '`NULL`'
+
+    @property
+    def structtype_member_name(self):
+        """Return name of the structure type member"""
+        return None
+
+    @property
+    def nextpointer_member_name(self):
+        """Return name of the structure pointer chain member"""
+        return None
+
+    @property
+    def xml_api_name(self):
+        """Return the name used in the default API XML registry for the default API"""
+        return None
+
+    def generate_structure_type_from_name(self, structname):
+        """Generate a structure type name"""
+
+        return structname
+
+    @property
+    def warning_comment(self):
+        """Return warning comment to be placed in header of generated Asciidoctor files"""
+        return ''
+
+    @property
+    def file_suffix(self):
+        """Return suffix of generated Asciidoctor files"""
+        return '.adoc'
+
+    def api_name(self, spectype='api'):
+        """Return API or specification name for citations in ref pages.
+
+        spectype is the spec this refpage is for.
+        """
+        return 'SYCL'
+
+    @property
+    def api_prefix(self):
+        """Return API token prefix"""
+        return ''
+
+    def extension_file_path(self, name):
+        """Return file path to an extension appendix relative to a directory
+        containing all such appendices."""
+
+        return f'{name}{self.file_suffix}'
diff --git a/adoc/scripts/verify_reflow_conformance.sh b/adoc/scripts/verify_reflow_conformance.sh
new file mode 100755
index 00000000..3e51d3f9
--- /dev/null
+++ b/adoc/scripts/verify_reflow_conformance.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+error=0
+for file in adoc/chapters/*.adoc;
+do
+    echo "$file"
+    ./adoc/scripts/reflow.py -out tmp_ci/ -- "$file"
+    diff "$file" "${file/adoc\/chapters/tmp_ci}"
+    error=$((error+$?))
+done
+
+exit $error