#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""asn2md: convert ASN.1 modules with doxygen-style comments to markdown.

Reads one or more ASN.1 files, extracts every
``<Name> DEFINITIONS ... ::= BEGIN ... END`` module and writes a
``<Name>.md`` file into the output directory.  Documentation is taken
from ``--!`` / ``/** ... */`` doxygen comments and ``--<`` inline
comments found in the source.
"""
import argparse  # parse arguments
import os.path   # getting extension from file
import sys       # output and stuff
import re        # for regular expressions

# Python 2/3 compatibility for URL quoting.
if (sys.version_info > (3, 0)):
    import urllib.parse
else:
    import urllib

## extract doxygen-tag namespace

# One ASN.1 module: name, optional OID body, then BEGIN..END content.
RE_MODULE = re.compile(
    r'^\s*([A-Z][\w-]*)\s*({.*?})?\s*DEFINITIONS.*?::=\s*?BEGIN(.*)END',
    re.VERBOSE | re.MULTILINE | re.DOTALL)
RE_SPACES = re.compile(r'\s+')
# Any ASN.1 ('--') or C-style comment; used to strip comments from code listings.
RE_COMMENTS = re.compile(r'^\s*--.*?\n|--.*?(?:--|$)|/\*.*?\*/[\t ]*\n?', re.MULTILINE | re.DOTALL)
RE_BASIC_TYPES = re.compile(r'^OCTET\s+STRING|BIT\s+STRING|BOOLEAN|INTEGER|FLOAT|SEQUENCE|SET|NULL')
# Body of a structured type: everything between the outermost braces.
RE_TYPE_BODY = re.compile(r'.*?{(.*)}\s*(?:\(.*?\)|\s*$)', re.MULTILINE | re.DOTALL)
# One field/value inside a type body: (1) name, (2) type, (3) extra, (4) trailing '--'.
RE_FIELDS = re.compile(r'^\s*/\*.*?\*/|^\s*--\!.*?\n|^\s*([\w-]+)\s+(OCTET\s+STRING|BIT\s+STRING|[A-Z][\w-]+)?((?:{[^}]*}|\([^)]*\)|.)*?)(?:,|(--)|$)', re.MULTILINE | re.DOTALL)
RE_EXPORTS = re.compile(r'^\s*EXPORTS.*?;', re.DOTALL | re.MULTILINE)
RE_IMPORTS = re.compile(r'^\s*IMPORTS\s*(.*?);', re.DOTALL | re.MULTILINE)
# One 'name, name FROM Module {oid}' element of the IMPORTS clause.
RE_IMPORT_ELEMENTS = re.compile(r'^([,\s\w-]*?)FROM\s*([\w-]+)\s*({[^}]*}(?:\s+WITH\s+SUCCESSORS)?)?', re.MULTILINE)
RE_IMPORT_ELEMENT_TYPE = re.compile(r'[^,\s]+')
# Doxygen line comments: '--!', '---' or '--#'.
# NOTE(review): '(:?$' looks like it was meant to be the non-capturing '(?:$';
# kept as-is because the replacement below relies on the current group numbering.
RE_DOXY_COMMENTS = re.compile(r'^\s*--[-!#](:?$|\s(.*))', re.MULTILINE)
RE_DOXY_C_COMMENTS = re.compile(r'^\s*/\*\*\s(.*?)\*/', re.MULTILINE | re.DOTALL)
RE_DOXY_C_COMMENTS_I = re.compile(r'\s*\*+')
RE_DOXY_REF = re.compile(r'@ref\s+([\w-]+)')
RE_DOXY_CLASS = re.compile(r'@class:?\s+([\w-]+)')
RE_DOXY_STRIP_SINGLE_TAG = re.compile(r'@(?:brief|url|details)\s+')
RE_DOXY_DETAILS = re.compile(r'@details:?\s+[\w-]+')
RE_DOXY_STRIP_TAG = re.compile(r'\s*@(?:class|struct):?\s+[\w-]+')
RE_DOXY_UNIT = re.compile(r'^\s*@unit:?\s+(.+)\n+', re.MULTILINE)
RE_DOXY_BRIEF = re.compile(r'^\s*@brief:?\s+(.+)\n+', re.MULTILINE)
RE_DOXY_CATEGORY = re.compile(r'@category:\s+(.+)\n+', re.MULTILINE)
RE_DOXY_NOTE = re.compile(r'@note\s*(\d*):\s+(.+?)\n\s*$', re.MULTILINE | re.DOTALL)
RE_DOXY_PARAM = re.compile(r'^\s*@(?:param|field|value)\s+([\w-]+):?\s*(.*?)\n\s*$', re.MULTILINE | re.DOTALL)
RE_DOXY_OPTION = re.compile(r'@(no-auto-fields|no-auto-values)', re.MULTILINE)
# One type assignment: (1) name, (2) parameters, (3) definition body.
RE_TYPE = re.compile(r'^\s*([A-Z][\w-]*)?\s*([{} \t:\w-]*?)?::=([\w \t]+.*?)\n\s*\n', re.MULTILINE | re.DOTALL)

extTypes = {}   # type name -> markdown file of the module it was imported from
cpos = 0        # position in the module body where type parsing starts
o_args = []     # parsed command-line arguments (set in main())


def urlquote(s):
    """URL-quote *s* for use in a markdown anchor (Python 2/3 compatible)."""
    if (sys.version_info > (3, 0)):
        return urllib.parse.quote_plus(s)
    else:
        return urllib.quote_plus(s)


def parseText(content, indent=None):
    """Expand @ref tags to markdown links and strip structural doxy tags."""
    def repl_ref(m):
        # Link to the anchor; external types get their module's .md prefix.
        return '[**{0}**]({1}#{0})'.format(m.group(1), extTypes.get(m.group(1), ''))
    content = RE_DOXY_REF.sub(repl_ref, content)
    content = RE_DOXY_STRIP_TAG.sub('', content)
    content = RE_DOXY_STRIP_SINGLE_TAG.sub('', content)
    return content


def parseInlineComments(content: str, indent=None):
    """Extract '--< ' / '--!< ' inline comments, re-indented by *indent* spaces."""
    # keep into account only '--<' comments
    lines = content.splitlines()
    content = ''
    for l in lines:
        l = l.lstrip()
        if l.startswith('--< '):
            content += ''.ljust(indent or 0) + l[4:] + '\n'
        elif l.startswith('--!< '):
            content += ''.ljust(indent or 0) + l[5:] + '\n'
        else:
            continue
    return parseText(content, indent)


def parseDoxyComments(content: str, indent=None):
    """Extract '--! ' and '/** */' doxygen comments, re-indented by *indent*."""
    # keep only '--! ' and /** */ comments
    # convert '--! ' comments to C-style
    content = RE_DOXY_COMMENTS.sub(r'/** *\g<1>*/', content)
    ret = ''
    for m in RE_DOXY_C_COMMENTS.finditer(content):
        lines = m.group(1).splitlines()
        for l in lines:
            l = l.strip().lstrip('*')
            ret += ''.ljust(indent or 0) + l + '\n'
    return ret


def parseModule(mname, content):
    """Render one module body (*content*, between BEGIN and END) as markdown.

    Handles the IMPORTS/EXPORTS clauses, then every type assignment with its
    fields/values, doxygen documentation and a cleaned ``asn1`` code listing.
    """
    global cpos
    cpos = 0
    ret = ''
    m = RE_IMPORTS.search(content)
    if m is not None:
        pos = 0
        if m.group(1) is not None:
            ret += '## Imports:\n'
            s = m.group(1)
            for fm in RE_IMPORT_ELEMENTS.finditer(s):
                imName = fm.group(2)
                # remember where each imported type lives, for cross-links
                for im in RE_IMPORT_ELEMENT_TYPE.finditer(fm.group(1)):
                    extTypes[im.group(0)] = imName + '.md'
                ret += ' * **[{0}]({0}.md)** *{1}*<br/>\n'.format(imName, RE_SPACES.sub(' ', fm.group(3) or ''))
                ret += parseDoxyComments(s[pos:fm.start()], 3) + '\n'
                pos = fm.end()
            ret += parseDoxyComments(s[pos:])
        cpos = m.end()
    m = RE_EXPORTS.search(content)
    if m is not None:
        if cpos < m.end():
            cpos = m.end()

    # parse types
    def repl_type(m, doc):
        """Render one RE_TYPE match, *doc* being the text preceding it."""
        title = t = m.group(1)
        auto_fields = True
        s_unit = ''
        s_category = ''
        s_params = {}
        if doc:  # non None and not empty
            # keep only doxy comments
            doc = parseDoxyComments(doc)

            # parse @brief
            def repl_brief(m):
                nonlocal title
                title = m.group(1)
                return '\n'
            if o_args.brief_as_title:
                doc = RE_DOXY_BRIEF.sub(repl_brief, doc, 1)

            # parse options
            def repl_doxy_option(m):
                nonlocal auto_fields
                if m.group(1) == 'no-auto-fields' or m.group(1) == 'no-auto-values':
                    auto_fields = False
                return ''
            doc = RE_DOXY_OPTION.sub(repl_doxy_option, doc)

            # filter out unit
            def repl_unit(m):
                nonlocal s_unit
                s_unit = '\n\n **Unit**: _{}_'.format(m.group(1).strip())
                return ''
            doc = RE_DOXY_UNIT.sub(repl_unit, doc, 1)

            # filter out category
            def repl_category(m):
                nonlocal s_category
                s_category = '\n\n **Categories**: '
                for l in m.group(1).split(','):
                    l = l.strip()
                    if l:
                        s_category += '_[{0}](#{1})_ '.format(l, urlquote(l))
                return ''
            doc = RE_DOXY_CATEGORY.sub(repl_category, doc, 1)

            # filter out notes
            def repl_note(m):
                return ' **NOTE{0}**: {1}\n{2}\n\n'.format(m.group(1) or '', m.group(2).strip(), " {: .note}")
            doc = RE_DOXY_NOTE.sub(repl_note, doc)

            # filter out params
            def repl_param(m):
                nonlocal s_params
                if m.group(1) is not None and m.group(2) is not None:
                    l = parseText(m.group(2).lstrip(":, \t\n"))
                    if len(l):
                        s_params[m.group(1)] = l
                return ''
            doc = RE_DOXY_PARAM.sub(repl_param, doc)
            doc = parseText(doc).strip() + s_unit + s_category
        else:
            doc = ''
        ret = ''
        if t is not None:
            ret = '### <a name="{0}"></a>{1}\n\n'.format(t, title) + doc + '\n\n'
            # parse fields and get out fields descriptions
            if m.group(3) is not None:
                # check if contain fields
                fm = RE_TYPE_BODY.search(m.group(3))
                if fm is not None and fm.group(1) is not None:
                    typeBody = fm.group(1).strip()
                    if typeBody is not None:
                        fTitle = ''
                        fields = ''
                        f_header = ''
                        f_doc = ''
                        pos = 0
                        for fm in RE_FIELDS.finditer(typeBody):
                            f_doc += parseInlineComments(fm.string[pos:fm.start()], 3).strip()
                            # flush the previous field before starting a new one
                            if f_header and (f_doc or not o_args.no_empty_fields):
                                fields += f_header + (f_doc or '\n')
                            f_doc = ''
                            if fm.group(1) is not None:
                                f = fm.group(1).strip()
                                ext = fm.group(3) or ''
                                if fm.group(2) is not None:
                                    fTitle = 'Fields:\n'
                                    t = fm.group(2).strip()
                                    if RE_BASIC_TYPES.match(t) is not None:
                                        f_header = '* {0} **{1}** {2}<br>\n'.format(f, t, ext)
                                    else:
                                        # was '+=', which duplicated the stale
                                        # previous header; assign like the
                                        # sibling branches do
                                        f_header = '* {0} [**{1}**]({2}#{1}) {3}<br>\n'.format(f, t, extTypes.get(t, ''), ext)
                                else:
                                    fTitle = 'Values:\n'
                                    f_header = '* **{0}** {1}<br>\n'.format(f, ext)
                                if f in s_params:
                                    # description supplied via @param/@field/@value
                                    f_doc = s_params[f] + '\n\n'
                            f = parseDoxyComments(fm.string[pos:fm.start()], 3).strip()
                            if f:
                                f_doc += f + '\n\n'
                            pos = fm.end()
                            if fm.group(4) is not None:
                                # keep '--' for the next round
                                pos -= 2
                        f_doc += parseInlineComments(typeBody[pos:], 3).strip()
                        if f_doc or not o_args.no_empty_fields:
                            fields += f_header + (f_doc or '\n')
                        ret = ret.strip() + '\n\n'
                        if auto_fields and len(fields):
                            ret += fTitle + fields
        else:
            if title:
                ret = '### {}\n\n'.format(title)
            ret += doc + '\n\n'
        # append the comment-stripped ASN.1 source of the type
        return ret + '```asn1\n' + RE_COMMENTS.sub('', m.group(0).strip()) + '\n```\n\n'

    pos = 0
    ret += '## Data Elements:\n'
    for m in RE_TYPE.finditer(content[cpos:]):
        ret += repl_type(m, m.string[pos:m.start()])
        pos = m.end()
    return ret


def parseAsn(outDir, content):
    """Write one markdown file per ASN.1 module found in *content*.

    Returns the number of modules processed.
    """
    # iterate modules in the file
    pos = 0
    cnt = 0
    for m in RE_MODULE.finditer(content):
        # the OID group is optional -- guard against None
        ret = '# ASN.1 module {}\n OID: _{}_\n'.format(m.group(1), RE_SPACES.sub(' ', m.group(2) or ''))
        ret += parseDoxyComments(content[pos:m.start()]) + '\n'
        if m.group(3) is not None:
            ret += parseModule(m.group(1), m.group(3))
        ret += '\n\n'
        with open(outDir + '/' + m.group(1) + '.md', "w") as out:
            out.write(ret)
        pos = m.end()
        cnt += 1
    return cnt


def main():
    """Parse command-line arguments and convert each given ASN.1 file."""
    global o_args
    ap = argparse.ArgumentParser(description='ASN.1 to markdown converter')
    ap.add_argument('--out', '-o', type=str, default='.', help='output directory')
    ap.add_argument('--brief-as-title', '-B', default=False, action='store_true',
                    help='Treat the @brief line as the type header')
    ap.add_argument('--no-empty-fields', '-F', default=False, action='store_true',
                    help='Do not add non-documented fields in the "Fields" block')
    ap.add_argument('--no-empty-values', '-V', default=False, action='store_true',
                    help='Do not add non-documented fields in the "Fields" block')
    ap.add_argument('modules', action='store', nargs='+', help='ASN.1 files')
    o_args = ap.parse_args()
    if not o_args.modules:
        ap.print_help()
        sys.exit(1)
    cnt = 0
    for a in o_args.modules:
        try:
            with open(a) as f:
                content = f.read()
            cnt += parseAsn(o_args.out, content)
        except IOError as e:
            # was e[1]: exceptions are not subscriptable on Python 3
            sys.stderr.write(str(e) + "\n")
    print("{} modules processed\n".format(cnt))


if __name__ == '__main__':
    main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""asn2md: convert ASN.1 modules with doxygen-style comments to markdown.

Reads one or more ASN.1 files, extracts every
``<Name> DEFINITIONS ... ::= BEGIN ... END`` module and writes a
``<Name>.md`` file into the output directory.  Documentation is taken
from ``--!`` / ``/** ... */`` doxygen comments and ``--<`` inline
comments found in the source.
"""
import argparse  # parse arguments
import os.path   # getting extension from file
import sys       # output and stuff
import re        # for regular expressions

# Python 2/3 compatibility for URL quoting.
if (sys.version_info > (3, 0)):
    import urllib.parse
else:
    import urllib

## extract doxygen-tag namespace

# One ASN.1 module: name, optional OID body, then BEGIN..END content.
RE_MODULE = re.compile(
    r'^\s*([A-Z][\w-]*)\s*({.*?})?\s*DEFINITIONS.*?::=\s*?BEGIN(.*)END',
    re.VERBOSE | re.MULTILINE | re.DOTALL)
RE_SPACES = re.compile(r'\s+')
# Any ASN.1 ('--') or C-style comment; used to strip comments from code listings.
RE_COMMENTS = re.compile(r'^\s*--.*?\n|--.*?(?:--|$)|/\*.*?\*/[\t ]*\n?', re.MULTILINE | re.DOTALL)
RE_BASIC_TYPES = re.compile(r'^OCTET\s+STRING|BIT\s+STRING|BOOLEAN|INTEGER|FLOAT|SEQUENCE|SET|NULL')
# Body of a structured type: everything between the outermost braces.
RE_TYPE_BODY = re.compile(r'.*?{(.*)}\s*(?:\(.*?\)|\s*$)', re.MULTILINE | re.DOTALL)
# One field/value inside a type body: (1) name, (2) type, (3) extra, (4) trailing '--'.
RE_FIELDS = re.compile(r'^\s*/\*.*?\*/|^\s*--\!.*?\n|^\s*([\w-]+)\s+(OCTET\s+STRING|BIT\s+STRING|[A-Z][\w-]+)?((?:{[^}]*}|\([^)]*\)|.)*?)(?:,|(--)|$)', re.MULTILINE | re.DOTALL)
RE_EXPORTS = re.compile(r'^\s*EXPORTS.*?;', re.DOTALL | re.MULTILINE)
RE_IMPORTS = re.compile(r'^\s*IMPORTS\s*(.*?);', re.DOTALL | re.MULTILINE)
# One 'name, name FROM Module {oid}' element of the IMPORTS clause.
RE_IMPORT_ELEMENTS = re.compile(r'^([,\s\w-]*?)FROM\s*([\w-]+)\s*({[^}]*}(?:\s+WITH\s+SUCCESSORS)?)?', re.MULTILINE)
RE_IMPORT_ELEMENT_TYPE = re.compile(r'[^,\s]+')
# Doxygen line comments: '--!', '---' or '--#'.
# NOTE(review): '(:?$' looks like it was meant to be the non-capturing '(?:$';
# kept as-is because the replacement below relies on the current group numbering.
RE_DOXY_COMMENTS = re.compile(r'^\s*--[-!#](:?$|\s(.*))', re.MULTILINE)
RE_DOXY_C_COMMENTS = re.compile(r'^\s*/\*\*\s(.*?)\*/', re.MULTILINE | re.DOTALL)
RE_DOXY_C_COMMENTS_I = re.compile(r'\s*\*+')
RE_DOXY_REF = re.compile(r'@ref\s+([\w-]+)')
RE_DOXY_CLASS = re.compile(r'@class:?\s+([\w-]+)')
RE_DOXY_STRIP_SINGLE_TAG = re.compile(r'@(?:brief|url|details)\s+')
RE_DOXY_DETAILS = re.compile(r'@details:?\s+[\w-]+')
RE_DOXY_STRIP_TAG = re.compile(r'\s*@(?:class|struct):?\s+[\w-]+')
RE_DOXY_UNIT = re.compile(r'^\s*@unit:?\s+(.+)\n+', re.MULTILINE)
RE_DOXY_BRIEF = re.compile(r'^\s*@brief:?\s+(.+)\n+', re.MULTILINE)
RE_DOXY_CATEGORY = re.compile(r'@category:\s+(.+)\n+', re.MULTILINE)
RE_DOXY_NOTE = re.compile(r'@note\s*(\d*):\s+(.+?)\n\s*$', re.MULTILINE | re.DOTALL)
RE_DOXY_PARAM = re.compile(r'^\s*@(?:param|field|value)\s+([\w-]+):?\s*(.*?)\n\s*$', re.MULTILINE | re.DOTALL)
RE_DOXY_OPTION = re.compile(r'@(no-auto-fields|no-auto-values)', re.MULTILINE)
# One type assignment: (1) name, (2) parameters, (3) definition body.
RE_TYPE = re.compile(r'^\s*([A-Z][\w-]*)?\s*([{} \t:\w-]*?)?::=([\w \t]+.*?)\n\s*\n', re.MULTILINE | re.DOTALL)

extTypes = {}   # type name -> markdown file of the module it was imported from
cpos = 0        # position in the module body where type parsing starts
o_args = []     # parsed command-line arguments (set in main())


def urlquote(s):
    """URL-quote *s* for use in a markdown anchor (Python 2/3 compatible)."""
    if (sys.version_info > (3, 0)):
        return urllib.parse.quote_plus(s)
    else:
        return urllib.quote_plus(s)


def parseText(content, indent=None):
    """Expand @ref tags to markdown links and strip structural doxy tags."""
    def repl_ref(m):
        # Link to the anchor; external types get their module's .md prefix.
        return '[**{0}**]({1}#{0})'.format(m.group(1), extTypes.get(m.group(1), ''))
    content = RE_DOXY_REF.sub(repl_ref, content)
    content = RE_DOXY_STRIP_TAG.sub('', content)
    content = RE_DOXY_STRIP_SINGLE_TAG.sub('', content)
    return content


def parseInlineComments(content: str, indent=None):
    """Extract '--< ' / '--!< ' inline comments, re-indented by *indent* spaces."""
    # keep into account only '--<' comments
    lines = content.splitlines()
    content = ''
    for l in lines:
        l = l.lstrip()
        if l.startswith('--< '):
            content += ''.ljust(indent or 0) + l[4:] + '\n'
        elif l.startswith('--!< '):
            content += ''.ljust(indent or 0) + l[5:] + '\n'
        else:
            continue
    return parseText(content, indent)


def parseDoxyComments(content: str, indent=None):
    """Extract '--! ' and '/** */' doxygen comments, re-indented by *indent*."""
    # keep only '--! ' and /** */ comments
    # convert '--! ' comments to C-style
    content = RE_DOXY_COMMENTS.sub(r'/** *\g<1>*/', content)
    ret = ''
    for m in RE_DOXY_C_COMMENTS.finditer(content):
        lines = m.group(1).splitlines()
        for l in lines:
            l = l.strip().lstrip('*')
            ret += ''.ljust(indent or 0) + l + '\n'
    return ret


def parseModule(mname, content):
    """Render one module body (*content*, between BEGIN and END) as markdown.

    Handles the IMPORTS/EXPORTS clauses, then every type assignment with its
    fields/values, doxygen documentation and a cleaned ``asn1`` code listing.
    """
    global cpos
    cpos = 0
    ret = ''
    m = RE_IMPORTS.search(content)
    if m is not None:
        pos = 0
        if m.group(1) is not None:
            ret += '## Imports:\n'
            s = m.group(1)
            for fm in RE_IMPORT_ELEMENTS.finditer(s):
                imName = fm.group(2)
                # remember where each imported type lives, for cross-links
                for im in RE_IMPORT_ELEMENT_TYPE.finditer(fm.group(1)):
                    extTypes[im.group(0)] = imName + '.md'
                ret += ' * **[{0}]({0}.md)** *{1}*<br/>\n'.format(imName, RE_SPACES.sub(' ', fm.group(3) or ''))
                ret += parseDoxyComments(s[pos:fm.start()], 3) + '\n'
                pos = fm.end()
            ret += parseDoxyComments(s[pos:])
        cpos = m.end()
    m = RE_EXPORTS.search(content)
    if m is not None:
        if cpos < m.end():
            cpos = m.end()

    # parse types
    def repl_type(m, doc):
        """Render one RE_TYPE match, *doc* being the text preceding it."""
        title = t = m.group(1)
        auto_fields = True
        s_unit = ''
        s_category = ''
        s_params = {}
        if doc:  # non None and not empty
            # keep only doxy comments
            doc = parseDoxyComments(doc)

            # parse @brief
            def repl_brief(m):
                nonlocal title
                title = m.group(1)
                return '\n'
            if o_args.brief_as_title:
                doc = RE_DOXY_BRIEF.sub(repl_brief, doc, 1)

            # parse options
            def repl_doxy_option(m):
                nonlocal auto_fields
                if m.group(1) == 'no-auto-fields' or m.group(1) == 'no-auto-values':
                    auto_fields = False
                return ''
            doc = RE_DOXY_OPTION.sub(repl_doxy_option, doc)

            # filter out unit
            def repl_unit(m):
                nonlocal s_unit
                s_unit = '\n\n **Unit**: _{}_'.format(m.group(1).strip())
                return ''
            doc = RE_DOXY_UNIT.sub(repl_unit, doc, 1)

            # filter out category
            def repl_category(m):
                nonlocal s_category
                s_category = '\n\n **Categories**: '
                for l in m.group(1).split(','):
                    l = l.strip()
                    if l:
                        s_category += '_[{0}](#{1})_ '.format(l, urlquote(l))
                return ''
            doc = RE_DOXY_CATEGORY.sub(repl_category, doc, 1)

            # filter out notes
            def repl_note(m):
                return ' **NOTE{0}**: {1}\n{2}\n\n'.format(m.group(1) or '', m.group(2).strip(), " {: .note}")
            doc = RE_DOXY_NOTE.sub(repl_note, doc)

            # filter out params
            def repl_param(m):
                nonlocal s_params
                if m.group(1) is not None and m.group(2) is not None:
                    l = parseText(m.group(2).lstrip(":, \t\n"))
                    if len(l):
                        s_params[m.group(1)] = l
                return ''
            doc = RE_DOXY_PARAM.sub(repl_param, doc)
            doc = parseText(doc).strip() + s_unit + s_category
        else:
            doc = ''
        ret = ''
        if t is not None:
            ret = '### <a name="{0}"></a>{1}\n\n'.format(t, title) + doc + '\n\n'
            # parse fields and get out fields descriptions
            if m.group(3) is not None:
                # check if contain fields
                fm = RE_TYPE_BODY.search(m.group(3))
                if fm is not None and fm.group(1) is not None:
                    typeBody = fm.group(1).strip()
                    if typeBody is not None:
                        fTitle = ''
                        fields = ''
                        f_header = ''
                        f_doc = ''
                        pos = 0
                        for fm in RE_FIELDS.finditer(typeBody):
                            f_doc += parseInlineComments(fm.string[pos:fm.start()], 3).strip()
                            # flush the previous field before starting a new one
                            if f_header and (f_doc or not o_args.no_empty_fields):
                                fields += f_header + (f_doc or '\n')
                            f_doc = ''
                            if fm.group(1) is not None:
                                f = fm.group(1).strip()
                                ext = fm.group(3) or ''
                                if fm.group(2) is not None:
                                    fTitle = 'Fields:\n'
                                    t = fm.group(2).strip()
                                    if RE_BASIC_TYPES.match(t) is not None:
                                        f_header = '* {0} **{1}** {2}<br>\n'.format(f, t, ext)
                                    else:
                                        # was '+=', which duplicated the stale
                                        # previous header; assign like the
                                        # sibling branches do
                                        f_header = '* {0} [**{1}**]({2}#{1}) {3}<br>\n'.format(f, t, extTypes.get(t, ''), ext)
                                else:
                                    fTitle = 'Values:\n'
                                    f_header = '* **{0}** {1}<br>\n'.format(f, ext)
                                if f in s_params:
                                    # description supplied via @param/@field/@value
                                    f_doc = s_params[f] + '\n\n'
                            f = parseDoxyComments(fm.string[pos:fm.start()], 3).strip()
                            if f:
                                f_doc += f + '\n\n'
                            pos = fm.end()
                            if fm.group(4) is not None:
                                # keep '--' for the next round
                                pos -= 2
                        f_doc += parseInlineComments(typeBody[pos:], 3).strip()
                        if f_doc or not o_args.no_empty_fields:
                            fields += f_header + (f_doc or '\n')
                        ret = ret.strip() + '\n\n'
                        if auto_fields and len(fields):
                            ret += fTitle + fields
        else:
            if title:
                ret = '### {}\n\n'.format(title)
            ret += doc + '\n\n'
        # append the comment-stripped ASN.1 source of the type
        return ret + '```asn1\n' + RE_COMMENTS.sub('', m.group(0).strip()) + '\n```\n\n'

    pos = 0
    ret += '## Data Elements:\n'
    for m in RE_TYPE.finditer(content[cpos:]):
        ret += repl_type(m, m.string[pos:m.start()])
        pos = m.end()
    return ret


def parseAsn(outDir, content):
    """Write one markdown file per ASN.1 module found in *content*.

    Returns the number of modules processed.
    """
    # iterate modules in the file
    pos = 0
    cnt = 0
    for m in RE_MODULE.finditer(content):
        # the OID group is optional -- guard against None
        ret = '# ASN.1 module {}\n OID: _{}_\n'.format(m.group(1), RE_SPACES.sub(' ', m.group(2) or ''))
        ret += parseDoxyComments(content[pos:m.start()]) + '\n'
        if m.group(3) is not None:
            ret += parseModule(m.group(1), m.group(3))
        ret += '\n\n'
        with open(outDir + '/' + m.group(1) + '.md', "w") as out:
            out.write(ret)
        pos = m.end()
        cnt += 1
    return cnt


def main():
    """Parse command-line arguments and convert each given ASN.1 file."""
    global o_args
    ap = argparse.ArgumentParser(description='ASN.1 to markdown converter')
    ap.add_argument('--out', '-o', type=str, default='.', help='output directory')
    ap.add_argument('--brief-as-title', '-B', default=False, action='store_true',
                    help='Treat the @brief line as the type header')
    ap.add_argument('--no-empty-fields', '-F', default=False, action='store_true',
                    help='Do not add non-documented fields in the "Fields" block')
    ap.add_argument('--no-empty-values', '-V', default=False, action='store_true',
                    help='Do not add non-documented fields in the "Fields" block')
    ap.add_argument('modules', action='store', nargs='+', help='ASN.1 files')
    o_args = ap.parse_args()
    if not o_args.modules:
        ap.print_help()
        sys.exit(1)
    cnt = 0
    for a in o_args.modules:
        try:
            with open(a) as f:
                content = f.read()
            cnt += parseAsn(o_args.out, content)
        except IOError as e:
            # was e[1]: exceptions are not subscriptable on Python 3
            sys.stderr.write(str(e) + "\n")
    print("{} modules processed\n".format(cnt))


if __name__ == '__main__':
    main()