diff --git a/buildscripts/add_html_footer.py b/buildscripts/add_html_footer.py index a7bc7e1..7c63755 100644 --- a/buildscripts/add_html_footer.py +++ b/buildscripts/add_html_footer.py @@ -9,10 +9,23 @@ import time import langdefs -default_header = r""" +# This is to try to make the docball not too big with almost duplicate files +# see process_links() +non_copied_pages = ['Documentation/user/out-www/lilypond-big-page', + 'Documentation/user/out-www/lilypond-internals-big-page', + 'Documentation/user/out-www/music-glossary-big-page', + 'out-www/examples', + 'Documentation/topdocs/out-www/NEWS', + 'Documentation/topdocs/out-www/INSTALL', + 'Documentation/bibliography/out-www/index', + 'Documentation/out-www/THANKS', + 'Documentation/out-www/DEDICATION', + 'Documentation/topdocs/out-www/AUTHORS'] + +header = r""" """ -default_footer = r''' +footer = r'''

@@ -24,6 +37,8 @@ Report errors to %(mail_address)s. ''' +mail_address = 'http://post.gmane.org/post.php?group=gmane.comp.gnu.lilypond.bugs' + header_tag = '' footer_tag = '' @@ -44,10 +59,11 @@ LANGUAGES_TEMPLATE = '''\ html_re = re.compile ('(.*?)(?:[.]([^/.]*))?[.]html$') +pages_dict = {} def build_pages_dict (filelist): """Build dictionnary of available translations of each page""" - pages_dict = {} + global pages_dict for f in filelist: m = html_re.match (f) if m: @@ -60,19 +76,9 @@ def build_pages_dict (filelist): pages_dict[g[0]] = [e] else: pages_dict[g[0]].append (e) - return pages_dict - - -def do_file (prefix, lang_ext, target, header, footer, pages_dict, out_root, name_filter, - package_name, package_version, branch_str, mail_address_url, mail_address): - file_name = langdefs.lang_file_name (prefix, lang_ext, '.html') - in_f = open (file_name) - s = in_f.read() - in_f.close() - - s = re.sub ('%', '%%', s) - ### add header +def add_header (s): + """Add header ( and doctype)""" if re.search (header_tag, s) == None: body = '' s = re.sub ('(?i)', body, s) @@ -88,10 +94,13 @@ def do_file (prefix, lang_ext, target, header, footer, pages_dict, out_root, nam if re.search ('(?i)\n' s = doctype + s + return s - # remove info's annoying's indication of referencing external document - s = re.sub (' \((lilypond|lilypond-internals|music-glossary)\)', '', s) +def info_external_ref_remove (s): + """Remove info's annoying's indication of referencing external document""" + return re.sub (' \((lilypond|lilypond-internals|music-glossary)\)', '', s) +def add_title (s): # urg # maybe find first node? fallback_web_title = '-- --' @@ -99,108 +108,103 @@ def do_file (prefix, lang_ext, target, header, footer, pages_dict, out_root, nam if m: fallback_web_title = m.group (1) s = re.sub ('@WEB-TITLE@', fallback_web_title, s) + return s + +info_nav_bar = re.compile (r'

\s*

\s*(.+?)


\s*
', re.M | re.S) + +def add_footer (s): + """add footer - ### add footer +also add navigation bar to bottom of Info HTML pages""" + m = info_nav_bar.search (s) + if m: + custom_footer = '

\n
\n

' + m.group (1) + '

\n' + footer + else: + custom_footer = footer + if re.search ('(?i)', footer_tag + custom_footer + '\n' + '', s, 1) + elif re.search ('(?i)', footer_tag + custom_footer + '\n' + '', s, 1) + else: + s += footer_tag + custom_footer + '\n' + return s + +def find_translations (prefix, lang_ext): + """find available translations of a page""" + available = [] + missing = [] + for l in langdefs.LANGUAGES: + e = l.webext + if lang_ext != e: + if e in pages_dict[prefix]: + available.append (l) + elif lang_ext == '' and l.enabled and not prefix in non_copied_pages: + # English version of missing translated pages will be written + missing.append (e) + return available, missing + +def process_links (s, prefix, lang_ext, file_name, missing, target): page_flavors = {} - if re.search (footer_tag, s) == None: - if re.search ('(?i)', footer_tag + footer + '\n' + '', s, 1) - elif re.search ('(?i)', footer_tag + footer + '\n' + '', s, 1) - else: - s += footer_tag + footer + '\n' - - # Find available translations of this page. - available = [] - missing = [] - for l in langdefs.LANGUAGES: - e = l.webext - if lang_ext != e: - if e in pages_dict[prefix]: - available.append (l) - elif lang_ext == '' and l.enabled: # English version of missing translated pages will be written - missing.append (e) - - if target == 'online': - # Strip .html, .png suffix for auto language selection (content - # negotiation). The menu must keep the full extension, so do - # this before adding the menu. - # Don't strip .html suffix for documentation index because of - # lilypond/ vs. 
lilypond.html conflict - if prefix == 'Documentation/out-www/index': - page_flavors[file_name] = s - else: - page_flavors[file_name] = re.sub ( - '''(href|src)=[\'"]([^/][.]*[^.:\'"]*)(.html|.png)(#[^"\']*|)[\'"]''', - '\\1="\\2\\4"', s) - elif target == 'offline': - if lang_ext == '': - page_flavors[file_name] = s - for e in missing: - page_flavors[langdefs.lang_file_name (prefix, e, '.html')] = re.sub ( - '''href=[\'"]([^/][.]*[^.:\'"]*)(.html)(#[^"\']*|)[\'"]''', - 'href="\\1.' + e + '\\2\\3"', s) - else: - page_flavors[file_name] = re.sub ( + if target == 'online': + # Strip .html, .png suffix for auto language selection (content + # negotiation). The menu must keep the full extension, so do + # this before adding the menu. + page_flavors[file_name] = re.sub ( + '''(href|src)=[\'"]([^/][.]*[^.:\'"]*)(.html|.png)(#[^"\']*|)[\'"]''', + '\\1="\\2\\4"', s) + elif target == 'offline': + # in LANG doc index: don't rewrite .html suffixes as not all .LANG.html pages exist + # the doc index should be translated and contain the right links + if prefix == 'Documentation/out-www/index': + page_flavors[file_name] = s + elif lang_ext == '': + page_flavors[file_name] = s + for e in missing: + page_flavors[langdefs.lang_file_name (prefix, e, '.html')] = re.sub ( '''href=[\'"]([^/][.]*[^.:\'"]*)(.html)(#[^"\']*|)[\'"]''', - 'href="\\1.' + lang_ext + '\\2\\3"', s) - - # Add menu after stripping: must not have autoselection for language menu. 
- language_menu = '' - for lang in available: - lang_file = lang.file_name (os.path.basename (prefix), '.html') - if language_menu != '': - language_menu += ', ' - language_menu += '%s' % (lang_file, lang.name) - - languages = '' - if language_menu: - languages = LANGUAGES_TEMPLATE % vars () - - # Put language menu before '' and '' tags - for k in page_flavors.keys(): - if re.search ('(?i)', languages + '', page_flavors[k], 1) - elif re.search ('(?i)', languages + '', page_flavors[k], 1) - else: - page_flavors[k] += languages - else: - for e in [l.webext for l in langdefs.LANGUAGES]: - if not e in pages_dict[prefix]: - page_flavors[langdefs.lang_file_name (prefix, e, '.html')] = s + 'href="\\1.' + e + '\\2\\3"', s) + else: + page_flavors[file_name] = re.sub ( + '''href=[\'"]([^/][.]*[^.:\'"]*)(.html)(#[^"\']*|)[\'"]''', + 'href="\\1.' + lang_ext + '\\2\\3"', s) + return page_flavors - for k in page_flavors.keys(): - page_flavors[k] = page_flavors[k] % vars () +def add_menu (page_flavors, prefix, available): + language_menu = '' + for lang in available: + lang_file = lang.file_name (os.path.basename (prefix), '.html') + if language_menu != '': + language_menu += ', ' + language_menu += '%s' % (lang_file, lang.name) - out_f = open (os.path.join (out_root, name_filter (k)), 'w') - out_f.write (page_flavors[k]) - out_f.close() + languages = '' + if language_menu: + languages = LANGUAGES_TEMPLATE % vars () + + # put language menu before '' and '' tags + for k in page_flavors.keys(): + if re.search ('(?i)', languages + '', page_flavors[k], 1) + elif re.search ('(?i)', languages + '', page_flavors[k], 1) + else: + page_flavors[k] += languages + return page_flavors def add_html_footer (package_name = '', package_version = '', - header = default_header, - footer = default_footer, target = 'offline', - mail_address = '(address unknown)', - pages_dict = {}, - out_root = '', name_filter = lambda s: s): """Add header, footer to a number of HTML files Arguments: 
package_name=NAME set package_name to NAME package_version=VERSION set package version to VERSION - header=TEXT use TEXT as header - footer=TEXT use TEXT as footer targets=offline|online set page processing depending on the target offline is for reading HTML pages locally online is for hosting the HTML pages on a website with content negotiation - mail_address set \"Report errors to\" link - pages_dict a dictionnary returned by build_pages_dict() - out_root a path prefix where to write HTML pages name_filter a HTML file name filter """ localtime = time.strftime ('%c %Z', time.localtime (time.time ())) @@ -215,10 +219,39 @@ def add_html_footer (package_name = '', if int ( versiontup[1]) % 2: branch_str = 'development-branch' - for page, ext_list in pages_dict.items (): - for e in ext_list: - do_file (page, e, target, header, footer, pages_dict, out_root, name_filter, - package_name, package_version, branch_str, mail_address_url, mail_address) + for prefix, ext_list in pages_dict.items (): + for lang_ext in ext_list: + file_name = langdefs.lang_file_name (prefix, lang_ext, '.html') + in_f = open (file_name) + s = in_f.read() + in_f.close() + + s = re.sub ('%', '%%', s) + s = add_header (s) + # seems to be no more needed + # s = info_external_ref_remove (s) + + ### add footer + if re.search (footer_tag, s) == None: + s = add_footer (s) + available, missing = find_translations (prefix, lang_ext) + page_flavors = process_links (s, prefix, lang_ext, file_name, missing, target) + # Add menu after stripping: must not have autoselection for language menu. 
+ page_flavors = add_menu (page_flavors, prefix, available) + # urg, this stuff is outdated and seems useless, let's disable it + #else: + # for e in [l.webext for l in langdefs.LANGUAGES]: + # if not e in pages_dict[prefix]: + # page_flavors[langdefs.lang_file_name (prefix, e, '.html')] = s + + subst = globals () + subst.update (locals()) + for k in page_flavors.keys(): + page_flavors[k] = page_flavors[k] % subst + + out_f = open (name_filter (k), 'w') + out_f.write (page_flavors[k]) + out_f.close() # if the page is translated, a .en.html symlink is necessary for content negotiation if target == 'online' and ext_list != ['']: - os.symlink (os.path.basename (page) + '.html', os.path.join (out_root, name_filter (page + '.en.html'))) + os.symlink (os.path.basename (prefix) + '.html', name_filter (prefix + '.en.html')) diff --git a/buildscripts/mirrortree.py b/buildscripts/mirrortree.py index 72a5672..507d566 100644 --- a/buildscripts/mirrortree.py +++ b/buildscripts/mirrortree.py @@ -14,74 +14,49 @@ def new_link_path (link, dir, r): i += 1 return '/'.join ([x for x in l if not r.match (x)]) -def hardlink_tree (input_roots = [], - process_dirs = '.*', - strip_dir_names = '', - exclude_dirs = '', - process_files = '.*', - find_files = '', - exclude_files = '', - target_pattern = '', - targets = ['.']): - """Mirror trees for different targets by hardlinking files. +def walk_tree (tree_roots = [], + process_dirs = '.*', + exclude_dirs = '', + find_files = '.*', + exclude_files = ''): + """Walk directory trees and return a (dirs, symlinks, files) tuple. 
Arguments: - input_roots=DIRLIST use DIRLIST as input tree roots list + tree_roots=DIRLIST use DIRLIST as tree roots list process_dir=PATTERN only process files in directories named PATTERN - strip_dir_names=PATTERN strip directories names matching PATTERN - (write their content to parent) exclude_dir=PATTERN don't recurse into directories named PATTERN - process_files=PATTERN filters files which are hardlinked - find_files=PATTERN find files named PATTERN. The files list will be returned. + find_files=PATTERN filters files which are hardlinked exclude_files=PATTERN exclude files named PATTERN - target_pattern=STRING use STRING as target root directory name pattern - targets=DIRLIST mkdir each directory in DIRLIST and mirrors the tree into each """ - process_files_re = re.compile (process_files) find_files_re = re.compile (find_files) exclude_dirs_re = re.compile (exclude_dirs) exclude_files_re = re.compile (exclude_files) process_dirs_re = re.compile (process_dirs) - strip_dir_names_re = re.compile (strip_dir_names) - do_strip_dir_names_re = re.compile ('/(?:' + strip_dir_names + ')') - found_files = [] + dirs_paths = [] + symlinks_paths = [] + files_paths = [] - if not '%s' in target_pattern: - target_pattern += '%s' - target_dirs = [target_pattern % s for s in targets] - - map (os.mkdir, target_dirs) - - for d in input_roots: - for in_dir, dirs, files in os.walk(d): - out_dir = strip_dir_names_re.sub ('', in_dir) + for d in tree_roots: + for current_dir, dirs, files in os.walk(d): i = 0 while i < len(dirs): if exclude_dirs_re.search (dirs[i]): del dirs[i] else: - if os.path.islink (os.path.join (in_dir, dirs[i])): - files.append (dirs[i]) + p = os.path.join (current_dir, dirs[i]) + if os.path.islink (p): + symlinks_paths.append (p) i += 1 - if not strip_dir_names_re.match (os.path.basename (in_dir)): - for t in target_dirs: - p = os.path.join (t, out_dir) - if not os.path.isdir (p): - os.mkdir (p) - if not process_dirs_re.search (in_dir): + if not 
process_dirs_re.search (current_dir): continue + dirs_paths.append (current_dir) for f in files: if exclude_files_re.match (f): continue - in_file = os.path.join (in_dir, f) - if find_files_re.match (f): - found_files.append (in_file) - if os.path.islink (in_file): # all symlinks are assumed to be relative and to point to files in the input trees - link_path = new_link_path (os.path.normpath (os.readlink (in_file)), in_dir, do_strip_dir_names_re) - for t in target_dirs: - os.symlink (link_path, os.path.join (t, out_dir, f)) - elif process_files_re.match (f): - for t in target_dirs: - os.link (in_file, os.path.join (t, out_dir, f)) - return found_files + p = os.path.join (current_dir, f) + if os.path.islink (p): + symlinks_paths.append (p) + elif find_files_re.match (f): + files_paths.append (p) + return (dirs_paths, symlinks_paths, files_paths) diff --git a/buildscripts/www_post.py b/buildscripts/www_post.py index 1e18241..a8dc9b9 100644 --- a/buildscripts/www_post.py +++ b/buildscripts/www_post.py @@ -16,14 +16,20 @@ outdir = os.path.normpath (outdir) doc_dirs = ['input', 'Documentation', outdir] target_pattern = os.path.join (outdir, '%s-root') -static_files = {os.path.join (outdir, 'index.html'): - ''' +static_files = { + # ugly hack: the following overwrites HTML Info dir with a link to + # the (more useful) documentation index + os.path.join ('Documentation/user', outdir, 'index.html'): + ''' Redirecting to the documentation index...\n''', - os.path.join (outdir, 'VERSION'): - package_version + '\n' } + os.path.join (outdir, 'index.html'): + ''' +Redirecting to the documentation index...\n''', + os.path.join (outdir, 'VERSION'): + package_version + '\n' } -for f in static_files.keys(): - open (f, 'w').write (static_files[f]) +for f, contents in static_files.items (): + open (f, 'w').write (contents) sys.path.append (buildscript_dir) @@ -32,25 +38,46 @@ import add_html_footer import langdefs sys.stderr.write ("Mirrorring...\n") -html_list = 
mirrortree.hardlink_tree (input_roots = doc_dirs, - process_dirs = outdir, - strip_dir_names = outdir, - exclude_dirs = '(' + - '|'.join ([l.code for l in langdefs.LANGUAGES]) + - r'|po|out|\w*?-root)(/|$)', - process_files = r'.*?\.(?:midi|pdf|png|txt|ly|signature)$|VERSION', - exclude_files = r'lily-[0-9a-f]+.*\.pdf', - target_pattern = target_pattern, - targets = targets) -html_dict = add_html_footer.build_pages_dict (html_list) +dirs, symlinks, files = mirrortree.walk_tree ( + tree_roots = doc_dirs, + process_dirs = outdir, + exclude_dirs = '(' + '|'.join ([l.code for l in langdefs.LANGUAGES]) + r'|po|out|\w*?-root)(/|$)', + find_files = r'.*?\.(?:midi|html|pdf|png|txt|ly|signature)$|VERSION', + exclude_files = r'lily-[0-9a-f]+.*\.pdf') + +# actual mirrorring stuff +html_files = [] +hardlinked_files = [] +for f in files: + if f.endswith ('.html'): + html_files.append (f) + else: + hardlinked_files.append (f) +dirs = [re.sub ('/' + outdir, '', d) for d in dirs] +while outdir in dirs: + dirs.remove (outdir) +dirs = list( set (dirs)) +dirs.sort () + +strip_file_name = {} strip_re = re.compile (outdir + '/') for t in targets: + out_root = target_pattern % t + strip_file_name[t] = lambda s: os.path.join (target_pattern % t, (strip_re.sub ('', s))) + os.mkdir (out_root) + map (os.mkdir, [os.path.join (out_root, d) for d in dirs]) + for f in hardlinked_files: + os.link (f, strip_file_name[t] (f)) + for l in symlinks: + p = mirrortree.new_link_path (os.path.normpath (os.readlink (l)), os.path.dirname (l), strip_re) + os.symlink (p, strip_file_name[t] (l)) + +add_html_footer.build_pages_dict (html_files) +for t in targets: sys.stderr.write ("Processing HTML pages for %s target...\n" % t) add_html_footer.add_html_footer ( package_name = package_name, package_version = package_version, target = t, - mail_address = 'http://post.gmane.org/post.php?group=gmane.comp.gnu.lilypond.bugs', - pages_dict = html_dict, - out_root = target_pattern % t, - name_filter = lambda s: 
strip_re.sub ('', s)) + name_filter = strip_file_name[t]) + diff --git a/stepmake/stepmake/texinfo-rules.make b/stepmake/stepmake/texinfo-rules.make index 5cf8c2d..8dbd716 100644 --- a/stepmake/stepmake/texinfo-rules.make +++ b/stepmake/stepmake/texinfo-rules.make @@ -4,6 +4,9 @@ $(outdir)/%.info: $(outdir)/%.texi $(MAKEINFO) -I $(outdir) --output=$@ $< +$(outdir)/%-big-page.html: $(outdir)/%.texi + $(MAKEINFO) -I $(outdir) --output=$@ --css-include=$(top-src-dir)/Documentation/texinfo.css --html --no-split -D bigpage --no-headers $< + $(outdir)/%.html: $(outdir)/%.texi $(MAKEINFO) -I $(outdir) --output=$@ --css-include=$(top-src-dir)/Documentation/texinfo.css --html --no-split --no-headers $< diff --git a/Documentation/user/GNUmakefile b/Documentation/user/GNUmakefile index 9186bf5..c9a139e 100644 --- a/Documentation/user/GNUmakefile +++ b/Documentation/user/GNUmakefile @@ -15,8 +15,8 @@ OUT_PNG_IMAGES=$(OUT_PDF_IMAGES:%.pdf=%.png) OUT_TEXI_FILES=$(ITEXI_FILES:%.itexi=$(outdir)/%.texi)\ $(ITELY_FILES:%.itely=$(outdir)/%.texi) -HTML_FILES = $(TELY_FILES:%.tely=$(outdir)/%.html)\ - $(outdir)/lilypond-internals.html +HTML_FILES = $(TELY_FILES:%.tely=$(outdir)/%-big-page.html)\ + $(outdir)/lilypond-internals-big-page.html # todo: add latex. 
PDF_FILES = $(TELY_FILES:%.tely=$(outdir)/%.pdf) @@ -155,8 +155,8 @@ $(outdir)/lilypond/index.html: $(outdir)/lilypond.texi $(OUT_PNG_IMAGES) $(OUT_E # # One big page manual # -$(outdir)/lilypond.html: $(outdir)/lilypond.texi $(OUT_PNG_IMAGES) - $(MAKEINFO) -I$(outdir) --output=$@ --css-include=$(top-src-dir)/Documentation/texinfo.css --html --no-split --no-headers $< +$(outdir)/lilypond-big-page.html: $(outdir)/lilypond.texi $(OUT_PNG_IMAGES) + $(MAKEINFO) -I$(outdir) --output=$@ --css-include=$(top-src-dir)/Documentation/texinfo.css --html --no-split -D bigpage --no-headers $< # # The split internals reference @@ -168,8 +168,8 @@ $(outdir)/lilypond-internals/index.html: $(outdir)/lilypond-internals.texi # # One big page internals reference # -$(outdir)/lilypond-internals.html: $(outdir)/lilypond-internals.texi - $(MAKEINFO) --output=$@ --css-include=$(top-src-dir)/Documentation/texinfo.css --html --no-split --no-headers $< +$(outdir)/lilypond-internals-big-page.html: $(outdir)/lilypond-internals.texi + $(MAKEINFO) --output=$@ --css-include=$(top-src-dir)/Documentation/texinfo.css --html --no-split -D bigpage --no-headers $< # # The split glossary @@ -218,12 +218,10 @@ $(outdir)/source: $(outdir)/lilypond/source: @rm -f $(@) - mkdir -p $(outdir)/lilypond ln -sf ../../../ $(@) $(outdir)/music-glossary/source: @rm -f $(@) - mkdir -p $(outdir)/music-glossary ln -sf ../../../ $(@) local-WWW: $(HTML_FILES) $(DEEP_HTML_FILES)\ diff --git a/Documentation/user/macros.itexi b/Documentation/user/macros.itexi index 0274641..2242b6a 100644 --- a/Documentation/user/macros.itexi +++ b/Documentation/user/macros.itexi @@ -53,15 +53,27 @@ @c @inforef{\NAME\,,address@hidden @c using @ref without punctuation is OK without for formats other than info address@hidden annoying parentheses stripped by add-html-footer.py address@hidden bigpage + @macro internalsref{NAME} @vindex \NAME\ address@hidden,,,lilypond-internals} address@hidden,,,lilypond-internals-big-page} @end macro 
address@hidden usage: @inputfileref{input@/regression,FILE@/-NAME.ly} address@hidden inputfileref{DIR,NAME} address@hidden/\DIR\/collated-files.html#\NAME\,@file{\DIR\/address@hidden address@hidden rglos{NAME} address@hidden \NAME\ address@hidden,,,music-glossary-big-page} address@hidden macro + address@hidden ifset + + address@hidden bigpage + address@hidden annoying parentheses stripped by add-html-footer.py address@hidden internalsref{NAME} address@hidden \NAME\ address@hidden,,,lilypond-internals} @end macro @c annoying parentheses stripped by add-html-footer.py @@ -70,6 +82,14 @@ @ref{\NAME\,,,music-glossary} @end macro address@hidden ifclear + + address@hidden usage: @inputfileref{input@/regression,FILE@/-NAME.ly} address@hidden inputfileref{DIR,NAME} address@hidden/\DIR\/collated-files.html#\NAME\,@file{\DIR\/address@hidden address@hidden macro + @c to get decent quotes in `foo' @macro q{TEXT} @html