gnunet-svn

[www] 13/19: move inc to www_shared repo


From: gnunet
Subject: [www] 13/19: move inc to www_shared repo
Date: Wed, 13 Nov 2019 01:28:45 +0100

This is an automated email from the git hooks/post-receive script.

ng0 pushed a commit to branch master
in repository www.

commit 4500a4e0bde4939eb3f49ea54efd4feb18d6ec4c
Author: ng0 <address@hidden>
AuthorDate: Tue Nov 12 21:36:28 2019 +0000

    move inc to www_shared repo
---
 inc/fileproc.py   |  98 ----------------------------------
 inc/i18nfix.py    |  41 ---------------
 inc/news.macro.j2 |  13 -----
 inc/site.py       | 153 ------------------------------------------------------
 inc/sitemap.py    |  21 --------
 inc/sum.py        |  33 ------------
 inc/textproc.py   |  39 --------------
 7 files changed, 398 deletions(-)

diff --git a/inc/fileproc.py b/inc/fileproc.py
deleted file mode 100644
index bf6cc8d..0000000
--- a/inc/fileproc.py
+++ /dev/null
@@ -1,98 +0,0 @@
-from pathlib import Path
-import os
-import shutil
-
-def copy_tree(source, destination):
-    destination.mkdir(parents=True, exist_ok=True)
-    for _ in os.listdir(source):
-        i = source / _
-        o = destination / _
-        if i.is_dir():
-            copy_tree(i, o)
-        else:
-            shutil.copy2(str(i), str(o))
-
-
-def copy_files(kind, conf, locale, inlist, ptarget):
-    o = Path(ptarget)
-    for item in conf[inlist]:
-        i = Path(kind + "/" + item["file"])
-        # print(i)
-        for t in item["targets"]:
-            d_loc = o / locale / t
-            d = o / t
-            # print(d)
-            if i.is_file() is not False:
-                d_loc.write_text(i.read_text())
-                print("copied " + str(i) + " to " + str(d_loc) + "...")
-                d.write_text(i.read_text())
-                print("copied " + str(i) + " to " + str(d) + "...")
-
-
-def rm_rf(directory):
-    directory = Path(directory)
-    for child in directory.glob('*'):
-        if child.is_file():
-            child.unlink()
-        else:
-            rm_rf(child)
-    # directory.rmdir()
-
-
-def fileop(infile, outfile, action):
-    """
-    infile: inputfile, Path object
-    outfile: outputfile, Path object
-    action: action if any, String
-    """
-    i = Path(infile)
-    o = Path(outfile)
-    outdir = Path("rendered")
-    if i.is_file() is not False:
-        if action == "copy":
-            # Write content of i to o.
-            o.write_text(i.read_text())
-        if action == "link":
-            o.symlink_to(i)
-
-
-def write_name(filename, infile, locale, replacer):
-    return "./rendered/" + locale + "/" + infile.replace(replacer,
-                                                         '').rstrip(".j2")
-
-
-def localized(filename, locale, *args):
-    if len(args) == 0:
-        return "../" + locale + "/" + filename
-    ext = kwargs.get('ext', None)
-    if ext is not None:
-        lf = filename + "." + locale + "." + ext
-        lp = Path(lf)
-        if locale == "en" or not lp.is_file():
-            return "../" + filename + "." + ext
-        else:
-            return "../" + lf
-
-
-# This generates and switches sites generations, preventing
-# in-place modification of the website.
-# * save old generation directory name
-# * jinja2 creates content in "rendered" (happened before calling this function)
-# * calculate sum of "rendered"
-# * move "rendered" to out/$sum
-# * remove symlink "html_dir"
-# * symlink out/$sum to "html_dir"
-# * delete old generation directory
-def generation_dir(htmldir):
-    oldgen = Path(htmldir).resolve()
-    # precondition: jinja2 has created the files in "rendered".
-    newgen = Path("rendered")
-    newgen_sum = walksum(newgen)
-    outdir = Path("out")
-    outdir.mkdir(parents=True, exist_ok=True)
-    newgen_target = Path("out") / newgen_sum
-    newgen.rename(newgen_target)
-    html = Path(htmldir)
-    html.unlink()
-    fileop(newgen, html, "link")
-    rm_rf(oldgen)
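
The comment block above generation_dir() spells out the switch scheme: render into "rendered", hash the tree, move it to out/<sum>, repoint the "html_dir" symlink, then delete the previous generation. The function itself depends on walksum() from inc/sum.py and on fileop(..., "link"). A minimal self-contained sketch of the same swap; the names switch_generation and tree_digest are illustrative, and a plain byte-level hash stands in for walksum():

    import hashlib
    import shutil
    from pathlib import Path


    def tree_digest(directory):
        """Hash all file contents below directory in a stable order."""
        digest = hashlib.sha256()
        for path in sorted(Path(directory).rglob("*")):
            if path.is_file():
                digest.update(path.read_bytes())
        return digest.hexdigest()


    def switch_generation(htmldir="html_dir", rendered="rendered", out="out"):
        """Move the freshly rendered tree to out/<sum> and repoint the symlink."""
        link = Path(htmldir)
        old_gen = link.resolve() if link.is_symlink() else None
        Path(out).mkdir(parents=True, exist_ok=True)
        target = Path(out) / tree_digest(rendered)
        Path(rendered).rename(target)   # move "rendered" to out/<sum>
        if link.is_symlink():
            link.unlink()               # drop the old "html_dir" symlink
        link.symlink_to(target)         # point "html_dir" at the new generation
        if old_gen is not None and old_gen != target.resolve():
            shutil.rmtree(old_gen)      # delete the previous generation

Because the new tree is complete before the symlink flips, anything reading "html_dir" never sees a half-written site, which is what the comment means by preventing in-place modification.
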
diff --git a/inc/i18nfix.py b/inc/i18nfix.py
deleted file mode 100644
index 69fe177..0000000
--- a/inc/i18nfix.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2017, 2018 GNUnet e.V.
-#
-# Copying and distribution of this file, with or without modification,
-# are permitted in any medium without royalty provided the copyright
-# notice and this notice are preserved.  This file is offered as-is,
-# without any warranty.
-"""
-Extract translations from a Jinja2 template, stripping leading newlines.
-
-@author Florian Dold
-"""
-
-import re
-import jinja2.ext
-
-
-def normalize(message):
-    message = message.strip()
-    # collapse whitespaces (including newlines) into one space.
-    message = re.sub("\s+", " ", message)
-    return message
-
-
-def babel_extract(fileobj, keywords, comment_tags, options):
-    res = jinja2.ext.babel_extract(fileobj, keywords, comment_tags, options)
-    for lineno, funcname, message, comments in res:
-        message = normalize(message)
-        yield lineno, funcname, message, comments
-
-
-def wrap_gettext(f):
-    """
-    Call gettext with whitespace normalized.
-    """
-    def wrapper(message):
-        message = normalize(message)
-        return f(message)
-
-    return wrapper
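
babel_extract() collapses whitespace in every extracted message, and wrap_gettext() applies the same normalization at lookup time so the runtime msgids still match the catalog. A short usage sketch, mirroring the wiring in site.py below; it assumes a compiled "messages" catalog for "de" exists under locale/:

    import gettext

    import inc.i18nfix as i18nfix

    # Assumes locale/de/LC_MESSAGES/messages.mo has been built.
    tr = gettext.translation("messages", localedir="locale", languages=["de"])
    tr.gettext = i18nfix.wrap_gettext(tr.gettext)
    # "  read\n   more " and "read more" now resolve to the same catalog entry.
    print(tr.gettext("  read\n   more "))
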
diff --git a/inc/news.macro.j2 b/inc/news.macro.j2
deleted file mode 100644
index 0ed9972..0000000
--- a/inc/news.macro.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-{% macro newspreview(name) -%}
-  <section class="item-preview">
-    <header>
-      <h3>{{ name['title']|e }}</h3>
-      <p class="item-date">
-        {{ name['date'] }}
-      </p>
-    </header>
-    <p class="item-abstract">
-      {{ name['abstract'] }} [<a href="{% if kwargs|length > 0 %}{{ kwargs['prefix'] }}{% endif %}{{ name['page'] }}" title="{{ name['date']}}">{{ _("read more") }}</a>]
-    </p>
-  </section>
-{% endmacro -%}
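
newspreview() expects a news item dict with title, date, abstract and page keys (plus an optional prefix keyword picked up via kwargs), which site.py below passes in as newsdata. A Python-side sketch of rendering the macro directly; the item values are made up and the loader path assumes the macro file is still on disk (with this commit it moves to the www_shared repository):

    import jinja2

    env = jinja2.Environment(loader=jinja2.FileSystemLoader("."),
                             extensions=["jinja2.ext.i18n"])
    env.install_null_translations(newstyle=True)  # provides _() for "read more"
    tmpl = env.from_string('{% from "inc/news.macro.j2" import newspreview %}'
                           '{{ newspreview(item, prefix="../") }}')
    print(tmpl.render(item={"title": "Example post",        # made-up news item
                            "date": "2019-11-12",
                            "abstract": "A short teaser.",
                            "page": "example.html"}))
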
diff --git a/inc/site.py b/inc/site.py
deleted file mode 100644
index 9fd9ac8..0000000
--- a/inc/site.py
+++ /dev/null
@@ -1,153 +0,0 @@
-import os
-import os.path
-import sys
-import re
-import gettext
-import glob
-import codecs
-import jinja2
-from pathlib import Path, PurePosixPath, PurePath
-from ruamel.yaml import YAML
-import inc.i18nfix as i18nfix
-from inc.textproc import cut_news_text
-from inc.fileproc import copy_files, copy_tree
-
-
-class gen_site:
-    def __init__(self, debug):
-        self.debug = debug
-
-    def load_config(self, name="www.yml"):
-        yaml = YAML(typ='safe')
-        site_configfile = Path(name)
-        return yaml.load(site_configfile)
-
-    def copy_trees(self, directory):
-        """ Take a directory name (string) and pass it to copy_tree() as Path 
object. """
-        i = Path(directory)
-        o = Path("rendered/" + directory)
-        copy_tree(i, o)
-
-    def gen_abstract(self, conf, name, member, pages, length):
-        if self.debug:
-            print("generating abstracts...")
-        for item in conf[name]:
-            item[member] = cut_news_text(item[pages], length)
-        if self.debug:
-            print("cwd: " + str(Path.cwd()))
-        if self.debug > 1:
-            print(conf["newsposts"])
-        if self.debug:
-            print("[done] generating abstracts")
-
-    def run(self, root, conf, env):
-        # root = "../" + root
-        if self.debug:
-            _ = Path(".")
-            q = list(_.glob("**/*.j2"))
-            print(q)
-        # for in_file in glob.glob(root + "/*.j2"):
-        for in_file in Path(".").glob(root + "/*.j2"):
-            in_file = str(in_file)
-            if self.debug:
-                print(in_file)
-            name, ext = re.match(r"(.*)\.([^.]+)$",
-                                 in_file.rstrip(".j2")).groups()
-            tmpl = env.get_template(in_file)
-
-            def self_localized(other_locale):
-                """
-                Return URL for the current page in another locale.
-                """
-                return "../" + other_locale + "/" + in_file.replace(
-                    root + '/', '').rstrip(".j2")
-
-            def url_localized(filename):
-                if root == "news":
-                    return "../../" + locale + "/" + filename
-                else:
-                    return "../" + locale + "/" + filename
-
-            def url_static(filename):
-                if root == "news":
-                    return "../../static/" + filename
-                else:
-                    return "../static/" + filename
-
-            def url_dist(filename):
-                if root == "news":
-                    return "../../dist/" + filename
-                else:
-                    return "../dist/" + filename
-
-            def svg_localized(filename):
-                lf = filename + "." + locale + ".svg"
-                if locale == "en" or not Path(lf).is_file():
-                    return "../" + filename + ".svg"
-                else:
-                    return "../" + lf
-
-            def url(x):
-                # TODO: look at the app root environment variable
-                # TODO: check if file exists
-                #if root == "news":
-                #    return "../" + "../" + x
-                #else:
-                #    return "../" + x
-                return "../" + x
-
-            # for l in glob.glob("locale/*/"):
-            # https://bugs.python.org/issue22276
-            for l in list(x for x in Path(".").glob("locale/*/") if x.is_dir()):
-                l = str(PurePath(l).name)
-                if self.debug:
-                    print(l)
-                # locale = os.path.basename(l[:-1])
-                locale = l
-
-                tr = gettext.translation("messages",
-                                         localedir="locale",
-                                         languages=[locale])
-
-                tr.gettext = i18nfix.wrap_gettext(tr.gettext)
-
-                env.install_gettext_translations(tr, newstyle=True)
-
-                content = tmpl.render(lang=locale,
-                                      lang_full=conf["langs_full"][locale],
-                                      url=url,
-                                      meetingnotesdata=conf["meetingnotes"],
-                                      newsdata=conf["newsposts"],
-                                      videosdata=conf["videoslist"],
-                                      self_localized=self_localized,
-                                      url_localized=url_localized,
-                                      url_static=url_static,
-                                      url_dist=url_dist,
-                                      svg_localized=svg_localized,
-                                      filename=name + "." + ext)
-
-                if root == "news":
-                    out_name = "./rendered/" + locale + "/" + root + "/" + in_file.replace(
-                        root + '/', '').rstrip(".j2")
-                else:
-                    out_name = "./rendered/" + locale + "/" + in_file.replace(
-                        root + '/', '').rstrip(".j2")
-
-                outdir = Path("rendered")
-
-                if root == "news":
-                    langdir = outdir / locale / root
-                else:
-                    langdir = outdir / locale
-
-                try:
-                    langdir.mkdir(parents=True, exist_ok=True)
-                except e as FileNotFoundError:
-                    print(e)
-
-                with codecs.open(out_name, "w", encoding='utf-8') as f:
-                    try:
-                        print(Path.cwd())
-                        f.write(content)
-                    except e as Error:
-                        print(e)
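
One detail worth flagging in run() above: the two handlers are written as "except e as FileNotFoundError:" and "except e as Error:", i.e. with the exception class and the bound name swapped, so if mkdir() or write() ever fails they raise a NameError instead of printing the error. A corrected sketch of just that write-out step; write_rendered is a made-up helper name, the arguments match the variables used above:

    import codecs
    from pathlib import Path


    def write_rendered(langdir, out_name, content):
        """Create the per-locale output directory and write one rendered page."""
        try:
            Path(langdir).mkdir(parents=True, exist_ok=True)
        except FileNotFoundError as e:
            print(e)
        with codecs.open(out_name, "w", encoding="utf-8") as f:
            try:
                f.write(content)
            except OSError as e:
                print(e)
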
diff --git a/inc/sitemap.py b/inc/sitemap.py
deleted file mode 100644
index 5ccf744..0000000
--- a/inc/sitemap.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import os
-from pathlib import Path, PurePosixPath
-
-
-def sitemap_tree(path):
-    tree = dict(name=PurePosixPath(path).name, children=[])
-    try:
-        mylist = os.listdir(path)
-    except OSError:
-        pass
-    else:
-        for name in mylist:
-            fn = os.path.join(path, name)
-            if os.path.isdir(fn):
-                tree['children'].append(sitemap_tree(fn))
-            else:
-                np = os.path.join(name)
-                if np.startswith('/'):
-                    np = np[1:]
-                tree['children'].append(dict(name=np))
-    return tree
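
sitemap_tree() walks a directory and returns a nested dict, {"name": ..., "children": [...]}, with one child entry per subdirectory or file. A short usage sketch; the "rendered/en" path is illustrative:

    import json

    from inc.sitemap import sitemap_tree

    tree = sitemap_tree("rendered/en")
    print(json.dumps(tree, indent=2))  # {"name": "en", "children": [...]}
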
diff --git a/inc/sum.py b/inc/sum.py
deleted file mode 100644
index fff7a81..0000000
--- a/inc/sum.py
+++ /dev/null
@@ -1,33 +0,0 @@
-def sha256sum(_):
-    sha256 = hashlib.sha256()
-    with io.open(_, mode="rb") as fd:
-        content = fd.read()
-        sha256.update(content)
-    return sha256.hexdigest()
-
-
-def walksum(_):
-    sha256 = hashlib.sha256()
-    x = Path(_)
-    if not x.exists():
-        return -1
-    try:
-        for root, directories, files in os.walk(_):
-            for names in sorted(files):
-                filepath = os.path.join(root, names)
-                try:
-                    fl = open(filepath, 'rb')
-                except:
-                    fl.close()
-                    continue
-                while 1:
-                    buf = fl.read(4096)
-                    if not buf:
-                        break
-                    sha256.update(hashlib.sha256(buf).hexdigest())
-                fl.close()
-    except:
-        import traceback
-        traceback.print_exc()
-        return -2
-    return sha256.hexdigest()
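
As committed, inc/sum.py never imports hashlib, io, os or Path, walksum() feeds a hex string to sha256.update() (which only accepts bytes on Python 3), and the bare "except:" around open() tries to close a handle that may never have been bound. A self-contained sketch with those issues addressed; it hashes raw file contents, so its digests are not comparable to what the original scheme would have produced, but it is the piece that generation_dir() in inc/fileproc.py relies on:

    import hashlib
    import io
    import os
    from pathlib import Path


    def sha256sum(path):
        """Hex digest of a single file."""
        sha256 = hashlib.sha256()
        with io.open(path, mode="rb") as fd:
            sha256.update(fd.read())
        return sha256.hexdigest()


    def walksum(directory):
        """Hex digest over all file contents below directory."""
        sha256 = hashlib.sha256()
        if not Path(directory).exists():
            return -1
        for root, dirs, files in os.walk(directory):
            dirs.sort()  # make the traversal order deterministic
            for name in sorted(files):
                filepath = os.path.join(root, name)
                try:
                    with open(filepath, "rb") as fl:
                        for buf in iter(lambda: fl.read(4096), b""):
                            sha256.update(buf)  # update() wants bytes
                except OSError:
                    continue
        return sha256.hexdigest()
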
diff --git a/inc/textproc.py b/inc/textproc.py
deleted file mode 100644
index f3b97d3..0000000
--- a/inc/textproc.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import html.parser
-from bs4 import BeautifulSoup
-
-
-class extractText(html.parser.HTMLParser):
-    def __init__(self):
-        super(extractText, self).__init__()
-        self.result = []
-
-    def handle_data(self, data):
-        self.result.append(data)
-
-    def text_in(self):
-        return ''.join(self.result)
-
-
-def html2text(html):
-    k = extractText()
-    k.feed(html)
-    return k.text_in()
-
-
-def cut_text(filename, count):
-    with open(filename) as html:
-        soup = BeautifulSoup(html, features="lxml")
-        for script in soup(["script", "style"]):
-            script.extract()
-        k = []
-        for i in soup.findAll('p')[1]:
-            k.append(i)
-        b = ''.join(str(e) for e in k)
-        text = html2text(b.replace("\n", ""))
-        textreduced = (text[:count] + '...') if len(text) > count else (text +
-                                                                        '..')
-        return (textreduced)
-
-
-def cut_news_text(filename, count):
-    return cut_text("news/" + filename + ".j2", count)
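
cut_text() pulls the second <p> out of a rendered news page, strips the markup and truncates it, and gen_abstract() in inc/site.py uses cut_news_text() to fill the abstract field of each news item. A short usage sketch; the page name is made up, and beautifulsoup4 plus lxml need to be installed:

    from inc.textproc import cut_news_text

    # Roughly the first 250 characters of the second paragraph of
    # news/2019-11-example.html.j2 (an illustrative file), tags stripped.
    print(cut_news_text("2019-11-example.html", 250))
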

-- 
To stop receiving notification emails like this one, please contact
address@hidden.


