import datetime
import bs4
import html as htmllib
import marko
import re
import urllib.parse
import parse_content
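# Content collections and helpers re-exported from parse_content for convenience.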
days = parse_content.days
entries = parse_content.entries
months = parse_content.months
series = parse_content.series
slugify = parse_content.slugify
tags = parse_content.tags
updated = parse_content.updated
years = parse_content.years
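# Render a single entry as an Atom <entry> element for the feed.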
def atom_entry(id):
    return (
        "<entry>"
        + "<title>"
        + entries[id]["title"]
        + "</title>"
        + '<link href="'
        + base_url
        + url_of_entry(id)
        + '"/>'
        + f"<id>{base_url}{url_of_entry(id)}</id>"
        + "<published>" + entries[id]["date"].isoformat() + "</published>"
        + "<updated>"
        + (
            entries[id]["modified"].isoformat()
            if "modified" in entries[id]
            else entries[id]["date"].isoformat()
        )
        + "</updated>"
        + (
            (
                "<summary>"
                + print_html_as_text(entries[id]["description"])
                + "</summary>"
            )
            if "description" in entries[id]
            else ""
        )
        + '<content type="html">'
        + "\n"
        + print_html_escaped(marko.convert(entries[id].content))
        + "</content>"
        + "</entry>"
    )
base_url = "https://gkn.me.uk"
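# Page colour class keyed on the month number, e.g. "month-04".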
colour_class = f"month-{list(reversed(months))[0].month:02d}"
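# Entries whose "series" metadata matches the given series name (compared as slugs).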
def entries_in_series(series):
    return {
        id: entries[id]
        for id in entries
        if "series" in entries[id]
        and slugify(series) == slugify(entries[id]["series"])
    }
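# Entries carrying the given tag (compared as slugs).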
def entries_with_tag(tag):
    return {
        id: entries[id]
        for id in entries
        if "tags" in entries[id]
        and slugify(tag) in map(slugify, entries[id]["tags"])
    }
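# Markup shared by every page's <head>.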
head = """
"""
hues = [15, 30, 52, 82, 149, 187, 210, 246, 269, 291, 321, 352]
link_home = '<a href="/">Home</a>'
link_to_all_entries = '<a href="/entries">All entries</a>'
link_to_feed = (
    '<a href="/feed">'
    "Subscribe"
    "</a>"
)
footer = (
""
)
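# Render an entry as an HTML link: title, date (and modified date), plus optional series, labels, and description.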
def link_to_entry(id, **kwargs):
    return (
        f'<a href="{url_of_entry(id)}">'
        + "<span>"
        + ("<strong>" + entries[id]["title"] + "</strong>")
        + "<small>"
        + " · "
        + (
            "from " + "<cite>" + entries[id]["series"] + "</cite>" + " · "
            if "series" in entries[id] and "omit_series" not in kwargs
            else ""
        )
        + (
            f'<time datetime="{entries[id]["date"].isoformat()}">'
            + entries[id]["date"].isoformat()
            + "</time>"
        )
        + (
            " / "
            + f'<time datetime="{entries[id]["modified"].isoformat()}">'
            + entries[id]["modified"].isoformat()
            + "</time>"
            if "modified" in entries[id]
            else ""
        )
        + (
            " · "
            + ", ".join("<span>" + label + "</span>" for label in kwargs["labels"])
            if "labels" in kwargs
            else ""
        )
        + "</small>"
        + (
            (
                " · "
                + "<span>"
                + print_html_as_text(entries[id]["description"])
                + "</span>"
            )
            if "description" in entries[id]
            else ""
        )
        + "</span>"
        + "</a>"
    )
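# Render a link to an arbitrary URL, with an optional description and optional rel/type attributes.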
def link_with_details(url, title, description=None, rel=None, type=None):
    return (
        f'<a href="{url}"'
        + (f' rel="{rel}"' if rel else "")
        + (f' type="{type}"' if type else "")
        + ">"
        + "<span>"
        + ("<strong>" + title + "</strong>")
        + (
            (
                " · "
                + "<span>"
                + description
                + "</span>"
            )
            if description
            else ""
        )
        + "</span>"
        + "</a>"
    )
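# Return the id `offset` steps away from `id` in `seq` (1 = next, -1 = previous), or None at either end.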
def offset_id(seq, id, offset):
part_seq = list(seq)[list(seq).index(id) :: offset]
if len(part_seq) > 1:
return part_seq[1]
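# BeautifulSoup-based helpers: normalise an HTML fragment, flatten it to plain text, or escape it for embedding.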
def print_html(html):
return str(bs4.BeautifulSoup(html, "html.parser"))
def print_html_as_text(html):
return str(bs4.BeautifulSoup(html, "html.parser").get_text())
def print_html_escaped(html):
return htmllib.escape(str(bs4.BeautifulSoup(html, "html.parser")))
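# Tags that appear on at least four entries.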
repeated_tags = [tag for tag in tags if len(entries_with_tag(tag)) >= 4]
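# Unix timestamp divided by 100,000, to one decimal place ("%s" is a platform-specific strftime code).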
def stardate(date):
return format(int(date.strftime("%s")) / 100000, ".1f")
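# Page title, with the site name appended when a page-specific string is given.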
def title(string):
return string + " · Grey Nicholson" if string else "Grey Nicholson"
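# URL scheme: date archives at /YYYY[/MM[/DD]], tag pages under /entries/, series pages at the root.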
def url_of_day(date):
return f"/{date.year}/{date.month:02d}/{date.day:02d}"
def url_of_entry(id):
return f"/{id}"
def url_of_month(date):
return f"/{date.year}/{date.month:02d}"
def url_of_tag(tag):
return f"/entries/{slugify(tag)}"
def url_of_tag_feed(tag):
return f"/entries/{slugify(tag)}/feed"
def url_of_series(series):
return f"/{slugify(series)}"
def url_of_series_feed(series):
return f"/{slugify(series)}/feed"
def url_of_year(date):
return f"/{date.year}"