#!/usr/bin/env python3
"""wlog -- a tiny single-file CGI blog engine.

Posts are Markdown files under ``pages_dir``; themes are ``head.<theme>``
and ``post.<theme>`` templates under ``theme_dir``.  Dispatch happens on
the ``title`` CGI query parameter: empty (front page), ``0`` (archive),
``posts.atom`` (Atom feed), ``<post>``, ``<tag>/``, ``<n>`` or
``<tag>/<n>`` (paginated listings).

NOTE(review): this file was recovered from a copy whose HTML/XML markup
inside string literals had been stripped by text extraction.  Every
literal flagged ``TODO(markup)`` below is a best-effort reconstruction
and must be checked against the deployed original.
"""
import cgi   # NOTE(review): deprecated since 3.11, removed in 3.13 (PEP 594)
import html
import math
import os
import re
import sys

import mistune

# --- site configuration --------------------------------------------------
site_name = "site"        # Link to the homepage with this name
blog_title = "wlog"       # Rename wlog to anything you want :)
full_url = "http://pqrs.dk/~alvin/wlog/"   # used for RSS
t_z = ":00+01:00"         # timezone suffix appended to RSS timestamps
#blog_url = "/wlog/"      # Absolute location for wlog on the server
blog_url = "wlog.py3"     # Absolute location for wlog on the server
pages_dir = "./pages/"    # Location of blog posts relative to script on disk
theme_dir = "./themes/"
blog_theme = "bluer"      # Use head.wlog and post.wlog for wlog theme
pagin = 5                 # Change this to change how many posts are shown per page
markdown = mistune.Markdown()
pages_extension = ".md"   # Default file extension. Markdown recommended
len_ext = len(pages_extension)
readmore = "<...> "       # Line beginning with this creates a preview for indexing
post_prefix = blog_url + "?title="   # Optionally change to blog_url
date_prefix = "date: "
ldp = len(date_prefix)


def escape_html(text):
    """Escape *text* for display in HTML, preserving line breaks and
    whitespace runs.

    BUGFIX: ``cgi.escape`` was removed in Python 3.8; ``html.escape`` is
    the drop-in replacement (same ``quote=True`` semantics).
    TODO(markup): the replacement strings were stripped from the
    recovered copy; ``<br />`` / ``&nbsp;`` are the conventional
    reconstruction -- confirm against the deployed original.
    """
    return html.escape(text, quote=True).\
        replace(u'\n', u'<br />').\
        replace(u'\t', u'&nbsp;&nbsp;&nbsp;&nbsp;').\
        replace(u'  ', u'&nbsp; ')


def main():
    """CGI entry point: dispatch on the ``title`` query parameter."""
    form = cgi.FieldStorage()
    page_name = form.getvalue('title')
    if not page_name:                    # front page == first index page
        wlog_head("1")
        post_list(pages_dir, 1)
        return
    if page_name == 'posts.atom':        # Atom feed
        do_rss()
        return
    page_name = escape_html(page_name)   # never trust query input
    wlog_head(page_name)
    if page_name == '0':                 # full archive table
        post_list(pages_dir, '0')
        return
    elif os.path.isfile(pages_dir + page_name + pages_extension):
        post_printer(page_name)          # a single post
        wlog_foot()
        return
    # Remaining forms: "<n>", "<tag>/<n>", "<tag>" / "<tag>/".  The
    # original used bare ``except:``; narrowed to ``Exception`` so
    # SystemExit/KeyboardInterrupt are no longer swallowed.  The chain is
    # deliberate best-effort fallthrough: e.g. int() raising ValueError,
    # or post_list() raising NameError on an unknown tag directory, each
    # drop us to the next, looser interpretation.
    try:
        page_no = int(page_name.split('/')[-1])
        page_tag = page_name.split('/')[0:]
        if int(page_tag[0]) != page_no:          # "<tag-number>/<n>"
            post_list(pages_dir + page_tag[0], page_no)
        else:                                    # plain "<n>"
            post_list(pages_dir, page_no)
    except Exception:
        try:
            # "<tag>/<n>": recover the tag from the repr of the path list
            # (only works for single-level tags -- preserved as-is).
            page_no = int(page_name.split('/')[-1])
            page_tag = str(page_name.split('/')[:-1])[2:-2]
            post_list(pages_dir + page_tag, page_no)
        except Exception:
            try:
                # "<tag>" or "<tag>/": first page of that tag.
                page_tag = page_name.split('/')[0]
                post_list(pages_dir + page_tag, 1)
            except Exception:
                post_heading("404 :: " + page_name, "page not found ...", ".")
                post_printer(page_name)  # fix this


def wlog_head(page_name=' '):
    """Emit the CGI content-type header and the themed page head.

    The ``head.<theme>`` template is a ``str.format`` template receiving
    (site_name, blog_title, " // <page>", post_prefix).
    """
    print("Content-type: text/html\r\n")
    with open(theme_dir + "head." + blog_theme) as f:
        if page_name:
            # "tag/" -> show the tag; otherwise show the last path component.
            if page_name.split('/')[-1] == "":
                page_name = " " + str(page_name.split('/')[0])
            else:
                page_name = " " + str(page_name.split('/')[-1])
            content = [site_name, blog_title, " // " + page_name, post_prefix]
            print(str(f.read()).format(*content))
        else:
            # BUGFIX: this branch referenced the unbound name ``content``
            # (guaranteed NameError); pass site_name directly instead.
            print(str(f.read()).format(site_name, blog_title, ''))


def post_heading(post_title, page_date="0", post_filename="", post_tag="/",
                 tag_alias="/"):
    """Render the themed post header (``post.<theme>`` format template)."""
    content = [page_date, post_filename, post_title.strip(), post_tag,
               tag_alias, post_prefix]
    with open(theme_dir + "post." + blog_theme) as f:
        print(str(f.read()).format(*content))


def post_printer(page_name, preview=0, tag="", date="0"):
    """Render one post: heading, then Markdown body.

    ``preview=1`` truncates at the first ``readmore`` marker line.
    """
    post_date = "Stickied post"  # sticky, draft, etc?
    tag_alias = "/"
    post_tag = "."
    if not os.path.isfile(pages_dir + page_name + pages_extension):
        # TODO(markup): the tags wrapping this message were stripped from
        # the recovered copy; the visible text is preserved verbatim.
        print("<p>Sorry, but [" + page_name + "] does not exist :(</p>")
        return
    with open(pages_dir + page_name + pages_extension) as f:
        contents = list(f)
    # A leading "date: ..." line becomes the displayed date.
    if contents[0][:ldp] == date_prefix:
        post_date = contents.pop(0)[ldp:]
    if post_tag != page_name:   # always true unless page_name == "."
        post_tag = str(page_name.split('/')[0])
        tag_alias = "/" + post_tag + "/"
        page_name = page_name.split("/")[-1]
    page_path = str(post_prefix + tag_alias[1:] + page_name)
    previewed = []
    for line in contents[1:]:
        if line[:len(readmore)] == readmore and preview == 1:
            # NOTE(review): this was likely a link to ``page_path`` whose
            # markup was stripped -- TODO confirm; plain text preserved.
            previewed.append("\nRead more...")
            break
        elif line[:len(readmore)] == readmore and not preview:
            previewed.append(line[len(readmore):])   # drop the marker
        else:
            previewed.append(line)
    post_heading(contents[0], post_date, page_path, post_tag, tag_alias)
    print(markdown(''.join(previewed)))
    print('\n')


def wlog_foot(page_no=0, page_nos=0, page_tag=""):
    """Print the page footer with home link and pagination controls.

    TODO(markup): all anchor/nav markup here was stripped from the
    recovered copy; hrefs are reconstructed from the pagination logic
    (``{2}`` must have appeared inside a stripped tag, since the original
    format call passed three arguments).
    """
    print("""<hr />
<a href="/">{0}</a> :: <a href="{2}">{1}</a>""".format(site_name, blog_title,
                                                       blog_url))
    print('&diams; <a href="' + post_prefix + '0">[all]</a>')  # TODO(markup)
    if page_tag == "/":
        page_tag = ""
    if page_no > 1:
        print('&diams; <a href="{0}{1}{2}">[prev]</a>'.format(
            post_prefix, page_tag, page_no - 1))               # TODO(markup)
    if page_no < page_nos:
        print('&diams; <a href="{0}{1}{2}">[next]</a>'.format(
            post_prefix, page_tag, page_no + 1))               # TODO(markup)
    if page_nos > 0:
        print("&diams; [" + str(page_no) + "/" + str(page_nos) + "]")
    print("""</body>
</html>""")                                                    # TODO(markup)


def post_list(pages_dir, page_no=0, rss=0):
    """Collect posts under *pages_dir* keyed by date, then either render
    a paginated listing, the archive table (``page_no == '0'``), or
    return ``[date, name]`` pairs for the RSS feed (``rss == 1``).
    """
    page_path = []
    for root, dirs, fils in os.walk(pages_dir):
        for filename in fils:
            page_path.append(os.path.join(root, filename))
    posts = {}   # renamed from ``dict`` -- shadowed the builtin
    for each_page in page_path:
        if each_page[-len_ext:] == pages_extension:
            each_page = each_page[len(pages_dir):]
            with open(pages_dir + each_page) as f:
                contents = list(f)
            if contents[0][:5] == "date:":
                post_date = contents.pop(0)[5:].strip()
            else:
                post_date = 's'  # work on this sometime
            tag = str(pages_dir.split('/')[-1])
            posts[post_date] = each_page[:-len_ext]
    # NOTE(review): if no posts matched, ``tag`` is unbound and the next
    # line raises NameError -- main()'s fallback chain relies on that for
    # unknown tag directories, so do NOT pre-initialize it.
    if rss != 1 and page_no != '0':
        post_list_pages(posts, page_no, tag)
    elif page_no == '0':
        # Archive table.  TODO(markup): table markup reconstructed; the
        # surviving text ("post archive", "posts.", "Date", "Title") is
        # preserved verbatim.
        print("<h2>post archive</h2>")
        print(len(posts), "posts.")
        print("<table>")
        print("<tr><th>Date</th><th>Title</th></tr>")
        bent = []
        for a in posts:
            bent.append([a, posts[a]])
        for n, i in enumerate(sorted(bent, reverse=1)):
            print("<tr><td>", (n + 1), "</td>")
            # TODO(markup): anchor around the title was stripped;
            # reconstructed to link to the post itself.
            href = post_prefix + i[1]
            i[1] = '<a href="' + href + '">' + i[1].replace('/', ': ') + '</a>'
            print("<td>", i[0], "</td><td>", i[1], "</td></tr>")
        print("</table>")
        wlog_foot()
    else:
        dlist = []
        for dic in posts:
            dlist.append([dic, posts[dic]])
        return dlist


def post_list_pages(posts, page_no=1, tag=""):
    """Render one page (``pagin`` posts) of previews plus the footer."""
    post_no = (len(posts))
    page_nos = (math.ceil(post_no / pagin))
    # Clamp the requested page into [1, page_nos].
    if page_no > page_nos:
        page_no = page_nos
    if page_no <= 0:
        page_no = 1
    if page_nos > 1:
        page_range = slice(((int(page_no) - 1) * pagin),
                           (int(page_no) * pagin))
        for key in sorted(posts, reverse=True)[page_range]:
            post_printer(tag + posts[key], preview=1)
        wlog_foot(page_no, page_nos, tag + "/")
    if page_nos == 1:
        for key in sorted(posts, reverse=True):
            post_printer(tag + posts[key], preview=1)
        wlog_foot(page_no, page_nos, tag)


def do_rss():
    """Emit the Atom feed for all posts.

    TODO(markup): every XML tag below was stripped from the recovered
    copy and has been reconstructed from the Atom 1.0 format -- confirm
    element names/attributes against the deployed original.
    """
    print("Content-type: application/atom+xml\r\n")
    print('<?xml version="1.0" encoding="utf-8"?>')
    print('<feed xmlns="http://www.w3.org/2005/Atom">')
    print("<title>", blog_title, "</title>")
    print('<link href="{0}{1}" rel="self"/>'.format(full_url, "posts.atom"))
    print('<link href="{0}"/>'.format(full_url))
    print("<id>{0}{1}</id>".format(full_url, blog_url))
    posts = sorted(post_list(pages_dir, 1, 1), reverse=True)
    # Normalize "YYYY.MM.DD [HH:MM]" dates to RFC 3339: dots -> dashes,
    # bracketed time -> "T<time>", then append the timezone suffix.
    # NOTE(review): raises IndexError if there are no posts at all.
    upd = posts[0][0].replace(" ", "").replace(".", "-")
    upd = re.sub(r'\[(.*?)\]', 'T\\1', upd) if False else re.sub(r'\[(.*?)\]', 'T', upd)
    upd += t_z
    print("<updated>" + upd + "</updated>")
    for post in posts:
        print("<entry>")
        post[0] = post[0].replace(" ", "").replace(".", "-")
        post[0] = re.sub(r'\[(.*?)\]', 'T', post[0])
        post[0] += t_z
        print("<updated>" + post[0] + "</updated>")
        purl = full_url + post_prefix + post[1]
        # Atom tag: URI entry id.
        p_id = full_url.replace("http://", "tag:") + "," + post[0][:10]
        p_id += ":" + post_prefix + post[1]
        print("<id>" + p_id + "</id>")
        print('<link href="' + purl + '"/>')
        if "/" in post[1]:
            # BUGFIX: the original tested/used ``posts[1]`` (the fixed
            # second [date, name] pair) instead of ``post[1]``, so this
            # category branch could never execute.
            print('<category term="', post[1].split("/")[0], '"/>')
        with open(pages_dir + post[1] + pages_extension) as p:
            p = p.read().splitlines()
        p[2] = "\n".join(p[2:])
        if readmore in p[2]:
            p[2] = p[2].split(readmore)
            p[2] = p[2][0] + "\n\n[*Post shortened*]({0})".format(purl)
        p[2] = markdown(p[2])
        # BUGFIX: cgi.escape removed in Python 3.8 -> html.escape.
        p[2] = html.escape(p[2])
        # NOTE(review): un-escaping < and > right after escaping leaves
        # only bare '&' encoded; looks suspicious but is preserved
        # exactly as recovered -- TODO confirm intent.
        p[2] = p[2].replace('&lt;', '<').replace('&gt;', '>')
        print("<title>", p[1], "</title>")
        print('<content type="html">', p[2], "</content>")
        print("</entry>")
    print("</feed>")


def do_list():
    """Placeholder/debug handler (unused by main())."""
    print("ahaha")


# Guarded so the module can be imported without firing the CGI handler;
# execution as a CGI script is unchanged.
if __name__ == "__main__":
    main()