Add Hakyll site generator and assets
Add site executable and Haskell modules (site.hs, ChaoDoc.hs, SideNoteHTML.hs, Pangu.hs) to handle Pandoc/Hakyll compilation, theorem/sidenote processing and CJK spacing. Add CSS, font files, favicon, templates, Makefile, and a CSL bibliographic style. Update .gitignore to ignore build artifacts.
This commit is contained in:
278
src/ChaoDoc.hs
Normal file
278
src/ChaoDoc.hs
Normal file
@@ -0,0 +1,278 @@
|
||||
{-# LANGUAGE BlockArguments #-}
|
||||
{-# LANGUAGE OverloadedStrings #-}
|
||||
|
||||
module ChaoDoc (chaoDocRead, chaoDocWrite, chaoDocPandocCompiler, chaoDocCompiler) where
|
||||
|
||||
import Control.Monad.State
|
||||
import Data.Either
|
||||
import Data.Functor
|
||||
import Data.List (intersect)
|
||||
import qualified Data.Map as M
|
||||
import Data.Maybe
|
||||
import Data.Text (Text, pack)
|
||||
import qualified Data.Text as T
|
||||
import Hakyll
|
||||
import Pangu (isCJK, pangu)
|
||||
import SideNoteHTML (usingSideNotesHTML)
|
||||
import System.IO.Unsafe
|
||||
import Text.Pandoc
|
||||
-- import Text.Pandoc.Builder
|
||||
import Text.Pandoc.Walk (query, walk, walkM)
|
||||
|
||||
-- setMeta key val (Pandoc (Meta ms) bs) = Pandoc (Meta $ M.insert key val ms) bs
|
||||
|
||||
-- On mac, please do `export LANG=C` before using this thing
|
||||
-- | Reader settings: pandoc's default extension set, extended with raw
-- TeX, LaTeX macro expansion, and both backslash math delimiters.
chaoDocRead :: ReaderOptions
chaoDocRead = def {readerExtensions = exts}
  where
    exts =
      foldr
        enableExtension
        pandocExtensions
        [ Ext_tex_math_double_backslash,
          Ext_tex_math_single_backslash,
          Ext_latex_macros,
          Ext_raw_tex
        ]
|
||||
|
||||
-- | Writer settings shared by the site: MathML math output, numbered
-- sections, and a depth-2 table of contents.
chaoDocWrite :: WriterOptions
chaoDocWrite =
  def
    { writerHTMLMathMethod = MathML,
      -- writerHtml5 = True,
      -- writerHighlightStyle = Just syntaxHighlightingStyle,
      writerNumberSections = True,
      writerTableOfContents = True,
      writerTOCDepth = 2
    }
|
||||
|
||||
-- | Path (relative to the site root) of the CSL citation style loaded
-- by 'chaoDocPandocCompiler'.
cslFile :: String
cslFile = "bib_style.csl"

-- | Path of the BibTeX database used for citation resolution.
bibFile :: String
bibFile = "reference.bib"
|
||||
|
||||
-- | Read the current resource into a 'Pandoc': prepend the shared TeX
-- macros, run 'myFilter', enable linked citations with a "References"
-- section title, and resolve citations against 'cslFile'/'bibFile'.
chaoDocPandocCompiler :: Compiler (Item Pandoc)
chaoDocPandocCompiler = do
  macros <- T.pack <$> loadBody "math-macros.tex"
  csl <- load $ fromFilePath cslFile
  bib <- load $ fromFilePath bibFile
  body <- getResourceBody
  let bodyWithMacros =
        fmap (T.unpack . prependMacros macros . T.pack) body
      prepare =
        addMeta "link-citations" (MetaBool True)
          . addMeta "reference-section-title" (MetaInlines [Str "References"])
          . myFilter
  readPandocWith chaoDocRead bodyWithMacros
    >>= processPandocBiblio csl bib . fmap prepare
|
||||
|
||||
-- | Full pipeline: parse with the site settings, then render to HTML.
chaoDocCompiler :: Compiler (Item String)
chaoDocCompiler = fmap (writePandocWith chaoDocWrite) chaoDocPandocCompiler
|
||||
|
||||
-- | Insert (or overwrite) a single metadata entry in a document.
addMeta :: T.Text -> MetaValue -> Pandoc -> Pandoc
addMeta key val (Pandoc meta blocks) =
  Pandoc (Meta (M.insert key val (unMeta meta))) blocks
|
||||
|
||||
-- | The site's Pandoc transformation pipeline, applied right-to-left:
-- wrap display math, fix CJK spacing, number/render theorem blocks,
-- then pre-render footnotes as sidenote HTML.
myFilter :: Pandoc -> Pandoc
myFilter = usingSideNotesHTML chaoDocWrite . theoremFilter . panguFilter . displayMathFilter
|
||||
|
||||
-- | Inline content of the last paragraph-like block ('Plain' or
-- 'Para') of a document; @[]@ when none exists.
pandocToInline :: Pandoc -> [Inline]
pandocToInline (Pandoc _ blocks) =
  fromMaybe [] . listToMaybe . mapMaybe paraLike . reverse $ blocks
  where
    paraLike (Plain is) = Just is
    paraLike (Para is) = Just is
    paraLike _ = Nothing
|
||||
|
||||
-- | Div classes that receive a running theorem number (English names
-- followed by their Chinese equivalents).
incrementalBlock :: [Text]
incrementalBlock =
  [ "Theorem",
    "Conjecture",
    "Definition",
    "Example",
    "Lemma",
    "Problem",
    "Proposition",
    "Corollary",
    "Observation",
    "定理",
    "猜想",
    "定义",
    "例",
    "引理",
    "问题",
    "命题",
    "推论",
    "观察"
  ]

-- | Div classes rendered like theorems but without a number.
otherBlock :: [Text]
otherBlock = ["Proof", "Remark", "证明", "备注"]

-- | All classes treated as theorem-like environments.
theoremClasses :: [Text]
theoremClasses = incrementalBlock ++ otherBlock
|
||||
|
||||
-- create a filter for theorems
|
||||
-- | The class list of a pandoc 'Attr'.
getClass :: Attr -> [Text]
getClass (_, classes, _) = classes

-- | Prepend a class to an 'Attr'.
addClass :: Attr -> Text -> Attr
addClass (ident, classes, kvs) cls = (ident, cls : classes, kvs)

-- | Prepend a key/value pair to an 'Attr'.
addAttr :: Attr -> Text -> Text -> Attr
addAttr (ident, classes, kvs) key val = (ident, classes, (key, val) : kvs)
|
||||
|
||||
-- For each theorem, add a number, and also add add class theorem
|
||||
-- | Stamp theorem-like Divs: record the theorem kind in a "type"
-- attribute and, for numbered environments, an incrementing "index"
-- attribute threaded through 'State'.
preprocessTheorems :: Block -> State Int Block
preprocessTheorems (Div attr xs)
  | isIncremental = do
      curId <- get
      put (curId + 1)
      return $ Div (addAttr attr' "index" (pack $ show curId)) xs
  | isOtherBlock = return $ Div attr' xs
  | otherwise = return (Div attr xs)
  where
    isIncremental = getClass attr `intersect` incrementalBlock /= []
    isOtherBlock = getClass attr `intersect` otherBlock /= []
    -- Partial 'head' is only forced (via attr') in the two guarded
    -- branches, where the intersection is known to be non-empty.
    theoremType = head (getClass attr `intersect` theoremClasses)
    attr' = addAttr attr "type" theoremType
preprocessTheorems x = return x
|
||||
|
||||
-- | Number theorem Divs (counting from 1), resolve @autoref@-style
-- citations to them, then render each into a theorem environment.
theoremFilter :: Pandoc -> Pandoc
theoremFilter doc =
  let numbered = evalState (walkM preprocessTheorems doc) 1
   in walk makeTheorem (autorefFilter numbered)
|
||||
|
||||
-- [index, type, idx]
|
||||
-- | Query collecting @(element id, (theorem type, number))@ for every
-- numbered theorem Div.
-- Assumes numbered Divs carry an "index" attribute (set earlier by
-- 'preprocessTheorems').
theoremIndex :: Block -> [(Text, (Text, Text))]
theoremIndex (Div attr _)
  | isNothing t = []
  | isIncremental = [(idx, (fromJust t, fromJust index))]
  | otherwise = []
  where
    (idx, _, parm) = attr
    t = lookup "type" parm
    index = lookup "index" parm
    -- Only evaluated after the isNothing t guard fails, so fromJust
    -- is safe here.
    isIncremental = fromJust t `elem` incrementalBlock
theoremIndex _ = []
|
||||
|
||||
-- | Rewrite a citation whose id matches a numbered theorem into an
-- intra-page link titled e.g. "Theorem 3".  Citations with unknown
-- ids — and 'Cite' nodes with no citations at all, which previously
-- crashed on 'head' — are left untouched.
autoref :: [(Text, (Text, Text))] -> Inline -> Inline
autoref idx (Cite citations@(c : _) inlines)
  | valid = Link nullAttr [Str linkTitle] ("#" <> citeid, linkTitle)
  | otherwise = Cite citations inlines
  where
    citeid = citationId c
    valid = citeid `elem` map fst idx
    -- Only forced when valid, so the fromJust is safe.
    (theoremType, num) = fromJust (lookup citeid idx)
    linkTitle = theoremType <> " " <> num
autoref _ y = y
|
||||
|
||||
-- | Collect the theorem index from the whole document, then replace
-- matching citations with intra-document links.
autorefFilter :: Pandoc -> Pandoc
autorefFilter doc = walk (autoref (query theoremIndex doc)) doc
|
||||
|
||||
-- processCitations works on AST. If you want to use citations in theorem name,
|
||||
-- then you need to convert citations there to AST as well and then use processCitations\
|
||||
-- Thus one need to apply the theorem filter first.
|
||||
-- autoref still does not work.
|
||||
-- | Contents of math-macros.tex, read once on first use.
-- NOTE(review): unsafePerformIO ties this to the working directory at
-- evaluation time; the NOINLINE pragma at least guarantees a single
-- read.  Consider threading the macros through 'prependMacros'
-- (as 'chaoDocPandocCompiler' already does) instead.
mathMacros :: Text
mathMacros = unsafePerformIO (pack <$> readFile "math-macros.tex")
{-# NOINLINE mathMacros #-}

-- | Glue a macro preamble onto a document body.
prependMacros :: Text -> Text -> Text
prependMacros macros body = macros <> "\n\n" <> body

-- | Prepend the shared TeX macros (used when parsing theorem titles).
prependMathMacros :: Text -> Text
prependMathMacros = prependMacros mathMacros
|
||||
|
||||
-- | Parse a theorem title as Markdown (with the shared math macros in
-- scope); an empty document on parse failure.
thmNamePandoc :: Text -> Pandoc
thmNamePandoc name =
  case runPure (readMarkdown chaoDocRead (prependMathMacros name)) of
    Left _ -> Pandoc nullMeta []
    Right doc -> doc
|
||||
|
||||
-- | Render a stamped theorem Div: add the @theorem-environment@ class
-- and prepend a header span holding the type, the number (when
-- present), and the parsed title (when present).
makeTheorem :: Block -> Block
makeTheorem blk@(Div attr body) =
  case lookup "type" parms of
    Nothing -> blk
    Just ty ->
      Div (addClass attr "theorem-environment") (Plain [header ty] : body)
  where
    (_, _, parms) = attr
    header ty =
      Span
        (addClass nullAttr "theorem-header")
        [ Span (addClass nullAttr "type") [Str ty],
          maybe
            (Str "")
            (\i -> Span (addClass nullAttr "index") [Str i])
            (lookup "index" parms),
          maybe
            (Str "")
            (Span (addClass nullAttr "name") . pandocToInline . thmNamePandoc)
            (lookup "title" parms)
        ]
makeTheorem x = x
|
||||
|
||||
-- pangu filter
|
||||
-- | Last character of an inline's text, descending into nested inline
-- containers; 'Nothing' for empty or non-textual inlines.
-- (Previously unpacked the whole 'Text' to a String just to inspect
-- one end; 'T.null'/'T.last' avoid the O(n) conversion.)
lastChar :: Inline -> Maybe Char
lastChar e = case e of
  Str s -> if T.null s then Nothing else Just (T.last s)
  Emph is -> lastCharList is
  Strong is -> lastCharList is
  Strikeout is -> lastCharList is
  Link _ is _ -> lastCharList is
  Span _ is -> lastCharList is
  Quoted _ is -> lastCharList is
  _ -> Nothing
  where
    -- Recurse into the last nested inline, if any.
    lastCharList [] = Nothing
    lastCharList is = lastChar (last is)
|
||||
|
||||
-- | First character of an inline's text, descending into nested inline
-- containers; 'Nothing' for empty or non-textual inlines.
-- (Previously unpacked the whole 'Text' to a String just to inspect
-- one end; 'T.null'/'T.head' avoid the O(n) conversion.)
firstChar :: Inline -> Maybe Char
firstChar e = case e of
  Str s -> if T.null s then Nothing else Just (T.head s)
  Emph is -> firstCharList is
  Strong is -> firstCharList is
  Strikeout is -> firstCharList is
  Link _ is _ -> firstCharList is
  Span _ is -> firstCharList is
  Quoted _ is -> firstCharList is
  _ -> Nothing
  where
    -- Recurse into the first nested inline, if any.
    firstCharList [] = Nothing
    firstCharList is = firstChar (head is)
|
||||
|
||||
-- | Apply the pangu spacing fix to one inline, recursing into inline
-- containers via 'panguInlines'.
panguInline :: Inline -> Inline
panguInline (Str s) = Str (pangu s)
panguInline (Emph is) = Emph (panguInlines is)
panguInline (Strong is) = Strong (panguInlines is)
panguInline (Strikeout is) = Strikeout (panguInlines is)
panguInline (Link at is tg) = Link at (panguInlines is) tg
panguInline (Span at is) = Span at (panguInlines is)
panguInline (Quoted qt is) = Quoted qt (panguInlines is)
panguInline other = other
|
||||
|
||||
-- | Apply 'panguInline' to every element and insert a 'Space' between
-- neighbours exactly when one side ends in a CJK character and the
-- other starts with a non-CJK character (or vice versa).
panguInlines :: [Inline] -> [Inline]
panguInlines = foldr (addSpace . panguInline) []
  where
    addSpace x [] = [x]
    addSpace x (y : ys)
      | shouldSpace x y = x : Space : y : ys
      | otherwise = x : y : ys
    -- No opinion when either side exposes no visible character.
    shouldSpace x y = case (lastChar x, firstChar y) of
      (Just lc, Just fc) -> isCJK lc /= isCJK fc
      _ -> False
|
||||
|
||||
-- | Run the CJK spacing fix over every paragraph of a document.
panguFilter :: Pandoc -> Pandoc
panguFilter = walk fixPara
  where
    fixPara :: Block -> Block
    fixPara (Para is) = Para (panguInlines is)
    fixPara b = b
|
||||
|
||||
-- display math wrapper for MathML
|
||||
-- | Wrap each display-math element in a @math-container@ span so the
-- MathML output can be styled/scrolled as a block.
displayMathFilter :: Pandoc -> Pandoc
displayMathFilter = walk wrap
  where
    wrap :: Inline -> Inline
    wrap m@(Math DisplayMath _) = Span ("math-container", [], []) [m]
    wrap other = other
|
||||
227
src/Pangu.hs
Normal file
227
src/Pangu.hs
Normal file
@@ -0,0 +1,227 @@
|
||||
{-# LANGUAGE OverloadedStrings #-}
|
||||
|
||||
module Pangu (pangu, isCJK) where
|
||||
|
||||
import Data.Function (fix)
|
||||
import Data.Text (Text)
|
||||
import qualified Data.Text as T
|
||||
import Data.Void (Void)
|
||||
import Replace.Megaparsec (streamEdit)
|
||||
import Text.Megaparsec
|
||||
import Text.Megaparsec.Char
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
-- | Megaparsec parser over strict 'Text' with no custom error type.
type Parser = Parsec Void Text

-- | A rewrite rule: matches a span of text and yields its replacement.
type Rule = Parser Text

-- | An ordered collection of rules; application order matters.
type RuleSet = [Rule]
|
||||
|
||||
-- | Apply one rule over the whole text repeatedly until a pass makes
-- no change (a fixed point is reached).
applyUntilFixed :: Rule -> Text -> Text
applyUntilFixed rule = go
  where
    go current
      | edited == current = edited
      | otherwise = go edited
      where
        edited = streamEdit (try rule) id current
|
||||
|
||||
-- | Apply each rule in order, running every rule to its fixed point
-- before moving on to the next.
applyRulesRecursively :: RuleSet -> Text -> Text
applyRulesRecursively rules input =
  foldl (\acc rule -> applyUntilFixed rule acc) input rules

-- | Apply each rule in order, one pass per rule.
applyRules :: RuleSet -> Text -> Text
applyRules rules input =
  foldl (\acc rule -> streamEdit (try rule) id acc) input rules
|
||||
|
||||
-------------------------------------------------------------------------------
|
||||
-- rules for pangu
|
||||
|
||||
-- alphaNumChar from megaparsec matches CJK chars...
|
||||
-- need to implement a new one
|
||||
-- | Match an ASCII letter or digit.  Megaparsec's alphaNumChar also
-- matches CJK characters, which is wrong for these rules.
alphanumericChar :: Parser Char
alphanumericChar = satisfy isAsciiAlphaNum
  where
    isAsciiAlphaNum c =
      within 'a' 'z' c || within 'A' 'Z' c || within '0' '9' c
    within lo hi c = lo <= c && c <= hi
||||
|
||||
-- | Check if a character falls within the CJK ranges provided
|
||||
-- | True when the character falls within one of the listed CJK
-- Unicode ranges (radicals, kana, CJK ideographs, compatibility
-- ideographs, ...).
isCJK :: Char -> Bool
isCJK c = any within cjkRanges
  where
    within (lo, hi) = lo <= c && c <= hi
    cjkRanges =
      [ ('\x2e80', '\x2eff'),
        ('\x2f00', '\x2fdf'),
        ('\x3040', '\x309f'),
        ('\x30a0', '\x30fa'),
        ('\x30fc', '\x30ff'),
        ('\x3100', '\x312f'),
        ('\x3200', '\x32ff'),
        ('\x3400', '\x4dbf'),
        ('\x4e00', '\x9fff'),
        ('\xf900', '\xfaff')
      ]
|
||||
|
||||
-- | Map a halfwidth punctuation character to its fullwidth (CJK)
-- counterpart; characters outside the table are returned unchanged.
convertToFullwidth :: Char -> Char
convertToFullwidth c = maybe c id (lookup c fullwidthTable)
  where
    fullwidthTable =
      [ (':', ':'),
        ('.', '。'),
        ('~', '~'),
        ('!', '!'),
        ('?', '?'),
        (',', ','),
        (';', ';'),
        ('\"', '”'),
        ('\'', '’')
      ]
||||
|
||||
-- A parser that matches a single CJK character
|
||||
-- | Match a single CJK character (per 'isCJK').
cjkChar :: Parser Char
cjkChar = satisfy isCJK
|
||||
|
||||
-- use python.py as reference for these rules
|
||||
|
||||
-- | Between two CJK characters, drop surrounding spaces and convert a
-- run of colons (or a single period) to fullwidth.
fullwidthCJKsymCJK :: Rule
fullwidthCJKsymCJK = do
  left <- cjkChar
  _ <- many (char ' ')
  syms <- try (some (char ':')) <|> count 1 (char '.')
  _ <- many (char ' ')
  right <- cjkChar
  return (T.pack (left : map convertToFullwidth syms ++ [right]))

-- | After a CJK character, drop surrounding spaces and convert a run
-- of sentence punctuation to fullwidth.
fullwidthCJKsym :: Rule
fullwidthCJKsym = do
  left <- cjkChar
  _ <- many (char ' ')
  syms <- some (oneOf ("~!?,;" :: [Char]))
  _ <- many (char ' ')
  return (T.cons left (T.pack (map convertToFullwidth syms)))
|
||||
|
||||
-- | Insert a space between an ellipsis ("..." or "…") and a following
-- CJK character.
-- NOTE(review): not listed in 'recursiveRules'/'onepassRules', so this
-- rule is currently unused by the exported 'pangu'.
dotsCJK :: Rule
dotsCJK = do
  dots <- chunk "..." <|> chunk "…"
  cjk <- cjkChar
  return $ dots <> T.pack (" " ++ [cjk])

-- | Keep a halfwidth colon between a CJK character and an ASCII
-- alphanumeric (e.g. "时间:3").  Currently unused by 'pangu'.
fixCJKcolAN :: Rule
fixCJKcolAN = do
  cjk <- cjkChar
  _ <- char ':'
  an <- alphanumericChar
  return $ T.pack $ [cjk] ++ ":" ++ [an]

-- quotes
-- seems confusing ...
-- | Quote characters recognised by the quote-related rules.
quotesym :: [Char]
quotesym = "'`\x05f4\""
|
||||
|
||||
-- | Insert a space between a CJK character and a following quote.
-- Currently unused by 'pangu'.
cjkquote :: Rule
cjkquote = do
  cjk <- cjkChar
  quote <- oneOf quotesym
  return $ T.pack $ [cjk] ++ " " ++ [quote]

-- | Insert a space between a quote and a following CJK character.
-- Currently unused by 'pangu'.
quoteCJK :: Rule
quoteCJK = do
  quote <- oneOf quotesym
  cjk <- cjkChar
  return $ T.pack $ [quote] ++ " " ++ [cjk]

-- | Strip the whitespace just inside a quoted span.
-- Currently unused by 'pangu'.
fixQuote :: Rule
fixQuote = do
  openQuotes <- T.pack <$> some (oneOf quotesym)
  _ <- many spaceChar
  content <- T.pack <$> someTill anySingle (lookAhead $ some (oneOf quotesym))
  closeQuotes <- T.pack <$> some (oneOf quotesym)
  return $ openQuotes <> T.strip content <> closeQuotes
|
||||
|
||||
-- | Space out an apostrophe after a CJK character unless it starts a
-- possessive ('s).  Currently unused by 'pangu'.
cjkpossessivequote :: Rule
cjkpossessivequote = do
  cjk <- cjkChar
  _ <- char '\''
  _ <- lookAhead $ anySingleBut 's'
  return $ T.pack $ cjk : " '"

-- This singlequoteCJK rule will turn '你好' into ' 你好'
-- which seems not desirable...
-- however, the behavior is aligned with python version
singlequoteCJK :: Rule
singlequoteCJK = do
  _ <- char '\''
  cjk <- cjkChar
  return $ T.pack $ "' " ++ [cjk]

-- | Re-attach a detached possessive "'s" to the preceding word.
-- Currently unused by 'pangu'.
fixPossessivequote :: Rule
fixPossessivequote = do
  pre <- cjkChar <|> alphanumericChar
  _ <- some spaceChar
  _ <- chunk "'s"
  return $ T.pack $ pre : "'s"
|
||||
|
||||
-- hash
|
||||
-- | Space out a CJK #hashtag# between two CJK characters.
-- Currently unused by 'pangu'.
hashANSCJKhash :: Rule
hashANSCJKhash = do
  cjk1 <- cjkChar
  _ <- char '#'
  mid <- some cjkChar
  _ <- char '#'
  cjk2 <- cjkChar
  return $ T.pack $ [cjk1] ++ " #" ++ mid ++ "# " ++ [cjk2]

-- | Space before a '#' that follows a CJK character (when not already
-- followed by a space).  Currently unused by 'pangu'.
cjkhash :: Rule
cjkhash = do
  cjk <- cjkChar
  _ <- char '#'
  _ <- lookAhead $ anySingleBut ' '
  return $ T.pack $ cjk : " #"

-- | Space after a '#' that precedes a CJK character.
-- Currently unused by 'pangu'.
hashcjk :: Rule
hashcjk = do
  _ <- char '#'
  _ <- lookAhead $ anySingleBut ' '
  cjk <- cjkChar
  return $ T.pack $ "# " ++ [cjk]
|
||||
|
||||
-- operators
|
||||
-- | Pad a binary operator between a CJK character and an ASCII
-- alphanumeric with spaces.  Currently unused by 'pangu'.
cjkOPTan :: Rule
cjkOPTan = do
  cjk <- cjkChar
  opt <- oneOf ("+-=*/&|<>%" :: [Char])
  an <- alphanumericChar
  return $ T.pack [cjk, ' ', opt, ' ', an]

-- | Pad a binary operator between an ASCII alphanumeric and a CJK
-- character with spaces.  Currently unused by 'pangu'.
anOPTcjk :: Rule
anOPTcjk = do
  an <- alphanumericChar
  opt <- oneOf ("+-=*/&|<>%" :: [Char])
  cjk <- cjkChar
  return $ T.pack [an, ' ', opt, ' ', cjk]
|
||||
|
||||
-- slash/bracket rules are not implemented
|
||||
|
||||
-- CJK and alphanumeric without space
|
||||
|
||||
-- | Emit a space after a CJK character that is immediately followed
-- by an alphanumeric or symbol (the lookahead is not consumed).
cjkans :: Rule
cjkans = appendSpace <$> (cjkChar <* lookAhead follower)
  where
    follower = alphanumericChar <|> oneOf ("@$%^&*-+\\=|/" :: [Char])
    appendSpace c = T.pack [c, ' ']

-- | Emit a space after an alphanumeric or symbol that is immediately
-- followed by a CJK character (the lookahead is not consumed).
anscjk :: Rule
anscjk = appendSpace <$> (leader <* lookAhead cjkChar)
  where
    leader = alphanumericChar <|> oneOf ("~!$%^&*-+\\=|;:,./?" :: [Char])
    appendSpace c = T.pack [c, ' ']
|
||||
|
||||
-- rule set, the order matters
|
||||
-- | Rules run to a fixed point (via 'applyUntilFixed'); order matters.
recursiveRules :: RuleSet
recursiveRules = [fullwidthCJKsymCJK, fullwidthCJKsym]

-- | Rules run in a single pass each; order matters.
onepassRules :: RuleSet
onepassRules = [anscjk, cjkans]
|
||||
|
||||
-- | Normalise CJK/Latin spacing: first run the fixed-point rules,
-- then the single-pass spacing rules.
pangu :: Text -> Text
pangu = applyRules onepassRules . applyRulesRecursively recursiveRules
|
||||
161
src/SideNoteHTML.hs
Normal file
161
src/SideNoteHTML.hs
Normal file
@@ -0,0 +1,161 @@
|
||||
{-# LANGUAGE BangPatterns #-}
|
||||
{-# LANGUAGE DerivingStrategies #-}
|
||||
{-# LANGUAGE LambdaCase #-}
|
||||
{-# LANGUAGE OverloadedStrings #-}
|
||||
{-# LANGUAGE ScopedTypeVariables #-}
|
||||
{- |
|
||||
Module : Text.Pandoc.SideNoteHTML
|
||||
Description : Convert pandoc footnotes to sidenotes
|
||||
Copyright : (c) Tony Zorman 2023
|
||||
License : MIT
|
||||
Maintainer : Tony Zorman <soliditsallgood@mailbox.org>
|
||||
Stability : experimental
|
||||
Portability : non-portable
|
||||
-}
|
||||
module SideNoteHTML (usingSideNotesHTML) where
|
||||
|
||||
import Control.Monad (foldM)
|
||||
import Control.Monad.State (State, get, modify', runState)
|
||||
import Data.Text (Text)
|
||||
import Text.Pandoc (runPure, writeHtml5String)
|
||||
import Text.Pandoc.Definition (Block (..), Inline (..), Pandoc (..))
|
||||
import Text.Pandoc.Options (WriterOptions)
|
||||
import Text.Pandoc.Shared (tshow)
|
||||
import Text.Pandoc.Walk (walkM)
|
||||
import qualified Data.Text as T
|
||||
|
||||
-- type NoteType :: Type
|
||||
-- | Whether a note is a numbered sidenote or an unnumbered margin
-- note (selected by a "{-} " prefix in the note text).
data NoteType = Sidenote | Marginnote
  deriving stock (Show, Eq)

-- | State threaded through rendering: the writer options used to
-- pre-render note bodies, and a running note counter.
data SidenoteState = SNS
  { _writer :: !WriterOptions
  , counter :: !Int
  }

-- | Monad in which sidenote rendering runs.
type Sidenote = State SidenoteState
|
||||
|
||||
-- | Like 'Text.Pandoc.SideNote.usingSideNotes', but immediately
|
||||
-- pre-render the sidenotes. This has the advantage that sidenotes may
|
||||
-- be wrapped in a @<div>@ (instead of a 'Span'), which allows arbitrary
|
||||
-- blocks to be nested in them. The disadvantage is that one now has to
|
||||
-- specify the 'WriterOptions' for the current document, meaning this is
|
||||
-- meant to be used as a module and is unlikely to be useful as a
|
||||
-- standalone application.
|
||||
--
|
||||
-- ==== __Example__
|
||||
--
|
||||
-- Using this function with <https://jaspervdj.be/hakyll/ hakyll> could
|
||||
-- look something like the following, defining an equivalent to the
|
||||
-- default @pandocCompiler@.
|
||||
--
|
||||
-- > myPandocCompiler :: Compiler (Item String)
|
||||
-- > myPandocCompiler =
|
||||
-- > pandocCompilerWithTransformM
|
||||
-- > defaultHakyllReaderOptions
|
||||
-- > defaultHakyllWriterOptions
|
||||
-- > (usingSideNotesHTML defaultHakyllWriterOptions)
|
||||
--
|
||||
usingSideNotesHTML :: WriterOptions -> Pandoc -> Pandoc
usingSideNotesHTML writer (Pandoc meta blocks) =
  Pandoc meta (dropLeadingDummy (walkBlocks (SNS writer 0) blocks))
  where
    -- Paragraph rendering inserts an empty dummy 'Para'; one at the
    -- very start of the document is superfluous and removed here.
    dropLeadingDummy :: [Block] -> [Block]
    dropLeadingDummy (Para [Str ""] : bs) = bs
    dropLeadingDummy bs = bs
|
||||
|
||||
-- | Render sidenotes block by block, threading the counter state from
-- one block to the next.
walkBlocks :: SidenoteState -> [Block] -> [Block]
walkBlocks _ [] = []
walkBlocks sns (b : bs) = rendered <> walkBlocks sns' bs
  where
    (rendered, sns') = runState (walkM mkSidenote [b]) sns
|
||||
|
||||
-- Sidenotes can probably appear in more places; this should be
|
||||
-- filled-in at some point.
|
||||
-- | Find footnotes in a list of blocks and replace them with
-- pre-rendered sidenote HTML, recursing into list blocks.
mkSidenote :: [Block] -> Sidenote [Block]
mkSidenote = foldM (\acc b -> (acc <>) <$> single b) []
  where
    -- Try to find and render a sidenote in a single block.
    single :: Block -> Sidenote [Block]
    single = \case
      -- Simulate a paragraph by inserting a dummy block; this is needed
      -- in case two consecutive paragraphs have sidenotes, or a paragraph
      -- doesn't have one at all.
      Para inlines -> (Para [Str ""] :) <$> renderSidenote [] inlines
      Plain inlines -> renderSidenote [] inlines
      OrderedList attrs bs -> (:[]) . OrderedList attrs <$> traverse mkSidenote bs
      BulletList bs -> (:[]) . BulletList <$> traverse mkSidenote bs
      block -> pure [block]
|
||||
|
||||
-- | Walk a paragraph's inlines — the first argument accumulates the
-- inlines seen so far, in reverse — and whenever a 'Note' is found,
-- emit the text so far followed by the pre-rendered sidenote block.
-- HTML comment markers are used to glue adjacent output; see
-- [Note Comment] at the bottom of this module.
renderSidenote :: [Inline] -> [Inline] -> Sidenote [Block]
renderSidenote !inlines = \case
  [] -> pure [plain inlines]
  Note bs : xs -> do block <- go bs
                     mappend [ -- Start gluing before, see [Note Comment].
                               plain (RawInline "html" commentStart : inlines)
                             , block
                             ]
                       <$> renderSidenote
                             [RawInline "html" commentEnd] -- End gluing after
                             xs
  b : xs -> renderSidenote (b : inlines) xs
  where
    -- Render one note body to a raw HTML block and bump the counter.
    go :: [Block] -> Sidenote Block
    go blocks = do
      -- Read the current counter, then increment it for the next note.
      SNS w i <- get <* modify' (\sns -> sns{ counter = 1 + counter sns })
      let (typ, noteText) = getNoteType (render w blocks)
      pure . RawBlock "html" $
        mconcat [ commentEnd -- End gluing before
                , label typ i <> input i <> note typ noteText
                , commentStart -- Start gluing after
                ]
|
||||
|
||||
-- The '{-}' symbol differentiates between margin note and side note.
|
||||
-- | The '{-}' symbol differentiates between margin note and side
-- note: a "{-} " prefix marks a margin note and is stripped.
getNoteType :: Text -> (NoteType, Text)
getNoteType t = case T.stripPrefix "{-} " t of
  Just rest -> (Marginnote, rest)
  Nothing -> (Sidenote, t)
|
||||
|
||||
-- | Render blocks to HTML5 text, then drop everything up to and
-- including the first newline.
-- NOTE(review): presumably this removes the dummy first line of the
-- rendered output — confirm against the writer's output shape.
render :: WriterOptions -> [Block] -> Text
render w bs = case runPure (writeHtml5String w (Pandoc mempty bs)) of
  Left err -> error $ "Text.Pandoc.SideNoteHTML.writePandocWith: " ++ show err
  Right txt -> T.drop 1 (T.dropWhile (/= '\n') txt)
|
||||
|
||||
-- | End-of-HTML-comment marker used for gluing; see [Note Comment].
commentEnd :: T.Text
commentEnd = "-->"

-- | Start-of-HTML-comment marker used for gluing; see [Note Comment].
commentStart :: T.Text
commentStart = "<!--"

-- | Build a 'Plain' from inlines that were accumulated in reverse.
plain :: [Inline] -> Block
plain = Plain . reverse
|
||||
|
||||
-- | The @<label>@ toggling sidenote number @i@; margin notes get a
-- "⊕" symbol instead of a number.
label :: NoteType -> Int -> Text
label nt i =
  mconcat
    [ "<label for=\"sn-"
    , tshow i
    , "\" class=\"margin-toggle"
    , sidenoteNumber
    , "\">"
    , altSymbol
    , "</label>"
    ]
  where
    (sidenoteNumber, altSymbol) = case nt of
      Sidenote -> (" sidenote-number", "") :: (Text, Text)
      Marginnote -> ("", "⊕")

-- | The hidden checkbox that toggles note @i@ on small screens.
input :: Int -> Text
input i =
  mconcat
    [ "<input type=\"checkbox\" id=\"sn-"
    , tshow i
    , "\" class=\"margin-toggle\"/>"
    ]

-- | The note body, wrapped in a div classed "sidenote"/"marginnote".
note :: NoteType -> Text -> Text
note nt body =
  mconcat ["<div class=\"", T.toLower (tshow nt), "\">", body, "</div>"]
|
||||
|
||||
{- [Note Comment]
|
||||
|
||||
This is obviously horrible, but we have to do this in order for the
|
||||
blocks (which are now not inline elements anymore!) immediately before
|
||||
and after the sidenote to be "glued" to the sidenote itself. In this
|
||||
way, the number indicating the sidenote does not have an extra space
|
||||
associated to it on either side, which otherwise would be the case.
|
||||
|
||||
-}
|
||||
118
src/site.hs
Normal file
118
src/site.hs
Normal file
@@ -0,0 +1,118 @@
|
||||
{-# LANGUAGE BlockArguments #-}
|
||||
{-# LANGUAGE LambdaCase #-}
|
||||
{-# LANGUAGE OverloadedStrings #-}
|
||||
{-# LANGUAGE ScopedTypeVariables #-}
|
||||
|
||||
import ChaoDoc
|
||||
import qualified Data.Text as T
|
||||
import Hakyll
|
||||
import System.FilePath
|
||||
import Text.Pandoc
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- https://www.rohanjain.in/hakyll-clean-urls/
|
||||
-- | Route "dir/page.ext" to "dir/page/index.html" so pages get
-- extension-free URLs (https://www.rohanjain.in/hakyll-clean-urls/).
cleanRoute :: Routes
cleanRoute = customRoute \ident ->
  let p = toFilePath ident
   in takeDirectory p </> takeBaseName p </> "index.html"
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
-- | Hakyll configuration: on top of the default ignore rules, skip
-- anything located under a .git directory.
config :: Configuration
config =
  defaultConfiguration
    { ignoreFile = \path ->
        ignoreFile defaultConfiguration path
          || ".git" `elem` splitDirectories (normalise path)
    }
|
||||
|
||||
-- | Site build rules.
main :: IO ()
main = hakyllWith config $ do
  -- Static assets are copied through unchanged.
  match "images/**" $ do
    route idRoute
    compile copyFileCompiler

  -- Loaded by ChaoDoc's compilers; compiled but not routed.
  match "math-macros.tex" $ compile getResourceBody

  match "bib_style.csl" $ compile cslCompiler

  match "reference.bib" $ compile biblioCompiler

  match "fonts/*.woff2" $ do
    route idRoute
    compile copyFileCompiler

  match "favicon.ico" $ do
    route idRoute
    compile copyFileCompiler

  match "css/*" $ do
    route idRoute
    compile compressCssCompiler

  -- Notes: rendered through the ChaoDoc pipeline with a per-item TOC,
  -- served under clean /note-name/index.html URLs.
  match "notes/*" $ do
    route cleanRoute
    compile $ do
      tocCtx <- getTocCtx defaultContext
      chaoDocCompiler
        >>= loadAndApplyTemplate "templates/post.html" tocCtx
        >>= loadAndApplyTemplate "templates/default.html" tocCtx
        >>= relativizeUrls

  -- Index page listing all notes.
  create ["index.html"] $ do
    route idRoute
    compile $ do
      posts <- loadAll "notes/*"
      let indexCtx =
            constField "title" "Notes"
              `mappend` constField "toc" ""
              `mappend` listField "posts" postCtx (return posts)
              `mappend` defaultContext
      makeItem ""
        >>= loadAndApplyTemplate "templates/post-list.html" indexCtx
        >>= loadAndApplyTemplate "templates/default.html" indexCtx
        >>= relativizeUrls

  match "templates/*" $ compile templateBodyCompiler
|
||||
|
||||
-- | Context for items in post listings.
-- NOTE(review): both fields are named "date"; Context lookup tries
-- left to right, so the "%Y-%m-%d" variant only applies if the first
-- formatting fails — confirm this fallback is intentional.
postCtx :: Context String
postCtx =
  dateField "date" "%B %e, %Y"
    <> dateField "date" "%Y-%m-%d"
    <> defaultContext
|
||||
|
||||
-- toc from https://github.com/slotThe/slotThe.github.io
|
||||
-- | Extend a context with a "toc" field containing the rendered table
-- of contents, plus a "no-toc" flag when the item's metadata opts out.
-- (toc from https://github.com/slotThe/slotThe.github.io)
getTocCtx :: Context a -> Compiler (Context a)
getTocCtx ctx = do
  -- The item can disable its TOC with metadata field "no-toc: true".
  noToc <- (Just "true" ==) <$> (getUnderlying >>= (`getMetadataField` "no-toc"))
  writerOpts <- mkTocWriter defaultHakyllWriterOptions
  toc <- writePandocWith writerOpts <$> chaoDocPandocCompiler
  pure $
    mconcat
      [ ctx,
        -- Strip the "toc-" link ids so they don't clash with the
        -- heading ids rendered in the body.
        constField "toc" $ killLinkIds (itemBody toc),
        if noToc then boolField "no-toc" (pure noToc) else mempty
      ]
  where
    -- Writer that renders nothing but the TOC ($toc$ template).
    mkTocWriter :: WriterOptions -> Compiler WriterOptions
    mkTocWriter writerOpts = do
      tmpl <- either (const Nothing) Just <$> unsafeCompiler (compileTemplate "" "$toc$")
      pure $
        writerOpts
          { writerTableOfContents = True,
            writerTOCDepth = 2,
            writerTemplate = tmpl,
            writerHTMLMathMethod = MathML
          }
|
||||
|
||||
-- | Apply a 'T.Text' transformation to a 'String'.
asTxt :: (T.Text -> T.Text) -> String -> String
asTxt f s = T.unpack (f (T.pack s))
|
||||
|
||||
-- | Remove every @id="toc-…"@ attribute value from a rendered TOC:
-- split on the attribute prefix and, in each following chunk, drop
-- everything up to and including the closing quote.
killLinkIds :: String -> String
killLinkIds = T.unpack . T.concat . scrub . T.splitOn "id=\"toc-" . T.pack
  where
    scrub :: [T.Text] -> [T.Text]
    scrub [] = []
    scrub (x : xs) = x : map (T.drop 1 . T.dropWhile (/= '\"')) xs
|
||||
Reference in New Issue
Block a user