Large refactoring of caching, split posts into own module
All checks were successful: Fly Deploy / Deploy app (push) succeeded in 6m13s

Gabriel Simmer 2023-10-01 02:36:27 +01:00
parent 692e765561
commit b40c7a0262
Signed by: arch
SSH key fingerprint: SHA256:m3OEcdtrnBpMX+2BDGh/byv3hrCekCLzDYMdvGEKPPQ
8 changed files with 437 additions and 261 deletions

Cargo.lock (generated): 1 addition

@@ -1853,6 +1853,7 @@ dependencies = [
 name = "quick-start"
 version = "0.1.0"
 dependencies = [
+ "async-trait",
  "axum",
  "clap 4.3.21",
  "comrak",

Cargo.toml: 1 addition

@@ -26,3 +26,4 @@ frontmatter = "0.4.0"
 file-format = "0.18.0"
 rss = "2.0.6"
 time = { version = "0.3.28", features = ["parsing", "formatting", "macros"] }
+async-trait = "0.1.73"
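
Why the new dependency: `CacheMechanism` in src/cache/mod.rs (below) declares async fns in a trait, which stable Rust could not express natively when this commit landed, so the `#[async_trait]` macro rewrites each method to return a boxed future. Roughly, and only as an illustration of the mechanics (this is not the macro's literal output):

use std::future::Future;
use std::pin::Pin;

// Stand-in for the real struct in src/cache/mod.rs.
pub struct CachedItem;

// Approximate expansion of `async fn get(&self, key: &String) -> Option<CachedItem>`.
pub trait CacheMechanismExpanded {
    fn get<'a>(
        &'a self,
        key: &'a String,
    ) -> Pin<Box<dyn Future<Output = Option<CachedItem>> + Send + 'a>>;
}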

litefs.yml: 1 addition, 1 deletion

@@ -32,7 +32,7 @@ proxy:
 # the last command to be long-running (e.g. an application server). When the
 # last command exits, LiteFS is shut down.
 exec:
-  - cmd: "/app/gabrielsimmerdotcom --bind 0.0.0.0:8081 -d /litefs/db"
+  - cmd: "DATABASE_PATH=/litefs/db /app/gabrielsimmerdotcom --bind 0.0.0.0:8081"
 
 # The lease section specifies how the cluster will be managed. We're using the
 # "consul" lease type so that our application can dynamically change the primary.

src/cache/memory.rs (new file): 44 additions

@@ -0,0 +1,44 @@
use std::collections::HashMap;

use async_trait::async_trait;
use tokio::sync::Mutex;

use super::{CacheMechanism, CachedItem, should_use, Tier};

#[derive(Clone, Debug)]
pub struct Memory {}

lazy_static! {
    static ref CACHE: Mutex<HashMap<String, CachedItem>> = Mutex::new(HashMap::new());
}

pub fn new() -> Memory {
    return Memory{};
}

#[async_trait]
impl CacheMechanism for Memory {
    async fn get(&self, key: &String) -> Option<CachedItem> {
        let data = CACHE.lock().await;
        match data.get(key) {
            Some(c) => {
                if should_use(&c, Tier::Memory) {
                    let mut r = c.clone();
                    r.tier = Some(Tier::Memory);
                    Some(r)
                } else { None }
            },
            None => None
        }
    }

    async fn rm(&mut self, key: String) {
        let mut data = CACHE.lock().await;
        data.remove(&key);
    }

    async fn set(&self, key: String, item: CachedItem) {
        let mut data = CACHE.lock().await;
        data.insert(key, item);
    }
}
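
The whole process shares one map here: `CACHE` is a lazy_static global guarded by a tokio `Mutex`, so every `Memory` handle sees the same entries (the `#[macro_use] extern crate lazy_static;` in src/main.rs makes the macro visible in this module). A hypothetical unit test for the round trip, not part of the commit, assuming tokio's test macro is enabled:

#[cfg(test)]
mod tests {
    use super::*;
    use std::time::{SystemTime, UNIX_EPOCH};

    #[tokio::test]
    async fn set_then_get_round_trips() {
        let now = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("SystemTime before UNIX EPOCH!")
            .as_secs() as i64;
        // `cached` and `tier` are private to the cache module but visible
        // from this child module.
        let item = CachedItem {
            content_type: "text/html".to_owned(),
            content: "<p>hi</p>".to_owned(),
            cached: now,
            tier: None,
        };
        let m = new();
        m.set("/k".to_owned(), item).await;
        // A fresh entry sits inside the two-minute memory window, so it hits.
        let hit = m.get(&"/k".to_owned()).await.expect("expected a cache hit");
        assert_eq!(hit.content, "<p>hi</p>");
    }
}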

src/cache/mod.rs (new file): 121 additions

@@ -0,0 +1,121 @@
mod memory;
mod sqlite;

use std::{time::{SystemTime, UNIX_EPOCH}, fmt};

use async_trait::async_trait;
use sqlx::FromRow;

use self::{memory::Memory, sqlite::Sqlite};

#[derive(Clone, Debug)]
pub struct Cache {
    memory: Memory,
    sqlite: Option<Sqlite>,
}

pub async fn init_cache() -> Cache {
    Cache{ memory: memory::new(), sqlite: sqlite::new().await }
}
/// Cache tiers, in lookup order. Each tier maps to a validity window in
/// seconds (see `should_use` below) during which its contents are considered
/// fresh.
#[derive(Clone, Debug, sqlx::Type)]
pub enum Tier {
    Memory,
    Sqlite,
    External,
    None
}
impl fmt::Display for Tier {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Tier::Memory => write!(f, "memory"),
            Tier::Sqlite => write!(f, "sqlite"),
            Tier::External => write!(f, "external"),
            Tier::None => write!(f, ""),
        }
    }
}

#[derive(Clone, Debug, FromRow)]
pub struct CachedItem {
    pub content_type: String,
    pub content: String,
    cached: i64,
    #[sqlx(default)]
    tier: Option<Tier>
}

impl CachedItem {
    pub fn tier(&self) -> Tier {
        self.tier.clone().unwrap_or(Tier::None)
    }
}

#[async_trait]
pub trait CacheMechanism: Sized + Clone + Send + Sync + 'static {
    async fn get(&self, key: &String) -> Option<CachedItem>;
    async fn rm(&mut self, key: String);
    async fn set(&self, key: String, item: CachedItem);
}
impl Cache {
    pub async fn get(&self, key: &String) -> Option<CachedItem> {
        let m = self.memory.get(key).await;
        if m.is_some() {
            return m;
        }
        if self.sqlite.is_some() {
            let sq = self.sqlite.clone().unwrap();
            let s = sq.get(key).await;
            if s.is_some() {
                let current_time = SystemTime::now()
                    .duration_since(UNIX_EPOCH)
                    .expect("SystemTime before UNIX EPOCH!")
                    .as_secs()
                    .try_into()
                    .unwrap();
                // On an SQLite hit, re-stamp the item and promote it back into
                // the memory tier so the next lookup is served from memory.
                let mut refresh_memory = s.clone().unwrap();
                refresh_memory.cached = current_time;
                let _ = self.memory.set(key.clone(), refresh_memory).await;
                return s
            }
        }
        return None
    }

    pub async fn set(&self, key: String, content_type: String, content: String) -> bool {
        let current_time = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("SystemTime before UNIX EPOCH!")
            .as_secs()
            .try_into()
            .unwrap();
        let cached_item = CachedItem{ content_type, content, cached: current_time, tier: None };
        // Write through every available tier: memory always, SQLite when present.
        self.memory.set(key.clone(), cached_item.clone()).await;
        if self.sqlite.is_some() {
            let sq = self.sqlite.clone().unwrap();
            sq.set(key.clone(), cached_item.clone()).await;
        }
        true
    }
}
/// Determine whether we should actually use the cached item or not.
fn should_use(item: &CachedItem, tier: Tier) -> bool {
    // TODO: Make configurable.
    let cache_time = match tier {
        Tier::Memory => 2*60,
        Tier::Sqlite => 10*60,
        Tier::External => 0,
        Tier::None => 0,
    };
    let current_time: i64 = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("SystemTime before UNIX EPOCH!")
        .as_secs().try_into().unwrap();
    current_time <= (item.cached + cache_time) && item.content != ""
}
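
Taken together: `set` writes through every tier that exists, and `get` falls through memory (fresh for two minutes) to SQLite (fresh for ten), re-stamping SQLite hits back into memory. A hypothetical caller, purely illustrative since the real call sites live in src/main.rs:

use crate::cache;

async fn demo() {
    let c = cache::init_cache().await;
    c.set("/blog".to_owned(), "text/html".to_owned(), "<ul>…</ul>".to_owned()).await;
    if let Some(item) = c.get(&"/blog".to_owned()).await {
        // A fresh write comes back from the memory tier: "text/html via memory".
        println!("{} via {}", item.content_type, item.tier());
    }
}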

src/cache/sqlite.rs (new file): 55 additions

@@ -0,0 +1,55 @@
use std::{env, str::FromStr};

use async_trait::async_trait;
use sqlx::{Pool, sqlite::{SqlitePoolOptions, SqliteJournalMode, SqliteConnectOptions}};

use super::{CacheMechanism, CachedItem, should_use, Tier};

#[derive(Clone, Debug)]
pub struct Sqlite {
    pool: Pool<sqlx::Sqlite>
}

pub async fn new() -> Option<Sqlite> {
    let path = env::var("DATABASE_PATH").unwrap_or("gs.db".to_owned());
    let opts = SqliteConnectOptions::from_str(&path).unwrap()
        .journal_mode(SqliteJournalMode::Wal)
        .create_if_missing(true);
    let pool = SqlitePoolOptions::new().connect_with(opts).await.unwrap();
    return Some(Sqlite{ pool });
}

#[async_trait]
impl CacheMechanism for Sqlite {
    async fn get(&self, key: &String) -> Option<CachedItem> {
        let res = sqlx::query_as::<_, CachedItem>("SELECT * FROM cached WHERE route = $1")
            .bind(&key)
            .fetch_one(&self.pool).await;
        if res.is_ok() {
            let c = res.unwrap();
            if should_use(&c, Tier::Sqlite) {
                let mut r = c.clone();
                r.tier = Some(Tier::Sqlite);
                return Some(r);
            }
        }
        None
    }

    async fn rm(&mut self, key: String) {
        todo!()
    }

    async fn set(&self, key: String, item: CachedItem) {
        let cache_sqlite = sqlx::query(
            "INSERT OR REPLACE INTO cached (route, cached, content_type, content) VALUES ( $1, $2, $3, $4 )",
        )
        .bind(key)
        .bind(item.cached)
        .bind(item.content_type)
        .bind(item.content)
        .execute(&self.pool)
        .await;
    }
}
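
Both queries presuppose a `cached` table keyed by `route`, since `INSERT OR REPLACE` only replaces when there is a uniqueness constraint to conflict with. The migration itself (run by `sqlx::migrate!("./migrations")` in src/main.rs) isn't part of this diff, so the shape below is an inference and may differ from the real file:

// Hypothetical reconstruction of the cached-table migration.
async fn ensure_cache_table(pool: &sqlx::Pool<sqlx::Sqlite>) -> Result<(), sqlx::Error> {
    sqlx::query(
        "CREATE TABLE IF NOT EXISTS cached (
            route        TEXT PRIMARY KEY, -- request path, used as the cache key
            cached       INTEGER NOT NULL, -- unix seconds when the entry was stored
            content_type TEXT NOT NULL,
            content      TEXT NOT NULL
        )",
    )
    .execute(pool)
    .await?;
    Ok(())
}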

src/main.rs: 91 additions, 260 deletions

@ -1,6 +1,9 @@
#[macro_use] #[macro_use]
extern crate lazy_static; extern crate lazy_static;
mod posts;
mod cache;
use axum::extract::Path; use axum::extract::Path;
use axum::response::IntoResponse; use axum::response::IntoResponse;
use axum::{ use axum::{
@@ -16,42 +19,26 @@ use clap::Parser;
 use file_format::{FileFormat, Kind};
 use hyper::body::Bytes;
 use maud::{html, Markup, PreEscaped, Render, DOCTYPE};
-use orgize::Org;
 use rss::ChannelBuilder;
 use serde::Deserialize;
 use sqlx::sqlite::{SqliteConnectOptions, SqliteJournalMode, SqlitePoolOptions};
-use sqlx::{FromRow, Pool, Sqlite};
-use std::collections::HashMap;
-use std::fs::{self, File};
-use std::io::prelude::*;
+use std::env;
 use std::str::FromStr;
-use std::time::{SystemTime, UNIX_EPOCH};
 use time::{self, format_description, format_description::well_known::Rfc2822};
-use tokio::sync::Mutex;
 use tower_http::services::ServeDir;
 
-lazy_static! {
-    static ref CACHE: Mutex<HashMap<String, CachedPage>> = Mutex::new(HashMap::new());
-}
+use crate::cache::{Cache, init_cache};
 
 #[derive(Parser)]
 struct Cli {
-    #[arg(short, long)]
-    database_path: String,
     #[arg(short, long, default_value_t=("0.0.0.0:3000").to_string())]
     bind: String,
 }
 
 #[derive(Clone, Debug)]
 struct AppState {
-    database: Pool<Sqlite>,
-}
-
-#[derive(Clone, Debug, FromRow)]
-struct CachedPage {
-    content_type: String,
-    content: String,
-    cached: i64,
+    cache: Cache,
 }
 
 #[derive(Deserialize)]
@@ -74,12 +61,6 @@ struct ProjectConfig {
     experiments: Vec<Project>,
 }
 
-struct Post {
-    name: String,
-    route: String,
-    date: String,
-}
-
 impl Render for Project {
     fn render(&self) -> Markup {
         html! {
@@ -97,21 +78,22 @@ impl Render for Project {
 #[tokio::main]
 async fn main() -> Result<(), sqlx::Error> {
     let args = Cli::parse();
-    let opts = SqliteConnectOptions::from_str(&args.database_path)?
+    let path = env::var("DATABASE_PATH").unwrap_or("gs.db".to_owned());
+    let opts = SqliteConnectOptions::from_str(&path)?
         .journal_mode(SqliteJournalMode::Wal)
         .create_if_missing(true);
     let pool = SqlitePoolOptions::new().connect_with(opts).await?;
     sqlx::migrate!("./migrations").run(&pool).await?;
-    let state = AppState { database: pool };
+    let state = AppState { cache: init_cache().await };
     let app = Router::new()
         .route("/", get(homepage))
         .route("/rss", get(rss))
         .route("/blog", get(list_blog_posts))
-        .route("/blog/:post", get(blog_post))
+        .route("/blog/:post", get(render_blog_post))
+        .route("/blog/:post/raw", get(raw_blog_post))
         .nest_service("/assets", ServeDir::new("assets"))
         .nest_service("/images", ServeDir::new("assets/images"))
         .layer(middleware::from_fn_with_state(state.clone(), cached_page))
@@ -126,54 +108,45 @@ async fn main() -> Result<(), sqlx::Error> {
     Ok(())
 }
 
-fn get_posts() -> Vec<Post> {
-    let mut posts: Vec<Post> = Vec::new();
-    for entry in fs::read_dir("./posts").unwrap() {
-        let entry = entry.unwrap();
-        let path = entry.path();
-        let filename = path.file_name().unwrap().to_str().unwrap();
-        let ext = path.extension().unwrap().to_str().unwrap();
-        // strip extension
-        let fname = filename.replace(&format!(".{}", ext), "");
-        if ext == "md" || ext == "org" {
-            // We'll have the date at the beginning of the file
-            let mut content = File::open(&path).unwrap();
-            let mut buffer = [0; 100];
-            content.read(&mut buffer).unwrap();
-            // Match date data of `date: YYYY-MM-DD` in the first 100 bytes
-            let metadata = String::from_utf8_lossy(&buffer);
-            let metadata_lines = metadata.split("\n").collect::<Vec<&str>>();
-            // dbg!(&metadata);
-            // Split by --- and get the second element
-            let date = metadata_lines
-                .iter()
-                .find(|&x| x.contains("date:"))
-                .unwrap_or(&"")
-                .split(":")
-                .collect::<Vec<&str>>()[1];
-            let title = metadata_lines
-                .iter()
-                .find(|&x| x.contains("title:"))
-                .unwrap_or(&"")
-                .split(":")
-                .collect::<Vec<&str>>()[1]
-                .trim();
-            let date = date.trim();
-            posts.push(Post {
-                name: title.to_owned(),
-                route: fname,
-                date: date.to_owned(),
-            });
-        }
-    }
-    posts.sort_by(|a, b| b.date.cmp(&a.date));
-    posts
+async fn raw_blog_post(Path(post): Path<String>) -> Result<impl IntoResponse, StatusCode> {
+    let post = posts::blog_post(post);
+    if post.is_err() {
+        return Err(StatusCode::NOT_FOUND);
+    }
+    Ok(Response::builder()
+        .header("content-type", "text/plain")
+        .status(StatusCode::OK)
+        .body(Full::from(post.unwrap().content))
+        .unwrap())
+}
+
+async fn render_blog_post(Path(post): Path<String>) -> Result<impl IntoResponse, StatusCode> {
+    let post = posts::blog_post(post);
+    if post.is_err() {
+        return Err(StatusCode::NOT_FOUND);
+    }
+    let p = post.unwrap();
+    let html_maud = PreEscaped(p.html);
+    let html = html! {
+        (header(p.title.as_str()))
+        body {
+            main {
+                h1 { (p.title) }
+                p { (p.date) }
+                (html_maud)
+            }
+        }
+    };
+    Ok(Response::builder()
+        .header("content-type", "text/html")
+        .status(StatusCode::OK)
+        .body(Full::from(html.into_string()))
+        .unwrap())
 }
 
 async fn rss() -> Result<impl IntoResponse, StatusCode> {
-    let posts = get_posts();
+    let posts = posts::get_posts();
     let rss_posts: Vec<rss::Item> = posts.into_iter().map(|p| {
         let date = format!("{} 00:00:00 +00:00:00", p.date);
         let format = format_description::parse("[year]-[month]-[day] [hour]:[minute]:[second] [offset_hour sign:mandatory]:[offset_minute]:[offset_second]").unwrap();
@@ -238,9 +211,7 @@ async fn homepage() -> Markup {
 }
 
 async fn list_blog_posts() -> Markup {
-    let posts = get_posts();
-    // Sort posts by date
+    let posts = posts::get_posts();
     html! {
         (header("/blog"))
         body {
@@ -249,7 +220,7 @@ async fn list_blog_posts() -> Markup {
             ul {
                 @for post in posts {
-                    li { (post.date) " - " a href=(format!("/blog/{}", post.route)) { (post.name) } }
+                    (post);
                 }
             }
         }
@@ -257,207 +228,67 @@ async fn list_blog_posts() -> Markup {
     }
 }
 
-async fn blog_post(Path(post): Path<String>) -> Result<impl IntoResponse, StatusCode> {
-    // Search through /posts directory and find the post with either .md or .org extension
-    // If the post is not found, return 404
-    for entry in fs::read_dir("./posts").unwrap() {
-        let entry = entry.unwrap();
-        let path = entry.path();
-        let filename = path.file_name().unwrap().to_str().unwrap();
-        let ext = path.extension().unwrap().to_str().unwrap();
-        // strip extension
-        let fname = filename.replace(&format!(".{}", ext), "");
-        if fname == post && (ext == "md" || ext == "org") {
-            let content = fs::read_to_string(&path).unwrap();
-            let mut html = "".to_owned();
-            let mut date = "".to_owned();
-            let mut title = "".to_owned();
-            if ext == "md" {
-                let (parsed, content) = frontmatter::parse_and_find_content(&content).unwrap();
-                let metadata = parsed.unwrap();
-                date = metadata["date"].as_str().unwrap().to_owned();
-                title = metadata["title"].as_str().unwrap().to_owned();
-                html = comrak::markdown_to_html(&content, &comrak::ComrakOptions::default());
-            } else if ext == "org" {
-                let mut writer = Vec::new();
-                let parsed = Org::parse(&content);
-                let keywords = parsed.keywords();
-                // Get date and title from keywords iterator
-                for keyword in keywords {
-                    if keyword.key == "date" {
-                        date = keyword.value.to_string();
-                    } else if keyword.key == "title" {
-                        title = keyword.value.to_string();
-                    }
-                }
-                parsed.write_html(&mut writer).unwrap();
-                html = String::from_utf8(writer).unwrap();
-            }
-            let html_maud = PreEscaped(html);
-            let html = html! {
-                (header(title.as_str()))
-                body {
-                    main {
-                        h1 { (title) }
-                        p { (date) }
-                        (html_maud)
-                    }
-                }
-            };
-            return Ok(Response::builder()
-                .header("content-type", "text/html")
-                .status(StatusCode::OK)
-                .body(Full::from(html.into_string()))
-                .unwrap());
-        }
-    }
-    return Err(StatusCode::NOT_FOUND);
-}
-
 async fn cached_page<T>(
     State(state): State<AppState>,
     request: Request<T>,
     next: Next<T>,
 ) -> Response<Full<Bytes>> {
-    let default = CachedPage {
-        content_type: "text/plain".to_owned(),
-        content: "".to_owned(),
-        cached: 0,
-    };
     let path = request.uri().path().to_string();
-    let mut data = CACHE.lock().await;
-    let content = data.get(&path).unwrap_or(&default);
-    let current_time = SystemTime::now()
-        .duration_since(UNIX_EPOCH)
-        .expect("SystemTime before UNIX EPOCH!");
-    if current_time.as_secs() <= (content.cached as u64 + 120 as u64) && content.content != "" {
-        // Return the cached page content
-        let c = content.clone();
-        return Response::builder()
-            .header("content-type", c.content_type)
-            .header("cache", "hit-memory")
-            .status(StatusCode::OK)
-            .body(Full::from(c.content))
-            .unwrap();
-    }
-    let res = sqlx::query_as::<_, CachedPage>("SELECT * FROM cached WHERE route = $1")
-        .bind(&path)
-        .fetch_one(&state.database)
-        .await;
-    if let Ok(res) = res {
-        let current_time = SystemTime::now()
-            .duration_since(UNIX_EPOCH)
-            .expect("SystemTime before UNIX EPOCH!");
-        // SQLite cache is valid for 10 minutes.
-        if current_time.as_secs() <= (content.cached as u64 + (12 * 60) as u64) {
-            let c = CachedPage {
-                content_type: res.content_type,
-                content: res.content,
-                cached: SystemTime::now()
-                    .duration_since(UNIX_EPOCH)
-                    .expect("Failed to get current time")
-                    .as_secs()
-                    .try_into()
-                    .unwrap(),
-            };
-            // Refresh our memory cache.
-            data.insert(path, c.clone());
-            return Response::builder()
-                .header("content-type", c.content_type)
-                .header("cache", "hit-sqlite")
-                .status(StatusCode::OK)
-                .body(Full::from(c.content))
-                .unwrap();
-        } else {
-            let _cache_sqlite = sqlx::query("DELETE FROM cached WHERE route = $1")
-                .bind(&path)
-                .execute(&state.database)
-                .await;
-        }
-    }
-    let res = next.run(request).await;
-    let (res_parts, res_body) = res.into_parts();
-    let bytes = match hyper::body::to_bytes(res_body).await {
-        Ok(bytes) => bytes,
-        Err(_err) => {
-            return Response::builder()
-                .status(StatusCode::INTERNAL_SERVER_ERROR)
-                .body(Full::from("error"))
-                .unwrap()
-        }
-    };
-    let res = bytes.to_vec();
-    let contenttype = match res_parts.headers.get("content-type") {
-        Some(c) => c.to_str().unwrap(),
-        None => "text/plain",
-    };
-    if !res_parts.status.is_success() {
-        return Response::builder()
-            .header("content-type", contenttype)
-            .status(res_parts.status)
-            .body(Full::from(bytes))
-            .unwrap();
-    }
-    // Make sure we only cache text.
-    let format = FileFormat::from_bytes(&res);
-    if format.kind() != Kind::Text && format.kind() != Kind::Application {
-        dbg!(format.kind());
-        return Response::builder()
-            .header("content-type", contenttype)
-            .header("cache", "not")
-            .status(StatusCode::OK)
-            .body(Full::from(bytes))
-            .unwrap();
-    }
-    let content = String::from_utf8(res).unwrap();
-    let cache = CachedPage {
-        content_type: String::from_str(contenttype).unwrap(),
-        content,
-        cached: SystemTime::now()
-            .duration_since(UNIX_EPOCH)
-            .expect("Failed to get current time")
-            .as_secs()
-            .try_into()
-            .unwrap(),
-    };
-    data.insert(path.clone(), cache.clone());
-    let cache_sqlite = sqlx::query(
-        "INSERT INTO cached (route, cached, content_type, content) VALUES ( $1, $2, $3, $4 )",
-    )
-    .bind(path)
-    .bind(cache.cached)
-    .bind(cache.content_type)
-    .bind(cache.content)
-    .execute(&state.database)
-    .await;
-    match cache_sqlite {
-        Ok(_) => println!("cached"),
-        Err(e) => println!("{}", e),
-    }
-    Response::builder()
-        .header("content-type", contenttype)
-        .header("cache", "miss")
-        .status(StatusCode::OK)
-        .body(Full::from(bytes))
-        .unwrap()
+    let item = state.cache.get(&path).await;
+    if item.is_none() {
+        let res = next.run(request).await;
+        let (res_parts, res_body) = res.into_parts();
+        let bytes = match hyper::body::to_bytes(res_body).await {
+            Ok(bytes) => bytes,
+            Err(_err) => {
+                return Response::builder()
+                    .status(StatusCode::INTERNAL_SERVER_ERROR)
+                    .body(Full::from("error"))
+                    .unwrap()
+            }
+        };
+        let res = bytes.to_vec();
+        let contenttype = match res_parts.headers.get("content-type") {
+            Some(c) => c.to_str().unwrap(),
+            None => "text/plain",
+        };
+        if !res_parts.status.is_success() {
+            return Response::builder()
+                .header("content-type", contenttype)
+                .status(res_parts.status)
+                .body(Full::from(bytes))
+                .unwrap();
+        }
+        // Make sure we only cache text.
+        let format = FileFormat::from_bytes(&res);
+        if format.kind() != Kind::Text && format.kind() != Kind::Application {
+            return Response::builder()
+                .header("content-type", contenttype)
+                .header("cache", "not")
+                .status(StatusCode::OK)
+                .body(Full::from(bytes))
+                .unwrap();
+        }
+        let content = String::from_utf8(res).unwrap();
+        state.cache.set(path, contenttype.to_owned(), content).await;
+        return Response::builder()
+            .header("content-type", contenttype)
+            .header("cache", "miss")
+            .status(StatusCode::OK)
+            .body(Full::from(bytes))
+            .unwrap();
+    } else {
+        let i = item.unwrap();
+        return Response::builder()
+            .header("content-type", &i.content_type)
+            .header("cache", format!("hit-{}", &i.tier()))
+            .status(StatusCode::OK)
+            .body(Full::from(i.content))
+            .unwrap();
+    }
 }
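
The rewritten middleware reports the cache disposition in a single `cache` response header: `hit-memory` or `hit-sqlite` for cache hits (via `CachedItem::tier()`), `not` for non-text bodies it refuses to cache, and `miss` for freshly rendered pages. A hypothetical integration check, not part of the commit, assuming `tower` is available as a dev-dependency for `ServiceExt::oneshot`:

use axum::{body::Body, http::Request};
use tower::ServiceExt; // for `oneshot`

// `app` is the Router built in main(), middleware layers included.
async fn check_cache_headers(app: axum::Router) {
    let req = || Request::builder().uri("/blog").body(Body::empty()).unwrap();
    let first = app.clone().oneshot(req()).await.unwrap();
    assert_eq!(first.headers()["cache"], "miss");
    // Inside the two-minute window the same path is served from memory.
    let second = app.oneshot(req()).await.unwrap();
    assert_eq!(second.headers()["cache"], "hit-memory");
}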

src/posts.rs (new file): 123 additions

@@ -0,0 +1,123 @@
use std::{fs::{self, File}, io::Read};

use maud::{Render, Markup, html};
use orgize::Org;

pub struct PostMetadata {
    pub name: String,
    pub route: String,
    pub date: String,
}

pub struct PostContent {
    pub title: String,
    pub date: String,
    pub content: String,
    pub html: String
}

impl Render for PostMetadata {
    fn render(&self) -> Markup {
        html! {
            li { (self.date) " - " a href=(format!("/blog/{}", self.route)) { (self.name) } }
        }
    }
}

pub fn get_posts() -> Vec<PostMetadata> {
    let mut posts: Vec<PostMetadata> = Vec::new();
    for entry in fs::read_dir("./posts").unwrap() {
        let entry = entry.unwrap();
        let path = entry.path();
        let filename = path.file_name().unwrap().to_str().unwrap();
        let ext = path.extension().unwrap().to_str().unwrap();
        // strip extension
        let fname = filename.replace(&format!(".{}", ext), "");
        if ext == "md" || ext == "org" {
            // We'll have the date at the beginning of the file
            let mut content = File::open(&path).unwrap();
            let mut buffer = [0; 100];
            content.read(&mut buffer).unwrap();
            // Match date data of `date: YYYY-MM-DD` in the first 100 bytes
            let metadata = String::from_utf8_lossy(&buffer);
            let metadata_lines = metadata.split("\n").collect::<Vec<&str>>();
            // Split each metadata line on ':' and take the value after the key.
            let date = metadata_lines
                .iter()
                .find(|&x| x.contains("date:"))
                .unwrap_or(&"")
                .split(":")
                .collect::<Vec<&str>>()[1];
            let title = metadata_lines
                .iter()
                .find(|&x| x.contains("title:"))
                .unwrap_or(&"")
                .split(":")
                .collect::<Vec<&str>>()[1]
                .trim();
            let date = date.trim();
            posts.push(PostMetadata {
                name: title.to_owned(),
                route: fname,
                date: date.to_owned(),
            });
        }
    }
    posts.sort_by(|a, b| b.date.cmp(&a.date));
    posts
}

// Render the actual blog post as HTML.
pub fn blog_post(post: String) -> Result<PostContent, bool> {
    // Search through /posts directory and find the post with either .md or .org extension
    // If the post is not found, return 404
    for entry in fs::read_dir("./posts").unwrap() {
        let entry = entry.unwrap();
        let path = entry.path();
        let filename = path.file_name().unwrap().to_str().unwrap();
        let ext = path.extension().unwrap().to_str().unwrap();
        // strip extension
        let fname = filename.replace(&format!(".{}", ext), "");
        if fname == post && (ext == "md" || ext == "org") {
            let content = fs::read_to_string(&path).unwrap();
            let mut html = "".to_owned();
            let mut date = "".to_owned();
            let mut title = "".to_owned();
            if ext == "md" {
                let (parsed, content) = frontmatter::parse_and_find_content(&content).unwrap();
                let metadata = parsed.unwrap();
                date = metadata["date"].as_str().unwrap().to_owned();
                title = metadata["title"].as_str().unwrap().to_owned();
                html = comrak::markdown_to_html(&content, &comrak::ComrakOptions::default());
            } else if ext == "org" {
                let mut writer = Vec::new();
                let parsed = Org::parse(&content);
                let keywords = parsed.keywords();
                // Get date and title from keywords iterator
                for keyword in keywords {
                    if keyword.key == "date" {
                        date = keyword.value.to_string();
                    } else if keyword.key == "title" {
                        title = keyword.value.to_string();
                    }
                }
                parsed.write_html(&mut writer).unwrap();
                html = String::from_utf8(writer).unwrap();
            }
            return Ok(PostContent{
                title,
                date,
                content,
                html
            });
        }
    }
    return Err(false);
}
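
Both functions here key off `title:` and `date:` lines near the top of each file -- `get_posts` scans only the first 100 bytes, so the front matter has to lead the file. A hypothetical ./posts/example.md:

---
title: An example post
date: 2023-10-01
---

(For org files, orgize's `#+title:` and `#+date:` keywords play the same role.) Since dates are ISO-formatted strings, the descending sort in `get_posts` can be checked with a plain string comparison -- a sketch of a test, not part of the commit:

#[cfg(test)]
mod tests {
    use super::get_posts;

    // Assumes a ./posts directory with dated entries is present at test time.
    #[test]
    fn posts_sorted_newest_first() {
        let posts = get_posts();
        for pair in posts.windows(2) {
            assert!(pair[0].date >= pair[1].date);
        }
    }
}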