From b40c7a0262f4e39b511ff99c0ece3c27e4aa6807 Mon Sep 17 00:00:00 2001
From: Gabriel Simmer
Date: Sun, 1 Oct 2023 02:36:27 +0100
Subject: [PATCH] Large refactoring of caching, split posts into own module

---
 Cargo.lock          |   1 +
 Cargo.toml          |   3 +-
 litefs.yml          |   2 +-
 src/cache/memory.rs |  44 ++++++
 src/cache/mod.rs    | 121 +++++++++++++++
 src/cache/sqlite.rs |  55 +++++++
 src/main.rs         | 349 ++++++++++++--------------------------------
 src/posts.rs        | 123 ++++++++++++++++
 8 files changed, 437 insertions(+), 261 deletions(-)
 create mode 100644 src/cache/memory.rs
 create mode 100644 src/cache/mod.rs
 create mode 100644 src/cache/sqlite.rs
 create mode 100644 src/posts.rs

diff --git a/Cargo.lock b/Cargo.lock
index 2d35ff9..9a33888 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1853,6 +1853,7 @@ dependencies = [
 name = "quick-start"
 version = "0.1.0"
 dependencies = [
+ "async-trait",
  "axum",
  "clap 4.3.21",
  "comrak",
diff --git a/Cargo.toml b/Cargo.toml
index 53d124c..99565e2 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -25,4 +25,5 @@ serde_dhall = "0.12.1"
 frontmatter = "0.4.0"
 file-format = "0.18.0"
 rss = "2.0.6"
-time = { version = "0.3.28", features = ["parsing", "formatting", "macros"] }
\ No newline at end of file
+time = { version = "0.3.28", features = ["parsing", "formatting", "macros"] }
+async-trait = "0.1.73"
\ No newline at end of file
diff --git a/litefs.yml b/litefs.yml
index 0ffc530..f7f096c 100644
--- a/litefs.yml
+++ b/litefs.yml
@@ -32,7 +32,7 @@ proxy:
 # the last command to be long-running (e.g. an application server). When the
 # last command exits, LiteFS is shut down.
 exec:
-  - cmd: "/app/gabrielsimmerdotcom --bind 0.0.0.0:8081 -d /litefs/db"
+  - cmd: "DATABASE_PATH=/litefs/db /app/gabrielsimmerdotcom --bind 0.0.0.0:8081"
 
 # The lease section specifies how the cluster will be managed. We're using the
 # "consul" lease type so that our application can dynamically change the primary.
diff --git a/src/cache/memory.rs b/src/cache/memory.rs
new file mode 100644
index 0000000..605f957
--- /dev/null
+++ b/src/cache/memory.rs
@@ -0,0 +1,44 @@
+use std::collections::HashMap;
+
+use async_trait::async_trait;
+use tokio::sync::Mutex;
+
+use super::{CacheMechanism, CachedItem, should_use, Tier};
+
+#[derive(Clone, Debug)]
+pub struct Memory {}
+
+lazy_static! {
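+    // Process-wide in-memory cache of rendered pages, shared across requests behind a tokio Mutex.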
+    static ref CACHE: Mutex<HashMap<String, CachedItem>> = Mutex::new(HashMap::new());
+}
+
+pub fn new() -> Memory {
+    return Memory{};
+}
+
+#[async_trait]
+impl CacheMechanism for Memory {
+    async fn get(&self, key: &String) -> Option<CachedItem> {
+        let data = CACHE.lock().await;
+        match data.get(key) {
+            Some(c) => {
+                if should_use(&c, Tier::Memory) {
+                    let mut r = c.clone();
+                    r.tier = Some(Tier::Memory);
+                    Some(r)
+                } else { None }
+            },
+            None => None
+        }
+    }
+
+    async fn rm(&mut self, key: String) {
+        let mut data = CACHE.lock().await;
+        data.remove(&key);
+    }
+
+    async fn set(&self, key: String, item: CachedItem) {
+        let mut data = CACHE.lock().await;
+        data.insert(key, item);
+    }
+}
diff --git a/src/cache/mod.rs b/src/cache/mod.rs
new file mode 100644
index 0000000..7c3aff8
--- /dev/null
+++ b/src/cache/mod.rs
@@ -0,0 +1,121 @@
+mod memory;
+mod sqlite;
+
+use std::{time::{SystemTime, UNIX_EPOCH}, fmt};
+use async_trait::async_trait;
+use sqlx::FromRow;
+
+use self::{memory::Memory, sqlite::Sqlite};
+
+#[derive(Clone, Debug)]
+pub struct Cache {
+    memory: Memory,
+    sqlite: Option<Sqlite>,
+}
+
+pub async fn init_cache() -> Cache {
+    Cache{ memory: memory::new(), sqlite: sqlite::new().await }
+}
+
+/// Cache tiers, in order of lookup preference. The number of seconds each
+/// tier considers cached content valid is decided in `should_use` below.
+#[derive(Clone, Debug, sqlx::Type)]
+pub enum Tier {
+    Memory,
+    Sqlite,
+    External,
+    None
+}
+
+impl fmt::Display for Tier {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Tier::Memory => write!(f, "memory"),
+            Tier::Sqlite => write!(f, "sqlite"),
+            Tier::External => write!(f, "external"),
+            Tier::None => write!(f, ""),
+        }
+    }
+}
+
+#[derive(Clone, Debug, FromRow)]
+pub struct CachedItem {
+    pub content_type: String,
+    pub content: String,
+    cached: i64,
+    #[sqlx(default)]
+    tier: Option<Tier>
+}
+
+impl CachedItem {
+    pub fn tier(&self) -> Tier {
+        self.tier.clone().unwrap_or(Tier::None)
+    }
+}
+
+#[async_trait]
+pub trait CacheMechanism: Sized + Clone + Send + Sync + 'static {
+    async fn get(&self, key: &String) -> Option<CachedItem>;
+    async fn rm(&mut self, key: String);
+    async fn set(&self, key: String, item: CachedItem);
+}
+
+impl Cache {
+    pub async fn get(&self, key: &String) -> Option<CachedItem> {
+        let m = self.memory.get(key).await;
+        if m.is_some() {
+            return m;
+        }
+        if self.sqlite.is_some() {
+            let sq = self.sqlite.clone().unwrap();
+            let s = sq.get(key).await;
+            if s.is_some() {
+                let current_time = SystemTime::now()
+                    .duration_since(UNIX_EPOCH)
+                    .expect("SystemTime before UNIX EPOCH!")
+                    .as_secs()
+                    .try_into()
+                    .unwrap();
+                let mut refresh_memory = s.clone().unwrap();
+                refresh_memory.cached = current_time;
+                let _ = self.memory.set(key.clone(), refresh_memory).await;
+                return s
+            }
+        }
+
+        return None
+    }
+
+    pub async fn set(&self, key: String, content_type: String, content: String) -> bool {
+        let current_time = SystemTime::now()
+            .duration_since(UNIX_EPOCH)
+            .expect("SystemTime before UNIX EPOCH!")
+            .as_secs()
+            .try_into()
+            .unwrap();
+        let cached_item = CachedItem{ content_type, content, cached: current_time, tier: None };
+        self.memory.set(key.clone(), cached_item.clone()).await;
+        if self.sqlite.is_some() {
+            let sq = self.sqlite.clone().unwrap();
+            sq.set(key.clone(), cached_item.clone()).await;
+        }
+        true
+    }
+}
+
+/// Determine whether we should actually use the cached item or not.
+fn should_use(item: &CachedItem, tier: Tier) -> bool {
+    // TODO: Make configurable.
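+    // Per-tier TTL in seconds; a TTL of 0 effectively disables serving from that tier.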
+    let cache_time = match tier {
+        Tier::Memory => 2*60,
+        Tier::Sqlite => 10*60,
+        Tier::External => 0,
+        Tier::None => 0,
+    };
+
+    let current_time: i64 = SystemTime::now()
+        .duration_since(UNIX_EPOCH)
+        .expect("SystemTime before UNIX EPOCH!")
+        .as_secs().try_into().unwrap();
+
+    current_time <= (item.cached + cache_time) && item.content != ""
+}
diff --git a/src/cache/sqlite.rs b/src/cache/sqlite.rs
new file mode 100644
index 0000000..699bcfa
--- /dev/null
+++ b/src/cache/sqlite.rs
@@ -0,0 +1,55 @@
+use std::{env, str::FromStr};
+
+use async_trait::async_trait;
+use sqlx::{Pool, sqlite::{SqlitePoolOptions, SqliteJournalMode, SqliteConnectOptions}};
+
+use super::{CacheMechanism, CachedItem, should_use, Tier};
+
+#[derive(Clone, Debug)]
+pub struct Sqlite {
+    pool: Pool<sqlx::Sqlite>
+}
+
+pub async fn new() -> Option<Sqlite> {
+    let path = env::var("DATABASE_PATH").unwrap_or("gs.db".to_owned());
+    let opts = SqliteConnectOptions::from_str(&path).unwrap()
+        .journal_mode(SqliteJournalMode::Wal)
+        .create_if_missing(true);
+
+    let pool = SqlitePoolOptions::new().connect_with(opts).await.unwrap();
+    return Some(Sqlite{ pool });
+}
+
+#[async_trait]
+impl CacheMechanism for Sqlite {
+    async fn get(&self, key: &String) -> Option<CachedItem> {
+        let res = sqlx::query_as::<_, CachedItem>("SELECT * FROM cached WHERE route = $1")
+            .bind(&key)
+            .fetch_one(&self.pool).await;
+        if res.is_ok() {
+            let c = res.unwrap();
+            if should_use(&c, Tier::Sqlite) {
+                let mut r = c.clone();
+                r.tier = Some(Tier::Sqlite);
+                return Some(r);
+            }
+        }
+        None
+    }
+
+    async fn rm(&mut self, key: String) {
+        todo!()
+    }
+
+    async fn set(&self, key: String, item: CachedItem) {
+        let cache_sqlite = sqlx::query(
+            "INSERT OR REPLACE INTO cached (route, cached, content_type, content) VALUES ( $1, $2, $3, $4 )",
+        )
+        .bind(key)
+        .bind(item.cached)
+        .bind(item.content_type)
+        .bind(item.content)
+        .execute(&self.pool)
+        .await;
+    }
+}
diff --git a/src/main.rs b/src/main.rs
index e56900f..5010df1 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1,6 +1,9 @@
 #[macro_use]
 extern crate lazy_static;
 
+mod posts;
+mod cache;
+
 use axum::extract::Path;
 use axum::response::IntoResponse;
 use axum::{
@@ -16,42 +19,26 @@
 use clap::Parser;
 use file_format::{FileFormat, Kind};
 use hyper::body::Bytes;
 use maud::{html, Markup, PreEscaped, Render, DOCTYPE};
-use orgize::Org;
 use rss::ChannelBuilder;
 use serde::Deserialize;
 use sqlx::sqlite::{SqliteConnectOptions, SqliteJournalMode, SqlitePoolOptions};
-use sqlx::{FromRow, Pool, Sqlite};
-use std::collections::HashMap;
-use std::fs::{self, File};
-use std::io::prelude::*;
+use std::env;
 use std::str::FromStr;
-use std::time::{SystemTime, UNIX_EPOCH};
 use time::{self, format_description, format_description::well_known::Rfc2822};
-use tokio::sync::Mutex;
 use tower_http::services::ServeDir;
 
-lazy_static! {
-    static ref CACHE: Mutex<HashMap<String, CachedPage>> = Mutex::new(HashMap::new());
-}
+use crate::cache::{Cache, init_cache};
+
 #[derive(Parser)]
 struct Cli {
-    #[arg(short, long)]
-    database_path: String,
     #[arg(short, long, default_value_t=("0.0.0.0:3000").to_string())]
     bind: String,
 }
 
 #[derive(Clone, Debug)]
 struct AppState {
-    database: Pool<Sqlite>,
-}
-
-#[derive(Clone, Debug, FromRow)]
-struct CachedPage {
-    content_type: String,
-    content: String,
-    cached: i64,
+    cache: Cache,
 }
 
 #[derive(Deserialize)]
@@ -74,12 +61,6 @@
 struct ProjectConfig {
     experiments: Vec<Project>,
 }
-struct Post {
-    name: String,
-    route: String,
-    date: String,
-}
-
 impl Render for Project {
     fn render(&self) -> Markup {
         html! {
@@ -97,21 +78,22 @@
 #[tokio::main]
 async fn main() -> Result<(), sqlx::Error> {
     let args = Cli::parse();
-    let opts = SqliteConnectOptions::from_str(&args.database_path)?
+    let path = env::var("DATABASE_PATH").unwrap_or("gs.db".to_owned());
+    let opts = SqliteConnectOptions::from_str(&path)?
         .journal_mode(SqliteJournalMode::Wal)
         .create_if_missing(true);
 
     let pool = SqlitePoolOptions::new().connect_with(opts).await?;
 
-    sqlx::migrate!("./migrations").run(&pool).await?;
-    let state = AppState { database: pool };
+    let state = AppState { cache: init_cache().await };
 
     let app = Router::new()
         .route("/", get(homepage))
         .route("/rss", get(rss))
         .route("/blog", get(list_blog_posts))
-        .route("/blog/:post", get(blog_post))
+        .route("/blog/:post", get(render_blog_post))
+        .route("/blog/:post/raw", get(raw_blog_post))
        .nest_service("/assets", ServeDir::new("assets"))
        .nest_service("/images", ServeDir::new("assets/images"))
        .layer(middleware::from_fn_with_state(state.clone(), cached_page))
@@ -126,54 +108,45 @@
     Ok(())
 }
 
-fn get_posts() -> Vec<Post> {
-    let mut posts: Vec<Post> = Vec::new();
-    for entry in fs::read_dir("./posts").unwrap() {
-        let entry = entry.unwrap();
-        let path = entry.path();
-        let filename = path.file_name().unwrap().to_str().unwrap();
-        let ext = path.extension().unwrap().to_str().unwrap();
-
-        // strip extension
-        let fname = filename.replace(&format!(".{}", ext), "");
-        if ext == "md" || ext == "org" {
-            // We'll have the date at the beginning of the file
-            let mut content = File::open(&path).unwrap();
-            let mut buffer = [0; 100];
-            content.read(&mut buffer).unwrap();
-            // Match date data of `date: YYYY-MM-DD` in the first 100 bytes
-            let metadata = String::from_utf8_lossy(&buffer);
-            let metadata_lines = metadata.split("\n").collect::<Vec<&str>>();
-            // dbg!(&metadata);
-            // Split by --- and get the second element
-            let date = metadata_lines
-                .iter()
-                .find(|&x| x.contains("date:"))
-                .unwrap_or(&"")
-                .split(":")
-                .collect::<Vec<&str>>()[1];
-            let title = metadata_lines
-                .iter()
-                .find(|&x| x.contains("title:"))
-                .unwrap_or(&"")
-                .split(":")
-                .collect::<Vec<&str>>()[1]
-                .trim();
-            let date = date.trim();
-
-            posts.push(Post {
-                name: title.to_owned(),
-                route: fname,
-                date: date.to_owned(),
-            });
-        }
-    }
-    posts.sort_by(|a, b| b.date.cmp(&a.date));
-    posts
-}
+async fn raw_blog_post(Path(post): Path<String>) -> Result<Response<Full<Bytes>>, StatusCode> {
+    let post = posts::blog_post(post);
+    if post.is_err() {
+        return Err(StatusCode::NOT_FOUND);
+    }
+
+    Ok(Response::builder()
+        .header("content-type", "text/plain")
+        .status(StatusCode::OK)
+        .body(Full::from(post.unwrap().content))
+        .unwrap())
+}
+
+async fn render_blog_post(Path(post): Path<String>) -> Result<Response<Full<Bytes>>, StatusCode> {
+    let post = posts::blog_post(post);
+    if post.is_err() {
+        return Err(StatusCode::NOT_FOUND);
+    }
+    let p = post.unwrap();
+    let html_maud = PreEscaped(p.html);
+
+    let html = html! {
+        (header(p.title.as_str()))
+        body {
+            main {
+                h1 { (p.title) }
+                p { (p.date) }
+                (html_maud)
+            }
+        }
+    };
+    Ok(Response::builder()
+        .header("content-type", "text/html")
+        .status(StatusCode::OK)
+        .body(Full::from(html.into_string()))
+        .unwrap())
+}
 
 async fn rss() -> Result<Response<Full<Bytes>>, StatusCode> {
-    let posts = get_posts();
+    let posts = posts::get_posts();
     let rss_posts: Vec<rss::Item> = posts.into_iter().map(|p| {
         let date = format!("{} 00:00:00 +00:00:00", p.date);
         let format = format_description::parse("[year]-[month]-[day] [hour]:[minute]:[second] [offset_hour sign:mandatory]:[offset_minute]:[offset_second]").unwrap();
@@ -238,9 +211,7 @@ async fn homepage() -> Markup {
 }
 
 async fn list_blog_posts() -> Markup {
-    let posts = get_posts();
-    // Sort posts by date
-
+    let posts = posts::get_posts();
     html! {
         (header("/blog"))
         body {
@@ -257,207 +228,67 @@
             main {
                 ul {
                     @for post in posts {
-                        li { (post.date) " - " a href=(format!("/blog/{}", post.route)) { (post.name) } }
+                        (post);
                     }
                 }
             }
         }
     }
 }
 
-async fn blog_post(Path(post): Path<String>) -> Result<Response<Full<Bytes>>, StatusCode> {
-    // Search through /posts directory and find the post with either .md or .org extension
-    // If the post is not found, return 404
-    for entry in fs::read_dir("./posts").unwrap() {
-        let entry = entry.unwrap();
-        let path = entry.path();
-        let filename = path.file_name().unwrap().to_str().unwrap();
-        let ext = path.extension().unwrap().to_str().unwrap();
-        // strip extension
-        let fname = filename.replace(&format!(".{}", ext), "");
-        if fname == post && (ext == "md" || ext == "org") {
-            let content = fs::read_to_string(&path).unwrap();
-
-            let mut html = "".to_owned();
-            let mut date = "".to_owned();
-            let mut title = "".to_owned();
-
-            if ext == "md" {
-                let (parsed, content) = frontmatter::parse_and_find_content(&content).unwrap();
-                let metadata = parsed.unwrap();
-                date = metadata["date"].as_str().unwrap().to_owned();
-                title = metadata["title"].as_str().unwrap().to_owned();
-                html = comrak::markdown_to_html(&content, &comrak::ComrakOptions::default());
-            } else if ext == "org" {
-                let mut writer = Vec::new();
-                let parsed = Org::parse(&content);
-                let keywords = parsed.keywords();
-                // Get date and title from keywords iterator
-
-                for keyword in keywords {
-                    if keyword.key == "date" {
-                        date = keyword.value.to_string();
-                    } else if keyword.key == "title" {
-                        title = keyword.value.to_string();
-                    }
-                }
-                parsed.write_html(&mut writer).unwrap();
-                html = String::from_utf8(writer).unwrap();
-            }
-            let html_maud = PreEscaped(html);
-            let html = html! {
-                (header(title.as_str()))
-                body {
-                    main {
-                        h1 { (title) }
-                        p { (date) }
-                        (html_maud)
-                    }
-                }
-            };
-
-            return Ok(Response::builder()
-                .header("content-type", "text/html")
-                .status(StatusCode::OK)
-                .body(Full::from(html.into_string()))
-                .unwrap());
-        }
-    }
-
-    return Err(StatusCode::NOT_FOUND);
-}
-
 async fn cached_page(
     State(state): State<AppState>,
     request: Request<Body>,
     next: Next<Body>,
 ) -> Response<Full<Bytes>> {
-    let default = CachedPage {
-        content_type: "text/plain".to_owned(),
-        content: "".to_owned(),
-        cached: 0,
-    };
     let path = request.uri().path().to_string();
-    let mut data = CACHE.lock().await;
-    let content = data.get(&path).unwrap_or(&default);
-    let current_time = SystemTime::now()
-        .duration_since(UNIX_EPOCH)
-        .expect("SystemTime before UNIX EPOCH!");
+    let item = state.cache.get(&path).await;
+    if item.is_none() {
+        let res = next.run(request).await;
+        let (res_parts, res_body) = res.into_parts();
+        let bytes = match hyper::body::to_bytes(res_body).await {
+            Ok(bytes) => bytes,
+            Err(_err) => {
+                return Response::builder()
+                    .status(StatusCode::INTERNAL_SERVER_ERROR)
+                    .body(Full::from("error"))
+                    .unwrap()
+            }
+        };
 
-    if current_time.as_secs() <= (content.cached as u64 + 120 as u64) && content.content != "" {
-        // Return the cached page content
-        let c = content.clone();
+        let res = bytes.to_vec();
+        let contenttype = match res_parts.headers.get("content-type") {
+            Some(c) => c.to_str().unwrap(),
+            None => "text/plain",
+        };
 
-        return Response::builder()
-            .header("content-type", c.content_type)
-            .header("cache", "hit-memory")
-            .status(StatusCode::OK)
-            .body(Full::from(c.content))
-            .unwrap();
-    }
-    let res = sqlx::query_as::<_, CachedPage>("SELECT * FROM cached WHERE route = $1")
-        .bind(&path)
-        .fetch_one(&state.database)
-        .await;
-    if let Ok(res) = res {
-        let current_time = SystemTime::now()
-            .duration_since(UNIX_EPOCH)
-            .expect("SystemTime before UNIX EPOCH!");
-
-        // SQLite cache is valid for 10 minutes.
-        if current_time.as_secs() <= (content.cached as u64 + (12 * 60) as u64) {
-            let c = CachedPage {
-                content_type: res.content_type,
-                content: res.content,
-                cached: SystemTime::now()
-                    .duration_since(UNIX_EPOCH)
-                    .expect("Failed to get current time")
-                    .as_secs()
-                    .try_into()
-                    .unwrap(),
-            };
-
-            // Refresh our memory cache.
-            data.insert(path, c.clone());
+        if !res_parts.status.is_success() {
             return Response::builder()
-                .header("content-type", c.content_type)
-                .header("cache", "hit-sqlite")
-                .status(StatusCode::OK)
-                .body(Full::from(c.content))
+                .header("content-type", contenttype)
+                .status(res_parts.status)
+                .body(Full::from(bytes))
                 .unwrap();
-        } else {
-            let _cache_sqlite = sqlx::query("DELETE FROM cached WHERE route = $1")
-                .bind(&path)
-                .execute(&state.database)
-                .await;
         }
-    }
 
-    let res = next.run(request).await;
-    let (res_parts, res_body) = res.into_parts();
-    let bytes = match hyper::body::to_bytes(res_body).await {
-        Ok(bytes) => bytes,
-        Err(_err) => {
-            return Response::builder()
-                .status(StatusCode::INTERNAL_SERVER_ERROR)
-                .body(Full::from("error"))
-                .unwrap()
-        }
-    };
-
-    let res = bytes.to_vec();
-    let contenttype = match res_parts.headers.get("content-type") {
-        Some(c) => c.to_str().unwrap(),
-        None => "text/plain",
-    };
-
-    if !res_parts.status.is_success() {
-        return Response::builder()
-            .header("content-type", contenttype)
-            .status(res_parts.status)
-            .body(Full::from(bytes))
-            .unwrap();
-    }
-
-    // Make sure we only cache text.
+        // Make sure we only cache text.
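+        // Sniff the body's magic bytes; non-text kinds (images, fonts, binaries) skip the cache.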
+        let format = FileFormat::from_bytes(&res);
+        if format.kind() != Kind::Text && format.kind() != Kind::Application {
+            return Response::builder()
+                .header("content-type", contenttype)
+                .header("cache", "not")
+                .status(StatusCode::OK)
+                .body(Full::from(bytes))
+                .unwrap();
+        }
-    let format = FileFormat::from_bytes(&res);
-    if format.kind() != Kind::Text && format.kind() != Kind::Application {
-        dbg!(format.kind());
-        return Response::builder()
-            .header("content-type", contenttype)
-            .header("cache", "not")
-            .status(StatusCode::OK)
-            .body(Full::from(bytes))
-            .unwrap();
-    }
 
-    let content = String::from_utf8(res).unwrap();
-
-    let cache = CachedPage {
-        content_type: String::from_str(contenttype).unwrap(),
-        content,
-        cached: SystemTime::now()
-            .duration_since(UNIX_EPOCH)
-            .expect("Failed to get current time")
-            .as_secs()
-            .try_into()
-            .unwrap(),
-    };
-
-    data.insert(path.clone(), cache.clone());
-
-    let cache_sqlite = sqlx::query(
-        "INSERT INTO cached (route, cached, content_type, content) VALUES ( $1, $2, $3, $4 )",
-    )
-    .bind(path)
-    .bind(cache.cached)
-    .bind(cache.content_type)
-    .bind(cache.content)
-    .execute(&state.database)
-    .await;
-
-    match cache_sqlite {
-        Ok(_) => println!("cached"),
-        Err(e) => println!("{}", e),
-    }
-
-    Response::builder()
-        .header("content-type", contenttype)
-        .header("cache", "miss")
-        .status(StatusCode::OK)
-        .body(Full::from(bytes))
-        .unwrap()
+        let content = String::from_utf8(res).unwrap();
+        state.cache.set(path, contenttype.to_owned(), content).await;
+        return Response::builder()
+            .header("content-type", contenttype)
+            .header("cache", "miss")
+            .status(StatusCode::OK)
+            .body(Full::from(bytes))
+            .unwrap()
+    } else {
+        let i = item.unwrap();
+        return Response::builder()
+            .header("content-type", &i.content_type)
+            .header("cache", format!("hit-{}", &i.tier()))
+            .status(StatusCode::OK)
+            .body(Full::from(i.content))
+            .unwrap();
+    }
 }
diff --git a/src/posts.rs b/src/posts.rs
new file mode 100644
index 0000000..b047379
--- /dev/null
+++ b/src/posts.rs
@@ -0,0 +1,123 @@
+use std::{fs::{self, File}, io::Read};
+
+use maud::{Render, Markup, html};
+use orgize::Org;
+
+pub struct PostMetadata {
+    pub name: String,
+    pub route: String,
+    pub date: String,
+}
+
+pub struct PostContent {
+    pub title: String,
+    pub date: String,
+    pub content: String,
+    pub html: String
+}
+
+impl Render for PostMetadata {
+    fn render(&self) -> Markup {
+        html! {
+            li { (self.date) " - " a href=(format!("/blog/{}", self.route)) { (self.name) } }
+        }
+    }
+}
+
+pub fn get_posts() -> Vec<PostMetadata> {
+    let mut posts: Vec<PostMetadata> = Vec::new();
+    for entry in fs::read_dir("./posts").unwrap() {
+        let entry = entry.unwrap();
+        let path = entry.path();
+        let filename = path.file_name().unwrap().to_str().unwrap();
+        let ext = path.extension().unwrap().to_str().unwrap();
+
+        // strip extension
+        let fname = filename.replace(&format!(".{}", ext), "");
+        if ext == "md" || ext == "org" {
+            // We'll have the date at the beginning of the file
+            let mut content = File::open(&path).unwrap();
+            let mut buffer = [0; 100];
+            content.read(&mut buffer).unwrap();
+            // Match date data of `date: YYYY-MM-DD` in the first 100 bytes
+            let metadata = String::from_utf8_lossy(&buffer);
+            let metadata_lines = metadata.split("\n").collect::<Vec<&str>>();
+            // dbg!(&metadata);
+            // Split by --- and get the second element
+            let date = metadata_lines
+                .iter()
+                .find(|&x| x.contains("date:"))
+                .unwrap_or(&"")
+                .split(":")
+                .collect::<Vec<&str>>()[1];
+            let title = metadata_lines
+                .iter()
+                .find(|&x| x.contains("title:"))
+                .unwrap_or(&"")
+                .split(":")
+                .collect::<Vec<&str>>()[1]
+                .trim();
+            let date = date.trim();
+
+            posts.push(PostMetadata {
+                name: title.to_owned(),
+                route: fname,
+                date: date.to_owned(),
+            });
+        }
+    }
+    posts.sort_by(|a, b| b.date.cmp(&a.date));
+    posts
+}
+
+// Render the actual blog post as HTML.
+pub fn blog_post(post: String) -> Result<PostContent, bool> {
+    // Search through /posts directory and find the post with either .md or .org extension
+    // If the post is not found, return 404
+    for entry in fs::read_dir("./posts").unwrap() {
+        let entry = entry.unwrap();
+        let path = entry.path();
+        let filename = path.file_name().unwrap().to_str().unwrap();
+        let ext = path.extension().unwrap().to_str().unwrap();
+        // strip extension
+        let fname = filename.replace(&format!(".{}", ext), "");
+        if fname == post && (ext == "md" || ext == "org") {
+            let content = fs::read_to_string(&path).unwrap();
+
+            let mut html = "".to_owned();
+            let mut date = "".to_owned();
+            let mut title = "".to_owned();
+
+            if ext == "md" {
+                let (parsed, content) = frontmatter::parse_and_find_content(&content).unwrap();
+                let metadata = parsed.unwrap();
+                date = metadata["date"].as_str().unwrap().to_owned();
+                title = metadata["title"].as_str().unwrap().to_owned();
+                html = comrak::markdown_to_html(&content, &comrak::ComrakOptions::default());
+            } else if ext == "org" {
+                let mut writer = Vec::new();
+                let parsed = Org::parse(&content);
+                let keywords = parsed.keywords();
+                // Get date and title from keywords iterator
+
+                for keyword in keywords {
+                    if keyword.key == "date" {
+                        date = keyword.value.to_string();
+                    } else if keyword.key == "title" {
+                        title = keyword.value.to_string();
+                    }
+                }
+                parsed.write_html(&mut writer).unwrap();
+                html = String::from_utf8(writer).unwrap();
+            }
+            return Ok(PostContent{
+                title,
+                date,
+                content,
+                html
+            });
+        }
+    }
+
+    return Err(false);
+}