Initial commit of proof-of-concept

This is /really/ rough, but it sketches out how I plan to build my
overengineered personal website with Rust and, eventually, Fly.io + LiteFS.
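
For future me: the LiteFS step shouldn't change much application code, since
LiteFS replicates SQLite at the filesystem layer. The sqlx setup stays plain
SQLite and only the database path moves onto the LiteFS mount, roughly (the
mount path here is a guess for now):

    let opts = SqliteConnectOptions::from_str("sqlite:/litefs/gs.db")?
        .journal_mode(SqliteJournalMode::Wal)
        .create_if_missing(true);
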
Gabriel Simmer 2023-07-19 21:03:58 +01:00
parent a83729bd08
commit 3b849e6e6e
Signed by: arch
GPG key ID: C81B106D46C5B875
8 changed files with 641 additions and 0 deletions

.envrc (new file, +1)

@@ -0,0 +1 @@
use flake

.gitignore (vendored, +5)

@@ -14,3 +14,8 @@ Cargo.lock
# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb
.direnv/
gs.*
result
posts/
!posts/.keep

Cargo.toml (new file, +22)

@@ -0,0 +1,22 @@
[package]
name = "quick-start"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
axum = { version = "0.6.18", features = ["json"] }
sqlx = { version = "0.7", features = [ "runtime-tokio", "tls-rustls", "sqlite" ] }
serde = { version = "1.0.167", features = ["derive"] }
tokio = { version = "1.29.1", features = ["full"] }
maud = { version = "*", features = ["axum"] }
tower = "0.4.13"
hyper = { version = "0.14", features = ["full"] }
tower-http = { version = "0.4.1", features = ["add-extension", "auth", "compression-full", "trace"] }
sha2 = "0.10.7"
hex = "0.4"
lazy_static = "1.4.0"
futures = "0.3.28"
comrak = "0.1"
orgize = "0.9"

flake.lock (new file, +198)

@@ -0,0 +1,198 @@
{
"nodes": {
"advisory-db": {
"flake": false,
"locked": {
"lastModified": 1689698236,
"narHash": "sha256-Qz9JxGKeA3jwuj1CdK9ejMJ7VsJRdiZniF8lx4mft9s=",
"owner": "rustsec",
"repo": "advisory-db",
"rev": "4aa517564d1d06f0e79784c8ad973a59d68aa9c8",
"type": "github"
},
"original": {
"owner": "rustsec",
"repo": "advisory-db",
"type": "github"
}
},
"crane": {
"inputs": {
"flake-compat": "flake-compat",
"flake-utils": "flake-utils",
"nixpkgs": [
"nixpkgs"
],
"rust-overlay": "rust-overlay"
},
"locked": {
"lastModified": 1688772518,
"narHash": "sha256-ol7gZxwvgLnxNSZwFTDJJ49xVY5teaSvF7lzlo3YQfM=",
"owner": "ipetkov",
"repo": "crane",
"rev": "8b08e96c9af8c6e3a2b69af5a7fa168750fcf88e",
"type": "github"
},
"original": {
"owner": "ipetkov",
"repo": "crane",
"type": "github"
}
},
"fenix": {
"inputs": {
"nixpkgs": [
"nixpkgs"
],
"rust-analyzer-src": []
},
"locked": {
"lastModified": 1689747703,
"narHash": "sha256-abwTXTz2u2P32fN9XRQKV+TUkcRZDfNIQ73mq9fyTxg=",
"owner": "nix-community",
"repo": "fenix",
"rev": "5e70fbab6c431bd8454d336ef06ef609f4d6e6f3",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "fenix",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1673956053,
"narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1687709756,
"narHash": "sha256-Y5wKlQSkgEK2weWdOu4J3riRd+kV/VCgHsqLNTTWQ/0=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "dbabf0ca0c0c4bce6ea5eaf65af5cb694d2082c7",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1689068808,
"narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1689631193,
"narHash": "sha256-AGSkBZaiTODQc8eT1rZDrQIjtb8JtFwJ0wVPzArlrnM=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "57695599bdc4f7bfe5d28cfa23f14b3d8bdf8a5f",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"advisory-db": "advisory-db",
"crane": "crane",
"fenix": "fenix",
"flake-utils": "flake-utils_2",
"nixpkgs": "nixpkgs"
}
},
"rust-overlay": {
"inputs": {
"flake-utils": [
"crane",
"flake-utils"
],
"nixpkgs": [
"crane",
"nixpkgs"
]
},
"locked": {
"lastModified": 1688351637,
"narHash": "sha256-CLTufJ29VxNOIZ8UTg0lepsn3X03AmopmaLTTeHDCL4=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "f9b92316727af9e6c7fee4a761242f7f46880329",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

flake.nix (new file, +139)

@@ -0,0 +1,139 @@
{
description = "Build a cargo project";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
crane = {
url = "github:ipetkov/crane";
inputs.nixpkgs.follows = "nixpkgs";
};
fenix = {
url = "github:nix-community/fenix";
inputs.nixpkgs.follows = "nixpkgs";
inputs.rust-analyzer-src.follows = "";
};
flake-utils.url = "github:numtide/flake-utils";
advisory-db = {
url = "github:rustsec/advisory-db";
flake = false;
};
};
outputs = { self, nixpkgs, crane, fenix, flake-utils, advisory-db, ... }:
flake-utils.lib.eachDefaultSystem (system:
let
pkgs = import nixpkgs {
inherit system;
};
inherit (pkgs) lib;
craneLib = crane.lib.${system};
src = craneLib.cleanCargoSource (craneLib.path ./.);
# Common arguments can be set here to avoid repeating them later
commonArgs = {
inherit src;
buildInputs = [
pkgs.sqlite
] ++ lib.optionals pkgs.stdenv.isDarwin [
# Additional darwin specific inputs can be set here
pkgs.libiconv
];
};
craneLibLLvmTools = craneLib.overrideToolchain
(fenix.packages.${system}.complete.withComponents [
"cargo"
"llvm-tools"
"rustc"
]);
# Build *just* the cargo dependencies, so we can reuse
# all of that work (e.g. via cachix) when running in CI
cargoArtifacts = craneLib.buildDepsOnly commonArgs;
# Build the actual crate itself, reusing the dependency
# artifacts from above.
my-crate = craneLib.buildPackage (commonArgs // {
inherit cargoArtifacts;
});
in
{
checks = {
# Build the crate as part of `nix flake check` for convenience
inherit my-crate;
# Run clippy (and deny all warnings) on the crate source,
          # again, reusing the dependency artifacts from above.
#
# Note that this is done as a separate derivation so that
# we can block the CI if there are issues here, but not
# prevent downstream consumers from building our crate by itself.
my-crate-clippy = craneLib.cargoClippy (commonArgs // {
inherit cargoArtifacts;
cargoClippyExtraArgs = "--all-targets -- --deny warnings";
});
my-crate-doc = craneLib.cargoDoc (commonArgs // {
inherit cargoArtifacts;
});
# Check formatting
my-crate-fmt = craneLib.cargoFmt {
inherit src;
};
# Audit dependencies
my-crate-audit = craneLib.cargoAudit {
inherit src advisory-db;
};
# Run tests with cargo-nextest
# Consider setting `doCheck = false` on `my-crate` if you do not want
# the tests to run twice
my-crate-nextest = craneLib.cargoNextest (commonArgs // {
inherit cargoArtifacts;
partitions = 1;
partitionType = "count";
});
} // lib.optionalAttrs (system == "x86_64-linux") {
# NB: cargo-tarpaulin only supports x86_64 systems
# Check code coverage (note: this will not upload coverage anywhere)
my-crate-coverage = craneLib.cargoTarpaulin (commonArgs // {
inherit cargoArtifacts;
});
};
packages = {
default = my-crate;
my-crate-llvm-coverage = craneLibLLvmTools.cargoLlvmCov (commonArgs // {
inherit cargoArtifacts;
});
};
apps.default = flake-utils.lib.mkApp {
drv = my-crate;
};
devShells.default = pkgs.mkShell {
inputsFrom = builtins.attrValues self.checks.${system};
# Additional dev-shell environment variables can be set directly
# MY_CUSTOM_DEVELOPMENT_VAR = "something else";
# Extra inputs can be added here
nativeBuildInputs = with pkgs; [
cargo
rustc
rust-analyzer
sqlite
sqlx-cli
];
};
});
}

migrations/*.sql (new file, +9; exact filename not shown)

@@ -0,0 +1,9 @@
-- Add migration script here
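-- 'cached' stores the unix timestamp (in seconds) at which the row was
-- written; the middleware in src/main.rs treats rows older than ten minutes
-- as stale.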
create table cached (
route text not null unique,
cached int not null,
content_type text not null,
content text not null
);
create unique index path_unique_idx on cached (lower(route));

posts/.keep (new file, +0)

src/main.rs (new file, +267)

@@ -0,0 +1,267 @@
#[macro_use]
extern crate lazy_static;
use std::fs;
use std::str::FromStr;
use std::time::{SystemTime, UNIX_EPOCH};
use std::collections::HashMap;
use axum::extract::Path;
use axum::response::IntoResponse;
use axum::{
body::Full,
extract::State,
http::StatusCode,
http::{Request, Response},
middleware::{self, Next},
routing::get,
Router,
};
use hyper::body::Bytes;
use maud::{html, Markup};
use orgize::Org;
use sha2::{Digest, Sha256};
use sqlx::sqlite::{SqliteConnectOptions, SqliteJournalMode, SqlitePoolOptions};
use sqlx::{FromRow, Pool, Sqlite};
use tokio::sync::Mutex;
lazy_static! {
    static ref CACHE: Mutex<HashMap<String, CachedPage>> = Mutex::new(HashMap::new());
}
#[derive(Clone, Debug)]
struct AppState {
database: Pool<Sqlite>,
}
#[derive(Clone, Debug, FromRow)]
struct CachedPage {
content_type: String,
content: String,
cached: i64,
}
#[tokio::main]
async fn main() -> Result<(), sqlx::Error> {
let opts = SqliteConnectOptions::from_str("sqlite:gs.db")?
.journal_mode(SqliteJournalMode::Wal)
.create_if_missing(true);
let pool = SqlitePoolOptions::new().connect_with(opts).await?;
sqlx::migrate!("./migrations").run(&pool).await?;
let state = AppState { database: pool };
let app = Router::new()
.route("/", get(homepage))
.route("/blog", get(list_blog_posts))
.route("/blog/:post", get(blog_post))
.layer(middleware::from_fn_with_state(state.clone(), cached_page))
.with_state(state);
println!("Running webserver on port :3000");
axum::Server::bind(&"0.0.0.0:3000".parse().unwrap())
.serve(app.into_make_service())
.await
.unwrap();
Ok(())
}
async fn homepage() -> Markup {
html! {
h1 { "Gabriel Simmer" }
h2 { "Infrastructure and DevOps" }
}
}
async fn list_blog_posts() -> Markup {
let mut posts = Vec::new();
for entry in fs::read_dir("./posts").unwrap() {
let entry = entry.unwrap();
let path = entry.path();
let filename = path.file_name().unwrap().to_str().unwrap();
let ext = path.extension().unwrap().to_str().unwrap();
// strip extension
let fname = filename.replace(&format!(".{}", ext), "");
if ext == "md" || ext == "org" {
posts.push(fname);
}
}
html! {
h1 { "Blog Posts" }
ul {
@for post in posts {
li { a href=(format!("/blog/{}", post)) { (post) } }
}
}
}
}
async fn blog_post(Path(post): Path<String>) -> Result<impl IntoResponse, StatusCode> {
// Search through /posts directory and find the post with either .md or .org extension
// If the post is not found, return 404
for entry in fs::read_dir("./posts").unwrap() {
let entry = entry.unwrap();
let path = entry.path();
let filename = path.file_name().unwrap().to_str().unwrap();
let ext = path.extension().unwrap().to_str().unwrap();
// strip extension
let fname = filename.replace(&format!(".{}", ext), "");
if fname == post && (ext == "md" || ext == "org") {
let content = fs::read_to_string(&path).unwrap();
            // Render Markdown with comrak, or org-mode with orgize.
let mut html = "".to_owned();
if ext == "md" {
html = comrak::markdown_to_html(&content, &comrak::ComrakOptions::default());
} else if ext == "org" {
let mut writer = Vec::new();
Org::parse(&content).write_html(&mut writer).unwrap();
html = String::from_utf8(writer).unwrap();
}
return Ok(Response::builder()
.header("content-type", "text/html")
.header("cache", "hit")
.status(StatusCode::OK)
.body(Full::from(html))
.unwrap());
}
}
return Err(StatusCode::NOT_FOUND);
}
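// Two-tier caching middleware: responses are kept in process memory for 30
// seconds and in SQLite for 10 minutes; anything else falls through to the
// wrapped handler, and the rendered response is written back to both tiers.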
async fn cached_page<T>(
State(state): State<AppState>,
request: Request<T>,
next: Next<T>,
) -> Response<Full<Bytes>> {
let default = CachedPage {
content_type: "text/plain".to_owned(),
content: "".to_owned(),
cached: 0,
};
let path = request.uri().path().to_string();
let mut data = CACHE.lock().await;
let content = data.get(&path).unwrap_or(&default);
let current_time = SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("SystemTime before UNIX EPOCH!");
    // The in-memory cache is valid for 30 seconds.
    if current_time.as_secs() <= (content.cached as u64 + 30) && !content.content.is_empty() {
// Return the cached page content
let c = content.clone();
return Response::builder()
.header("content-type", c.content_type)
.header("cache", "hit-memory")
.status(StatusCode::OK)
.body(Full::from(c.content))
.unwrap();
}
let res = sqlx::query_as::<_, CachedPage>("SELECT * FROM cached WHERE route = $1")
.bind(&path)
.fetch_one(&state.database)
.await;
if let Ok(res) = res {
let current_time = SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("SystemTime before UNIX EPOCH!");
        // The SQLite cache is valid for 10 minutes. Check the freshness of
        // the row we just fetched, not the in-memory entry.
        if current_time.as_secs() <= (res.cached as u64 + 10 * 60) {
let c = CachedPage {
content_type: res.content_type,
content: res.content,
cached: SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("Failed to get current time")
.as_secs()
.try_into()
.unwrap(),
};
// Refresh our memory cache.
data.insert(path, c.clone());
return Response::builder()
.header("content-type", c.content_type)
.header("cache", "hit-sqlite")
.status(StatusCode::OK)
.body(Full::from(c.content))
.unwrap();
} else {
let cache_sqlite = sqlx::query("DELETE FROM cached WHERE route = $1")
.bind(&path)
.execute(&state.database)
.await;
}
}
let res = next.run(request).await;
let (res_parts, res_body) = res.into_parts();
let bytes = match hyper::body::to_bytes(res_body).await {
Ok(bytes) => bytes,
Err(_err) => {
return Response::builder()
.status(StatusCode::INTERNAL_SERVER_ERROR)
.body(Full::from("error"))
.unwrap()
}
};
let res = bytes.to_vec();
let contenttype = match res_parts.headers.get("content-type") {
Some(c) => c.to_str().unwrap(),
None => "text/plain",
};
if !res_parts.status.is_success() {
return Response::builder()
.header("content-type", contenttype)
.status(res_parts.status)
.body(Full::from(bytes))
.unwrap();
}
let content = String::from_utf8(res).unwrap();
let cache = CachedPage {
        content_type: contenttype.to_owned(),
content,
cached: SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("Failed to get current time")
.as_secs()
.try_into()
.unwrap(),
};
    data.insert(path.clone(), cache.clone());
let cache_sqlite = sqlx::query("INSERT INTO cached (route, cached, content_type, content) VALUES ( $1, $2, $3, $4 )")
.bind(path)
.bind(cache.cached)
.bind(cache.content_type)
.bind(cache.content)
.execute(&state.database)
.await;
match cache_sqlite {
Ok(_) => println!("cached"),
        Err(e) => eprintln!("failed to cache page: {}", e),
}
Response::builder()
.header("content-type", contenttype)
.header("cache", "miss")
.status(StatusCode::OK)
.body(Full::from(bytes))
.unwrap()
}
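
Not part of this commit, but a sketch of how I'd sanity-check the cache
headers end to end. It assumes the router construction in main() is factored
into a hypothetical app(state: AppState) -> Router helper, which doesn't
exist yet:

    #[cfg(test)]
    mod tests {
        use super::*;
        use axum::{body::Body, http::Request};
        use tower::ServiceExt; // for `oneshot`

        #[tokio::test]
        async fn cache_warms_after_first_request() {
            // In-memory SQLite keeps the test self-contained.
            let opts = SqliteConnectOptions::from_str("sqlite::memory:").unwrap();
            let pool = SqlitePoolOptions::new().connect_with(opts).await.unwrap();
            sqlx::migrate!("./migrations").run(&pool).await.unwrap();
            // `app` is the hypothetical Router-building helper described above.
            let app = app(AppState { database: pool });

            // First hit renders the page and populates both cache tiers.
            let first = app
                .clone()
                .oneshot(Request::builder().uri("/").body(Body::empty()).unwrap())
                .await
                .unwrap();
            assert_eq!(first.headers()["cache"], "miss");

            // Second hit should be served from the in-memory tier.
            let second = app
                .oneshot(Request::builder().uri("/").body(Body::empty()).unwrap())
                .await
                .unwrap();
            assert_eq!(second.headers()["cache"], "hit-memory");
        }
    }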