Fix linting, build-test-publish workflow, Docker image
Some checks failed
Build Docker Image / nix-flake-check (push) Failing after 2m26s
Build Docker Image / docker-build (push) Has been skipped
Build Docker Image / arm-docker-build (push) Has been skipped

Gabriel Simmer 2023-11-10 12:19:31 +00:00
parent 012771f2fb
commit 466f00dd0e
Signed by: arch
SSH key fingerprint: SHA256:m3OEcdtrnBpMX+2BDGh/byv3hrCekCLzDYMdvGEKPPQ
5 changed files with 302 additions and 181 deletions


@@ -0,0 +1,110 @@
name: Build Docker Image
on:
  push:
    branches:
      - master
jobs:
  nix-flake-check:
    runs-on: debian-latest
    steps:
      - name: Install prerequisites
        run: apt update && apt install -y sudo zstd
      - name: "Cache Nix store"
        uses: actions/cache@v3.0.8
        id: nix-cache
        with:
          path: /nix
          key: "vr-event-tracker-cache-v1"
      - name: Install Nix
        uses: https://github.com/cachix/install-nix-action@v22
        with:
          extra_nix_config: "experimental-features = nix-command flakes"
          nix_path: nixpkgs=channel:nixos-23.05
      - name: Remove access_tokens
        run: sed -i '/^access-tokens/d' /etc/nix/nix.conf
      - name: Check out repository
        uses: actions/checkout@v3.5.3
        with:
          ref: master
      - name: Check codebase
        run: nix flake check -L
  docker-build:
    needs: nix-flake-check
    runs-on: debian-latest
    steps:
      - name: Install prerequisites
        run: apt update && apt install -y sudo zstd
      - name: "Cache Nix store"
        uses: actions/cache@v3.0.8
        id: nix-cache
        with:
          path: /nix
          key: "vr-event-tracker-cache-v1"
      - name: Install Nix
        uses: https://github.com/cachix/install-nix-action@v22
        with:
          extra_nix_config: "experimental-features = nix-command flakes"
          nix_path: nixpkgs=channel:nixos-23.05
      - name: Remove access_tokens
        run: sed -i '/^access-tokens/d' /etc/nix/nix.conf
      - name: Check out repository
        uses: actions/checkout@v3.5.3
        with:
          ref: master
      - name: Build image
        run: nix build .#docker
      - name: Push image with Skopeo
        run: |
          nix-env -i skopeo -f '<nixpkgs>'
          wget https://raw.githubusercontent.com/containers/skopeo/main/default-policy.json && mkdir /etc/containers && mv default-policy.json /etc/containers/policy.json
          skopeo login --username arch --password $REGISTRY_TOKEN git.gmem.ca
          skopeo copy docker-archive:result docker://git.gmem.ca/arch/vrchat-prometheus-adapter:latest
        env:
          REGISTRY_TOKEN: ${{ secrets.REGISTRY_TOKEN }}
  arm-docker-build:
    needs: nix-flake-check
    runs-on: debian-latest-arm
    steps:
      - name: Install prerequisites
        run: apt update && apt install -y sudo zstd
      - name: "Cache Nix store"
        uses: actions/cache@v3.0.8
        id: nix-cache
        with:
          path: /nix
          key: "vr-event-tracker-cache-arm-v1"
      - name: Install Nix
        uses: https://github.com/cachix/install-nix-action@v22
        with:
          extra_nix_config: "experimental-features = nix-command flakes"
          nix_path: nixpkgs=channel:nixos-23.05
      - name: Remove access_tokens
        run: sed -i '/^access-tokens/d' /etc/nix/nix.conf
      - name: Check out repository
        uses: actions/checkout@v3.5.3
        with:
          ref: master
      - name: Build image
        run: nix build .#docker
      - name: Push image with Skopeo
        run: |
          nix-env -i skopeo -f '<nixpkgs>'
          wget https://raw.githubusercontent.com/containers/skopeo/main/default-policy.json && mkdir /etc/containers && mv default-policy.json /etc/containers/policy.json
          skopeo login --username arch --password $REGISTRY_TOKEN git.gmem.ca
          skopeo copy docker-archive:result docker://git.gmem.ca/arch/vrchat-prometheus-adapter:arm
        env:
          REGISTRY_TOKEN: ${{ secrets.REGISTRY_TOKEN }}
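In both build jobs, `nix build .#docker` leaves a `result` symlink pointing at the image tarball produced by `dockerTools.buildImage` in flake.nix, which is what the `docker-archive:result` source in the Skopeo step refers to. A rough way to sanity-check the same artifact locally, assuming a flake-enabled Nix and Skopeo are installed:

nix build .#docker
skopeo inspect docker-archive:result   # prints the image config without pushing anywhere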

Cargo.lock (generated), 7 changed lines

@@ -670,16 +670,9 @@ dependencies = [
"lazy_static",
"memchr",
"parking_lot",
"protobuf",
"thiserror",
]
[[package]]
name = "protobuf"
version = "2.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94"
[[package]]
name = "quote"
version = "1.0.33"

Cargo.toml

@ -7,7 +7,7 @@ edition = "2021"
[dependencies]
axum = "0.6.20"
prometheus = "0.13.3"
prometheus = { version = "0.13.3", default-features = false }
tokio = { version = "1.29.1", features = [ "full" ] }
serde = { version = "1.0.189", features = [ "derive" ] }
toml = "0.8.2"

flake.nix, 263 changed lines

@@ -23,130 +23,151 @@
};
};
outputs = { self, nixpkgs, crane, fenix, flake-utils, advisory-db, ... }:
flake-utils.lib.eachDefaultSystem (system:
let
pkgs = import nixpkgs {
inherit system;
outputs = { self, nixpkgs, crane, fenix, flake-utils, advisory-db, ... }: {
nixosModules.default = ({ pkgs, ... }: {
imports = [ ./module.nix ];
# overlays defined and injected by the nix flake
nixpkgs.overlays = [
(_self: _super: {
vrchat-prometheus-adapter = self.packages.${pkgs.system}.vrchat-prometheus-adapter;
})
];
});
} //
flake-utils.lib.eachDefaultSystem (system:
let
pkgs = import nixpkgs {
inherit system;
};
inherit (pkgs) lib;
craneLib = crane.lib.${system};
src = lib.cleanSourceWith {
src = craneLib.path ./.; # The original, unfiltered source
filter = path: type:
let
protoFilter = path: _type: builtins.match ".*proto$" path != null;
protoOrCargo = path: type:
(protoFilter path type) || (craneLib.filterCargoSources path type);
in
protoOrCargo path type;
};
# Common arguments can be set here to avoid repeating them later
commonArgs = {
inherit src;
buildInputs = [
pkgs.sqlite
pkgs.pkg-config
pkgs.openssl
pkgs.protobuf
] ++ lib.optionals pkgs.stdenv.isDarwin [
# Additional darwin specific inputs can be set here
pkgs.libiconv
];
};
craneLibLLvmTools = craneLib.overrideToolchain
(fenix.packages.${system}.complete.withComponents [
"cargo"
"llvm-tools"
"rustc"
]);
# Build *just* the cargo dependencies, so we can reuse
# all of that work (e.g. via cachix) when running in CI
cargoArtifacts = craneLib.buildDepsOnly commonArgs;
# Build the actual crate itself, reusing the dependency
# artifacts from above.
my-crate = craneLib.buildPackage (commonArgs // {
inherit cargoArtifacts;
});
dockerImage = pkgs.dockerTools.buildImage {
name = "vrchat-prometheus-exporter";
config = {
Cmd = [ "${my-crate}/bin/vr-event-tracker" ];
ExposedPorts = {
"6534/tcp" = {};
};
};
};
in
{
checks = {
# Build the crate as part of `nix flake check` for convenience
inherit my-crate;
# Run clippy (and deny all warnings) on the crate source,
# again, reusing the dependency artifacts from above.
#
# Note that this is done as a separate derivation so that
# we can block the CI if there are issues here, but not
# prevent downstream consumers from building our crate by itself.
my-crate-clippy = craneLib.cargoClippy (commonArgs // {
inherit cargoArtifacts;
cargoClippyExtraArgs = "--all-targets -- --deny warnings";
});
my-crate-doc = craneLib.cargoDoc (commonArgs // {
inherit cargoArtifacts;
});
# Check formatting
my-crate-fmt = craneLib.cargoFmt {
inherit src;
};
# Audit dependencies
my-crate-audit = craneLib.cargoAudit {
inherit src advisory-db;
};
# Run tests with cargo-nextest
# Consider setting `doCheck = false` on `my-crate` if you do not want
# the tests to run twice
my-crate-nextest = craneLib.cargoNextest (commonArgs // {
inherit cargoArtifacts;
partitions = 1;
partitionType = "count";
});
} // lib.optionalAttrs (system == "x86_64-linux") {
# NB: cargo-tarpaulin only supports x86_64 systems
# Check code coverage (note: this will not upload coverage anywhere)
my-crate-coverage = craneLib.cargoTarpaulin (commonArgs // {
inherit cargoArtifacts;
});
};
inherit (pkgs) lib;
craneLib = crane.lib.${system};
src = lib.cleanSourceWith {
src = craneLib.path ./.; # The original, unfiltered source
filter = path: type:
let
protoFilter = path: _type: builtins.match ".*proto$" path != null;
protoOrCargo = path: type:
(protoFilter path type) || (craneLib.filterCargoSources path type);
in
protoOrCargo path type;
packages = {
default = my-crate;
docker = dockerImage;
my-crate-llvm-coverage = craneLibLLvmTools.cargoLlvmCov (commonArgs // {
inherit cargoArtifacts;
});
};
# Common arguments can be set here to avoid repeating them later
commonArgs = {
inherit src;
buildInputs = [
pkgs.sqlite
pkgs.pkg-config
pkgs.openssl
pkgs.protobuf
] ++ lib.optionals pkgs.stdenv.isDarwin [
# Additional darwin specific inputs can be set here
pkgs.libiconv
apps.default = flake-utils.lib.mkApp {
drv = my-crate;
};
devShells.default = pkgs.mkShell {
inputsFrom = builtins.attrValues self.checks.${system};
# Additional dev-shell environment variables can be set directly
# MY_CUSTOM_DEVELOPMENT_VAR = "something else";
# Extra inputs can be added here
nativeBuildInputs = with pkgs; [
cargo
rustc
rust-analyzer
cargo-flamegraph
cargo-bloat
];
};
craneLibLLvmTools = craneLib.overrideToolchain
(fenix.packages.${system}.complete.withComponents [
"cargo"
"llvm-tools"
"rustc"
]);
# Build *just* the cargo dependencies, so we can reuse
# all of that work (e.g. via cachix) when running in CI
cargoArtifacts = craneLib.buildDepsOnly commonArgs;
# Build the actual crate itself, reusing the dependency
# artifacts from above.
my-crate = craneLib.buildPackage (commonArgs // {
inherit cargoArtifacts;
});
in
{
checks = {
# Build the crate as part of `nix flake check` for convenience
inherit my-crate;
# Run clippy (and deny all warnings) on the crate source,
# again, reusing the dependency artifacts from above.
#
# Note that this is done as a separate derivation so that
# we can block the CI if there are issues here, but not
# prevent downstream consumers from building our crate by itself.
my-crate-clippy = craneLib.cargoClippy (commonArgs // {
inherit cargoArtifacts;
cargoClippyExtraArgs = "--all-targets -- --deny warnings";
});
my-crate-doc = craneLib.cargoDoc (commonArgs // {
inherit cargoArtifacts;
});
# Check formatting
my-crate-fmt = craneLib.cargoFmt {
inherit src;
};
# Audit dependencies
my-crate-audit = craneLib.cargoAudit {
inherit src advisory-db;
};
# Run tests with cargo-nextest
# Consider setting `doCheck = false` on `my-crate` if you do not want
# the tests to run twice
my-crate-nextest = craneLib.cargoNextest (commonArgs // {
inherit cargoArtifacts;
partitions = 1;
partitionType = "count";
});
} // lib.optionalAttrs (system == "x86_64-linux") {
# NB: cargo-tarpaulin only supports x86_64 systems
# Check code coverage (note: this will not upload coverage anywhere)
my-crate-coverage = craneLib.cargoTarpaulin (commonArgs // {
inherit cargoArtifacts;
});
};
packages = {
default = my-crate;
my-crate-llvm-coverage = craneLibLLvmTools.cargoLlvmCov (commonArgs // {
inherit cargoArtifacts;
});
};
apps.default = flake-utils.lib.mkApp {
drv = my-crate;
};
devShells.default = pkgs.mkShell {
inputsFrom = builtins.attrValues self.checks.${system};
# Additional dev-shell environment variables can be set directly
# MY_CUSTOM_DEVELOPMENT_VAR = "something else";
# Extra inputs can be added here
nativeBuildInputs = with pkgs; [
cargo
rustc
rust-analyzer
sqlite
sqlx-cli
cargo-flamegraph
];
};
});
});
}
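The main structural change above is that the flake now exports `nixosModules.default` alongside the per-system outputs, injecting the package through an overlay so that `module.nix` can refer to `pkgs.vrchat-prometheus-adapter`. A hypothetical downstream flake could consume it roughly as follows; the input URL and host name are placeholders, and the options defined in `./module.nix` are not shown in this diff:

{
  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-23.05";
    # Placeholder URL; point this at wherever the repository is hosted.
    vrchat-prometheus-adapter.url = "git+https://git.gmem.ca/arch/vrchat-prometheus-adapter";
  };

  outputs = { nixpkgs, vrchat-prometheus-adapter, ... }: {
    nixosConfigurations.example-host = nixpkgs.lib.nixosSystem {
      system = "x86_64-linux";
      modules = [
        # Imports ./module.nix and the overlay that exposes the package.
        vrchat-prometheus-adapter.nixosModules.default
      ];
    };
  };
}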


@@ -28,7 +28,6 @@ static WORLD_FAVORITES: OnceLock<IntCounterVec> = OnceLock::new();
enum WsError {
Reqwest(reqwest::Error),
Url(url::ParseError),
Custom(String),
}
impl From<url::ParseError> for WsError {
@@ -49,7 +48,6 @@ impl fmt::Display for WsError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
WsError::Reqwest(e) => write!(f, "Reqwest error: {}", e),
WsError::Custom(e) => write!(f, "Error: {}", e),
WsError::Url(_) => todo!(),
}
}
@@ -107,10 +105,7 @@ struct VrcGroupInstance {
#[derive(Clone, Debug, Deserialize)]
struct VrcWorldData {
favorites: u64,
version: f64,
visits: u64,
popularity: f64,
heat: f64,
#[serde(rename = "publicOccupants")]
public_occupants: f64,
#[serde(rename = "privateOccupants")]
@@ -227,42 +222,12 @@ async fn group_metrics(
name: String,
group: VrcGroup,
) -> Result<(), WsError> {
let instance_list_url = format!(
"https://api.vrchat.cloud/api/1/groups/{}/instances",
group.id
);
let url = Url::parse(&instance_list_url).unwrap();
let req = client
.get(url)
.header(
USER_AGENT,
"vr-event-tracker(git.gmem.ca/arch/vr-event-tracker)",
)
.header("Cookie", auth_cookie)
.send()
.await?;
let data: Vec<VrcGroupInstance> = req.json().await?;
let instances: Vec<VrcInstance> = data
.into_iter()
.map(|f| {
let spl: Vec<&str> = f.location.split(":").collect();
VrcInstance {
instance: Some(spl[0].to_owned()),
world: Some(spl[1].to_owned()),
location: Some(f.location),
name: None,
}
})
.collect();
for instance in instances {
let api_url = format!(
"https://api.vrchat.cloud/api/1/instances/{}",
&instance.location.clone().unwrap()
if !group.id.is_empty() {
let instance_list_url = format!(
"https://api.vrchat.cloud/api/1/groups/{}/instances",
group.id
);
let url = Url::parse(&api_url).unwrap();
let url = Url::parse(&instance_list_url).unwrap();
let req = client
.get(url)
.header(
@@ -273,18 +238,50 @@
.send()
.await?;
let data: VrcInstanceData = req.json().await?;
let instance_name = instance.name.unwrap_or(instance.location.unwrap());
PLAYER_COUNT
.get()
.unwrap()
.with_label_values(&[
&instance.world.unwrap(),
&instance.instance.unwrap(),
&instance_name,
&name,
])
.set(data.user_count.unwrap_or(0 as f64));
let data: Vec<VrcGroupInstance> = req.json().await?;
let instances: Vec<VrcInstance> = data
.into_iter()
.map(|f| {
let spl: Vec<&str> = f.location.split(':').collect();
VrcInstance {
instance: Some(spl[0].to_owned()),
world: Some(spl[1].to_owned()),
location: Some(f.location),
name: None,
}
})
.collect();
for instance in instances {
let api_url = format!(
"https://api.vrchat.cloud/api/1/instances/{}",
&instance.location.clone().unwrap()
);
let url = Url::parse(&api_url).unwrap();
let req = client
.get(url)
.header(
USER_AGENT,
"vr-event-tracker(git.gmem.ca/arch/vr-event-tracker)",
)
.header("Cookie", auth_cookie)
.send()
.await?;
let data: VrcInstanceData = req.json().await?;
let instance_name = instance.name.unwrap_or(instance.location.unwrap());
PLAYER_COUNT
.get()
.unwrap()
.with_label_values(&[
&instance.world.unwrap(),
&instance.instance.unwrap(),
&instance_name,
&name,
])
.set(data.user_count.unwrap_or(0 as f64));
}
}
if group.vrcdn.is_some() {