From 3e83b51e9a926cb6cdf25a13038ac9863362558b Mon Sep 17 00:00:00 2001
From: Volkor <me@volkor.me>
Date: Sun, 29 Jan 2023 23:12:39 +1100
Subject: [PATCH] add metrics page, and fix up some warnings

---
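Notes: the new /metrics endpoint serves the stats as a plain-text,
Prometheus-style exposition page (see docs/info.md below for the metric
names). A quick way to eyeball it once the server is up (host and port
here are illustrative, use whatever the server is configured to listen on):

    curl http://127.0.0.1:8000/metrics
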
 docs/info.md  |  14 ++++---
 src/db.rs     | 114 ++++++++++++++++++++++++++++++++++++++++++--------
 src/engine.rs |   4 +-
 src/main.rs   |  68 +++++++++++++++++++++++++++---
 4 files changed, 170 insertions(+), 30 deletions(-)

diff --git a/docs/info.md b/docs/info.md
index 16267a8..eb519a6 100644
--- a/docs/info.md
+++ b/docs/info.md
@@ -13,12 +13,16 @@ Deleted files should no longer show up in most stats generated, since storing th
 
 ### General Stats
 
-- Total number of files uploaded
-- Total number of files alive /right now/
-- Total number of files uploaded (by x ip)
-- Total bandwidth served (calculated by views * filesize)
-- Geo-map of files uploaded
+- Total number of files alive /right now/ `total_alive`
+- Total number of files dead (deleted) `total_dead`
+- Total number of files uploaded (by x ip) `filecount_ip`
+- Total filesize of alive files `filesize_alive`
+- Total filesize of dead files `filesize_dead`
+- Total bandwidth served for alive files (calculated by views * filesize) `bandwidth_alive`
+- Total bandwidth served for dead files (calculated by views * filesize) `bandwidth_dead`
+- Geo-map of files uploaded (I'm not too sure how we can do this)
 - Geo-map of files served (would require nginx logs)
+- Scrape render time `render_time`
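+
+A sample scrape of `/metrics` might look like this (values are illustrative):
+
+```
+total_alive 132
+total_dead 48
+filecount_ip{ip="203.0.113.7"} 12
+filesize_alive 73400320
+filesize_dead 9437184
+render_time 184211
+```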
 
 ### Filetype Stats (Pick a filetype from the list, or even multiple?)
 
diff --git a/src/db.rs b/src/db.rs
index a952562..b1ff410 100644
--- a/src/db.rs
+++ b/src/db.rs
@@ -1,4 +1,4 @@
-use std::time::SystemTime;
+use std::{collections::HashMap, time::SystemTime};
 
 use sqlx::{sqlite::SqliteQueryResult, Pool, Sqlite};
 
@@ -136,16 +136,20 @@ pub async fn delete_file(sqlconn: &Pool<Sqlite>, filename: String) -> Option<u64
 
     if result.is_err() {
         // If error, return none
-        return None;
+        None
     } else {
-        return Some(result.unwrap().rows_affected());
+        Some(result.unwrap().rows_affected())
     }
 }
 
 // Updating a files viewcount and accesstime.
 // This receives a String with the filename, an i32 unix timestamp (and the sqlpool)
 // This returns a Some(u64), with the number of rows affected.
-pub async fn update_fileview(sqlconn: &Pool<Sqlite>, filename: String, accessed: i32) -> Option<u64> {
+pub async fn update_fileview(
+    sqlconn: &Pool<Sqlite>,
+    filename: String,
+    accessed: i32,
+) -> Option<u64> {
     let result = sqlx::query!(
         "UPDATE files SET accessed = ?, views = views + 1 WHERE file = ?",
         accessed,
@@ -156,9 +160,9 @@ pub async fn update_fileview(sqlconn: &Pool<Sqlite>, filename: String, accessed:
 
     if result.is_err() {
         // If error, return none
-        return None;
+        None
     } else {
-        return Some(result.unwrap().rows_affected());
+        Some(result.unwrap().rows_affected())
     }
 }
 
@@ -172,10 +176,10 @@ pub async fn get_accesss_time(sqlconn: &Pool<Sqlite>, filename: String) -> i32 {
     if result.is_err() {
     // If result is an error, very likely this is the first upload of the file, so let's return the current time!
         // If it isn't, we're in trouble.
-        return SystemTime::now()
+        SystemTime::now()
             .duration_since(SystemTime::UNIX_EPOCH)
             .unwrap()
-            .as_secs() as i32;
+            .as_secs() as i32
     } else {
         // This will only panic if the i64 doesn't fit into the i32
         // I guess I'll have to patch this in 2038 anyway.
@@ -229,10 +233,9 @@ fn update_expiry_override() {
     // implement this I guess.
 }
 
-// File Statistics
-
+// Globally important stats.
 // This function counts the number of files that haven't been deleted yet.
-pub async fn get_total_files_live(sqlconn: &Pool<Sqlite>) -> Option<i32> {
+pub async fn get_total_files_alive(sqlconn: &Pool<Sqlite>) -> Option<i32> {
     // SELECT COUNT(*) FROM files WHERE isDeleted == 0
     let result = sqlx::query!(
         "SELECT COUNT(*) as count 
@@ -243,10 +246,11 @@ pub async fn get_total_files_live(sqlconn: &Pool<Sqlite>) -> Option<i32> {
     .await;
 
     if result.is_err() {
-        // If error, return none
-        return None;
+        // If error, return none and log.
+        tracing::error!("Problem getting live file count: {:?}", result);
+        None
     } else {
-        return Some(result.unwrap().count as i32);
+        Some(result.unwrap().count as i32)
     }
 }
 
@@ -262,9 +266,83 @@ pub async fn get_total_files_dead(sqlconn: &Pool<Sqlite>) -> Option<i32> {
     .await;
 
     if result.is_err() {
-        // If error, return none
-        return None;
+        // If error, return none and log.
+        tracing::error!("Problem getting total file count: {:?}", result);
+        None
+    } else {
+        Some(result.unwrap().count as i32)
+    }
+}
+
+// This function counts the total files that have been uploaded per IP.
+// This returns a HashMap<String, i32> containing the IP, and the number of files uploaded.
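+// e.g. {"203.0.113.7": 12, "198.51.100.2": 3} (illustrative values)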
+pub async fn get_total_uploads_ip(sqlconn: &Pool<Sqlite>) -> Option<HashMap<String, i32>> {
+    // SELECT ip, COUNT(file) as filecount FROM files GROUP BY ip ORDER BY filecount;
+    let result = sqlx::query!(
+        "SELECT ip, COUNT(file) as filecount
+            FROM files 
+            GROUP BY ip 
+            ORDER BY filecount",
+    )
+    .fetch_all(sqlconn)
+    .await;
+
+    let mut ipcount: HashMap<String, i32> = HashMap::new();
+
+    if result.is_err() {
+        // If Error, return none and log.
+        tracing::error!("Problem getting total files by ip: {:?}", result);
+        None
+    } else {
+        for row in result.unwrap() {
+            ipcount.insert(
+                row.IP,
+                row.filecount.expect(
+                    "You shouldn't see this - this means there was an ip with 0 uploaded files in the db. HOW??",
+                ),
+            );
+        }
+        Some(ipcount)
+    }
+}
+
+// This function queries the db for the total filesize of all alive files.
+// Returns an Option<u128> because why not, an i32 isn't big enough anyway.
+pub async fn total_alive_filesize(sqlconn: &Pool<Sqlite>) -> Option<u128> {
+    let mut total_filesize: u128 = 0;
+    let result = sqlx::query!(
+        "SELECT filesize
+            FROM files 
+            WHERE isDeleted == 0"
+    )
+    .fetch_all(sqlconn)
+    .await;
+    if result.is_err() {
+        // If Error, return none and log.
+        tracing::error!("Problem getting total files by ip: {:?}", result);
+        None
+    } else {
+        for row in result.unwrap() {
+            total_filesize += row.filesize as u128;
+        }
+        Some(total_filesize)
+    }
+}
+
+// This function queries the db for the total filesize of all dead files.
+// Returns an Option<u128> because why not, an i32 isn't big enough anyway.
+pub async fn total_dead_filesize(sqlconn: &Pool<Sqlite>) -> Option<u128> {
+    let mut total_dead_filesize: u128 = 0;
+    let result = sqlx::query!(
+        "SELECT filesize
+            FROM files 
+            WHERE isDeleted == 1"
+    )
+    .fetch_all(sqlconn)
+    .await;
+    if result.is_err() {
+        // If Error, return none and log.
+        tracing::error!("Problem getting total files by ip: {:?}", result);
+        None
     } else {
-        return Some(result.unwrap().count as i32);
+        for row in result.unwrap() {
+            total_dead_filesize += row.filesize as u128;
+        }
+        Some(total_dead_filesize)
     }
-}
\ No newline at end of file
+}
diff --git a/src/engine.rs b/src/engine.rs
index ebb35a2..6abeb86 100644
--- a/src/engine.rs
+++ b/src/engine.rs
@@ -63,9 +63,9 @@ pub async fn generate_adminkey(sqlconn: &Pool<Sqlite>) -> String {
     let result = db::check_adminkey(sqlconn, adminkey.clone()).await;
     if result.is_some() {
         // Adminkey resolves to a filename, so we better regenerate it.
-        let adminkey = nanoid!(size);
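+        // Note: the freshly generated key is not re-checked against the db;
+        // a second collision is assumed unlikely here.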
+        return nanoid!(size);
     }
-    return adminkey;
+    adminkey
 }
 
 pub async fn calculate_expiry(sqlconn: &Pool<Sqlite>, filename: String, filesize: i32) -> i32 {
diff --git a/src/main.rs b/src/main.rs
index 56532d0..4089826 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1,4 +1,3 @@
-
 use once_cell::sync::OnceCell;
 use ramhorns::{Content, Ramhorns};
 use salvo::fs::NamedFile;
@@ -11,10 +10,10 @@ use chrono::{TimeZone, Utc};
 use config::Config;
 use lazy_static::lazy_static;
 use rand::Rng;
+use std::fs;
 use std::fs::create_dir_all;
 use std::path::Path;
-use std::time::{Duration, SystemTime};
-use std::fs;
+use std::time::{Duration, Instant, SystemTime};
 use tokio::{task, time};
 use tracing_subscriber::filter::EnvFilter;
 use tracing_subscriber::fmt;
@@ -460,6 +459,64 @@ async fn serve_static(req: &mut Request, res: &mut Response) {
     }
 }
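+// Serve the metrics page: one "name value" line per stat, built up as a
+// single plain-text string. Every scrape runs fresh queries against the db.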
 
+#[handler]
+async fn serve_metrics(_req: &mut Request, res: &mut Response) {
+    // Let's start timing this:
+    let start = Instant::now();
+
+    // Grab the db pool
+    let sqlconn = SQLITE.get().unwrap();
+
+    // Build up the massive string of metrics, starting with the
+    // total number of files alive right now.
+    let mut rendered = format!(
+        "total_alive {}\n",
+        db::get_total_files_alive(sqlconn).await.unwrap()
+    );
+
+    // Add total number of files dead
+    rendered = format!(
+        "{}total_dead {}\n",
+        rendered,
+        db::get_total_files_dead(sqlconn).await.unwrap()
+    );
+
+    // Counting how many files each IP has uploaded.
+    let uploads_by_ip = db::get_total_uploads_ip(sqlconn).await;
+    // Render each IP as its own line, with the IP attached as a label.
+    for (ip, count) in uploads_by_ip.unwrap() {
+        rendered = format!("{}filecount_ip{{ip=\"{}\"}} {}\n", rendered, ip, count);
+    }
+
+    rendered = format!(
+        "{}filesize_alive {}\n",
+        rendered,
+        db::total_alive_filesize(sqlconn).await.unwrap()
+    );
+    rendered = format!(
+        "{}filesize_dead {}\n",
+        rendered,
+        db::total_dead_filesize(sqlconn).await.unwrap()
+    );
+
+    // Add how long it took to get all of those metrics to the page!
+    rendered = format!(
+        "{}render_time {}\n",
+        rendered,
+        start.elapsed().as_nanos()
+    );
+
+    // Actually render the final metrics page
+    res.render(Text::Plain(rendered));
+}
+
 #[tokio::main]
 async fn main() {
     // Logging
@@ -486,8 +543,8 @@ async fn main() {
         tracing::info!("Using Postgres backend");
         // We're gonna need to load /all/ the postgres stuff up here
         let psql = CONFIG
-        .get_string("operations.postgres_url")
-        .expect("Couldn't find 'postgres_url' in config. :(");
+            .get_string("operations.postgres_url")
+            .expect("Couldn't find 'postgres_url' in config. :(");
         // let pool = PgPool::connect(&psql).await;
         // TODO: figure out how to use sqlx::Pool struct https://docs.rs/sqlx/latest/sqlx/struct.Pool.html
         // This is on hold until https://github.com/launchbadge/sqlx/issues/964
@@ -519,6 +576,7 @@ async fn main() {
         .push(Router::with_path("/faq").get(serve_static))
         .push(Router::with_path("/dmca").get(serve_static))
         .push(Router::with_path("/welcome").get(serve_static))
+        .push(Router::with_path("/metrics").get(serve_metrics))
         .push(Router::with_path("/favicon.ico").get(StaticFile::new("static/favicon32.webp")))
         // Static File Serving
         .push(
-- 
GitLab