diff --git a/README.md b/README.md
index be34498ed913c1795c3e0b659fa3a19f32f1af58..3069e71b5f762e30f1cd07241c4b3a5d1b8cf28d 100644
--- a/README.md
+++ b/README.md
@@ -32,21 +32,9 @@ Eventually we'll have 2 different environments, development and production, each
 
 ### Configuration Options
 
-Configuration is done by settings environment variables in the launch command.
-(This definitely isn't because I cannot be bothered implementing a proper config file in code.)
-
-| Variable         | Default Value |                                                                                  |
-|------------------|---------------|----------------------------------------------------------------------------------|
-| HOST             | 0.0.0.0       | What IP the application listens on                                               |
-| PORT             | 8282          | What port the application listens on.                                            |
-| EPHEMERAL_LOG    |               | (Required) Sets the log level output                                             |
-| ENGINE           | 2             | Sets the engine mode                                                             |
-| CLEANER_INTERVAL | 1800          | How long the cleaner task runs, in seconds.                                      |
-| FILE_EXPIRY_MIN  | 7             | (Depends on engine setting) The Minimum a file /should/ exist on the server for. |
-| FILE_EXPIRY_MAX  | 365           | (Depends on engine setting) The Longest a file /should/ exist on the server for. |
-| MAX_FILESIZE*     | 1073741824    | The 'cap' for calculating expiry.                                               |
-
-\*MAX_FILESIZE doesn't actually set the maximum allowed filesize, it's only used for calculating the expiry.
+Configuration options are available in the `config.toml` file.
+They're all fairly self-explanatory, with nice comments.
+Just make sure to restart your server for changes to take effect; I have definitely wasted a few hours bug-fixing in vain.
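+
+As a rough sketch, the file might look something like this (key names and values are illustrative, based on the old environment variables; the shipped `config.toml` and its comments are the source of truth):
+
+```toml
+# Illustrative sketch only - check the comments in the shipped config.toml.
+host = "0.0.0.0"            # What IP the application listens on
+port = 8282                 # What port the application listens on
+ephemeral_log = "info"      # (Required) Sets the log level output
+engine = 2                  # Sets the engine mode
+cleaner_interval = 1800     # How often the cleaner task runs, in seconds
+file_expiry_min = 7         # The minimum a file /should/ exist on the server for
+file_expiry_max = 365       # The longest a file /should/ exist on the server for
+max_filesize = 1073741824   # Only used for calculating expiry, not a hard size cap
+```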
 
 ## Weird Security things you should probably be aware of
 
diff --git a/docs/info.md b/docs/info.md
index eb519a6aa3e320d59a5389d875a16b69ed099140..6cce51cd732eeb9a50ba7c1b11dd57957ccdce16 100644
--- a/docs/info.md
+++ b/docs/info.md
@@ -26,23 +26,18 @@ Deleted files should no longer show up in most stats generated, since storing th
 
 ### Filetype Stats (Pick a filetype from the list, or even multiple?)
 
-- Total number of files uploaded (by x filetype)
-- Total number of files alive /right now/ (by x filetype)
+- Total number of alive files uploaded (by x filetype) `total_alive{filetype=mp4}`
+- Total number of dead files (by x filetype) `total_dead{filetype=mp4}`
+- Total filesize of alive files `filesize_alive{filetype=mp4}`
+- Total filesize of dead files `filesize_dead{filetype=mp4}`
+- Total number of views for dead files `views_dead{filetype=mp4}`
+- Total number of views for alive files (derivable from the per-file `file{...}` metric listed under File Stats below)
 - Filesize Graph (Average/total? filesize per filetype)
 - Filesize Graph (Filesize vs lifetime)
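+
+For reference, the filetype metrics above might render something like this in the raw metrics output (values made up):
+
+```text
+total_alive{filetype=mp4} 12
+total_dead{filetype=mp4} 34
+filesize_alive{filetype=mp4} 123456789
+filesize_dead{filetype=mp4} 987654321
+views_dead{filetype=mp4} 567
+```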
 
 ### File Stats (Pick an individual file from a list, or even multiple?)
 
-- File views over time
-- Filesize Graph (Filesize vs lifetime)
-
-### View Stats
-
-- Total number of views total
-- Total number of views by x filetype
-- View count of files uploaded.
-- View count of files grouped by x filetype
-- View count of files uploaded total.
+- File Stats `file{filename=hUMZCp.jpg filesize=2345677 filetype=jpg views=123 expiry=11111111}`
 
 ### Malicious/Error Stats
 
diff --git a/src/db.rs b/src/db.rs
index b1ff410f017952c07d86530ed108adc601424780..f50c9c65d45d9943b3b826d2ed77b48c80b91d99 100644
--- a/src/db.rs
+++ b/src/db.rs
@@ -1,6 +1,15 @@
 use std::{collections::HashMap, hash::Hash, time::SystemTime};
 
 use sqlx::{sqlite::SqliteQueryResult, Pool, Sqlite};
+
+// This struct stores the per-file values used for the file stats metrics.
+pub struct FileMetric {
+    pub filename: String,
+    pub filesize: i64,
+    pub filetype: String,
+    pub views: i64,
+    pub expiry: i64
+}
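+
+// Each FileMetric is rendered as one line on the metrics page, e.g.
+//     file{filename=hUMZCp.jpg filesize=2345677 filetype=jpg views=123 expiry=11111111}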
 
 // Adding a file to the database
 // TODO: Fix panic on fileadd with same filename (even if isDeleted) (UNIQUE constraint)
@@ -234,45 +243,6 @@ fn update_expiry_override() {
 }
 
 // Globally important stats.
-// This function counts the number of files that haven't been deleted yet.
-pub async fn get_total_files_alive(sqlconn: &Pool<Sqlite>) -> Option<i32> {
-    // SELECT COUNT(*) FROM files WHERE isDeleted == 0
-    let result = sqlx::query!(
-        "SELECT COUNT(*) as count 
-            FROM files 
-            WHERE isDeleted = 0",
-    )
-    .fetch_one(sqlconn)
-    .await;
-
-    if result.is_err() {
-        // If error, return none and log.
-        tracing::error!("Problem getting live file count: {:?}", result);
-        None
-    } else {
-        Some(result.unwrap().count as i32)
-    }
-}
-
-// This function counts the number of files that have been deleted.
-pub async fn get_total_files_dead(sqlconn: &Pool<Sqlite>) -> Option<i32> {
-    // SELECT COUNT(*) FROM files WHERE isDeleted == 1
-    let result = sqlx::query!(
-        "SELECT COUNT(*) as count 
-            FROM files 
-            WHERE isDeleted = 1",
-    )
-    .fetch_one(sqlconn)
-    .await;
-
-    if result.is_err() {
-        // If error, return none and log.
-        tracing::error!("Problem getting total file count: {:?}", result);
-        None
-    } else {
-        Some(result.unwrap().count as i32)
-    }
-}
 
 // This function counts the total files that have been uploaded per ip.
 // This returns a hashmap<String, i32> containing the IP, and the number of files uploaded.
@@ -346,3 +316,123 @@ pub async fn total_dead_filesize(sqlconn: &Pool<Sqlite>) -> Option<u128> {
         Some(total_dead_filesize)
     }
 }
+
+// This function queries the db for the number of alive files, grouped by filetype.
+// Returns a HashMap mapping the filetype (String) to the number of alive files (i32).
+pub async fn total_alive_filetype(sqlconn: &Pool<Sqlite>) -> Option<HashMap<String, i32>> {
+    let result = sqlx::query!(
+        "SELECT COUNT(file) as filecount, filetype 
+            FROM files 
+            WHERE isDeleted = 0 
+            GROUP BY filetype",
+    )
+    .fetch_all(sqlconn)
+    .await;
+
+    let mut filecount: HashMap<String, i32> = HashMap::new();
+
+    if result.is_err() {
+        // If Error, return none and log.
+        tracing::error!("Problem getting total alive files by ip: {:?}", result);
+        None
+    } else {
+        for row in result.unwrap() {
+            filecount.insert(row.filetype.expect("Something went very wrong while getting the alive filetype count."),
+                 row.filecount);
+        }
+        Some(filecount)
+    }
+}
+
+// This function queries the db for the number of dead files, grouped by filetype.
+// Returns a HashMap mapping the filetype (String) to the number of dead files (i64).
+pub async fn total_dead_filetype(sqlconn: &Pool<Sqlite>) -> Option<HashMap<String, i64>> {
+    let result = sqlx::query!(
+        "SELECT COUNT(file) as filecount, filetype 
+            FROM files 
+            WHERE isDeleted = 1 
+            GROUP BY filetype",
+    )
+    .fetch_all(sqlconn)
+    .await;
+
+    let mut filecount: HashMap<String, i64> = HashMap::new();
+
+    if result.is_err() {
+        // If Error, return none and log.
+        tracing::error!("Problem getting total dead files by ip: {:?}", result);
+        None
+    } else {
+        for row in result.unwrap() {
+            filecount.insert(row.filetype.expect("Something went very wrong while getting the dead filetype count."),
+                 row.filecount);
+        }
+        Some(filecount)
+    }
+}
+
+// This function queries the db for the total views of dead files, grouped by filetype.
+// Returns a HashMap mapping the filetype (String) to the total views (i64).
+pub async fn get_dead_fileviews(sqlconn: &Pool<Sqlite>) -> Option<HashMap<String, i64>> {
+    let result = sqlx::query!(
+        "SELECT filetype, views 
+            FROM files 
+            WHERE isDeleted == 1
+            GROUP BY filetype"
+    )
+    .fetch_all(sqlconn)
+    .await;
+
+    let mut deadviews: HashMap<String, i64> = HashMap::new();
+
+    if result.is_err() {
+        // If Error, return none and log.
+        tracing::error!("Problem getting total views of dead files: {:?}", result);
+        None
+    } else {
+        for row in result.unwrap() {
+            deadviews.insert(
+                row.filetype.expect("Something went very wrong while getting the dead filetype views."),
+                row.views.expect("Something went very wrong while getting the dead views."),
+            );
+        }
+        tracing::debug!("deadviews: {:?}", deadviews);
+        Some(deadviews)
+    }
+}
+
+// This function queries the db for the filesize of /each/ alive file. We won't need to do this for dead files
+// since they were alive at some point, and when they are removed from the alive list, we don't really care.
+// This returns a Vec of FileMetric structs (filename, filesize, filetype, views, expiry).
+// We want to group them by file, filetype, or total in grafana, so we need to label it properly.
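+// Example (hypothetical caller):
+//     if let Some(files) = get_filemetrics(sqlconn).await {
+//         for f in files {
+//             println!("{} is {} bytes", f.filename, f.filesize);
+//         }
+//     }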
+pub async fn get_filemetrics(sqlconn: &Pool<Sqlite>) -> Option<Vec<FileMetric>> {
+    let result = sqlx::query!(
+        "SELECT file, filesize, filetype, views, expiry
+        FROM files 
+        WHERE isdeleted == 0",
+    )
+    .fetch_all(sqlconn)
+    .await;
+
+    let mut filevec: Vec<FileMetric> = Vec::new();
+
+    if result.is_err() {
+        // If Error, return none and log.
+        tracing::error!("Problem getting filesizes: {:?}", result);
+        None
+    } else {
+        for row in result.unwrap() {
+            // For each row (file), add it to the struct
+            let file = FileMetric {
+                filename: row.file,
+                filesize: row.filesize,
+                filetype: row.filetype.expect("Something went very wrong while getting the filetype."),
+                views: row.views.expect("Something went very wrong while getting the views."),
+                expiry: row.expiry
+            };
+            // Then add the struct to the Vec
+            filevec.push(file);
+        }
+        // Return the collected file metrics.
+        Some(filevec)
+    }
+}
diff --git a/src/main.rs b/src/main.rs
index 40898268f836604ffee6763282be8fecb4636284..feda152b9336c3064e24c2726a13044538e8a608 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -469,28 +469,15 @@ async fn serve_metrics(req: &mut Request, res: &mut Response) {
     // Setup the massive string of metrics
     let mut rendered = String::new();
 
-    // Add total number of files
-    rendered = format!(
-        "total_alive {}\n",
-        db::get_total_files_alive(sqlconn).await.unwrap()
-    );
-
-    // Add total number of files dead
-    rendered = format!(
-        "{}total_dead {}\n",
-        rendered,
-        db::get_total_files_dead(sqlconn).await.unwrap()
-    );
-
     // Counting how many files each IP has uploaded.
-    let mut metrics = db::get_total_uploads_ip(sqlconn).await;
+    let mut ipcount = db::get_total_uploads_ip(sqlconn).await;
     // Loop through each IP and render it as a new line with a label for each IP
-    for metric in metrics.as_mut().unwrap() {
+    for ipc in ipcount.as_mut().unwrap() {
         rendered = format!(
             "{}filecount_ip{{ip={}}} {}\n",
             rendered,
-            &metric.0.to_string(),
-            &metric.1.to_string()
+            &ipc.0.to_string(),
+            &ipc.1.to_string()
         );
     }
 
@@ -505,10 +492,66 @@ async fn serve_metrics(req: &mut Request, res: &mut Response) {
         db::total_dead_filesize(sqlconn).await.unwrap()
     );
 
+    // Counting how many alive files have been uploaded per filetype
+    let mut afilecount = db::total_alive_filetype(sqlconn).await;
+    // Loop through each filetype and render it as a new line with a label for each type
+    for afc in afilecount.as_mut().unwrap() {
+        rendered = format!(
+            "{}total_alive{{filetype={}}} {}\n",
+            rendered,
+            &afc.0.to_string(),
+            &afc.1.to_string()
+        );
+    }
+
+    // Counting how many dead files have been uploaded per filetype
+    let mut dfilecount = db::total_dead_filetype(sqlconn).await;
+    // Loop through each filetype and render it as a new line with a label for each type
+    for dfc in dfilecount.as_mut().unwrap() {
+        rendered = format!(
+            "{}total_dead{{filetype={}}} {}\n",
+            rendered,
+            &dfc.0.to_string(),
+            &dfc.1.to_string()
+        );
+    }
+
+    // This is a pain, we're grabbing the individual file stats and rendering a line for each file.
+    let filevec = db::get_filemetrics(sqlconn).await;
+    // Add a newline here so we can keep it pretty.
+    rendered = format!("{}\n", rendered);
+    // For each line in the Vec.
+    for file in filevec.unwrap() {
+        // Add the file to the rendered String :)
+        rendered = format!(
+            "{}file{{filename={} filesize={} filetype={} views={} expiry={}}}\n",
+            rendered,
+            file.filename,
+            file.filesize,
+            file.filetype,
+            file.views,
+            file.expiry,
+        );
+    }
+
+    // Getting the number of views for all dead filetypes.
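+    // Renders lines like: views_dead{filetype=mp4} 567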
+    let mut deadfileview = db::get_dead_fileviews(sqlconn).await;
+    // Add a newline here so we can keep it pretty.
+    rendered = format!("{}\n", rendered);
+    // Loop through each filetype and render it as a new line with a label for each type
+    for dfv in deadfileview.as_mut().unwrap() {
+        rendered = format!(
+            "{}views_dead{{views={}}} {}\n",
+            rendered,
+            &dfv.0.to_string(),
+            &dfv.1.to_string()
+        );
+    }
+
     // Add how long it took to get all of those metrics to the page!
     let end = Instant::now();
     rendered = format!(
-        "{}render_time {}\n",
+        "{}\nrender_time {}\n",
         rendered,
         end.duration_since(start).as_nanos().to_owned()
     );