diff --git a/README.md b/README.md
index 702c3fc58bcaf2c50213b508543eab74127d8631..82aeefdcd3a3f156177bb0772dd4cd68446d065d 100644
--- a/README.md
+++ b/README.md
@@ -37,7 +37,6 @@ HOST=127.0.0.1
 PORT=8282
 
 The above are the defaults, and you SHOULD be changing them.
 
-
 #### Native (debian)
 TODO: Update for baked in templating - installation with a custom template requires a recompile.
diff --git a/docs/DB.md b/docs/DB.md
index e27cbf517e176e0e53fc878a9aa0b62fa97290e2..af1287924467be33a2fb4c37001022197abcc86b 100644
--- a/docs/DB.md
+++ b/docs/DB.md
@@ -34,13 +34,6 @@ Database should store everything in 1 db, multiple 2 or 3, because easy.
 Hopefully we can move away from having a separate stats table, and use metrics generated from the files table instead.
 (or maybe not for performance - might be cheaper to keep the stats table and update it every minute or so.)
 
-## PostgreSQL installation
-
-1. `psql -U <admin>`
-2. `CREATE DATABASE ephemeral;`
-3. `CREATE USER ephemeral WITH ENCRYPTED PASSWORD 'yourpass';`
-4. `GRANT ALL PRIVILEGES ON DATABASE ephemeral TO ephemeral;`
-
 ## SQL Commands
 
 -- Files are stored here!
@@ -65,4 +58,4 @@ CREATE TABLE "qrscan" (
     IP TEXT NOT NULL,
     useragent TEXT NOT NULL,
     version INTEGER NOT NULL
-);
\ No newline at end of file
+);
diff --git a/docs/info.md b/docs/info.md
index b05d722debccc369e7779a6ac7a8166c647805ff..52622de0c30f4cb56e6c94e2c430be52c34689aa 100644
--- a/docs/info.md
+++ b/docs/info.md
@@ -50,3 +50,4 @@ There's a way to get it working, but im not smart enough to figure it out.
 
 # TODO's
 TODO: See how many .unwraps I can handle to make this thing potentially crash less, and offer more decent errors.
+TODO: Test the guess_ip function behind nginx properly; I don't know if IPv6/unix sockets work at all, or if they're passed through as IPv4.
diff --git a/src/db.rs b/src/db.rs
index a5a16a3bdd82ea1f0487b095a8fc0661d0ed7dc9..e71858962a42149a9f85eed62db15bb6875c06fc 100644
--- a/src/db.rs
+++ b/src/db.rs
@@ -1,33 +1,101 @@
-use sqlx::{Sqlite, Pool};
+use sqlx::{Pool, Row, Sqlite, Executor, Execute};
 
+pub async fn create_db(sqlconn: &Pool<Sqlite>) {
+    let result = sqlx::query(
+        "CREATE TABLE IF NOT EXISTS 'files' (
+        file TEXT PRIMARY KEY,
+        filetype TEXT NOT NULL,
+        expiry INTEGER NOT NULL,
+        expiry_override INTEGER NOT NULL,
+        views INTEGER DEFAULT '0',
+        isDeleted INTEGER DEFAULT '0',
+        adminkey TEXT NOT NULL,
+        accessed INTEGER DEFAULT '0',
+        filesize INTEGER NOT NULL,
+        IP TEXT NOT NULL,
+        domain TEXT NOT NULL);")
+    .execute(sqlconn)
+    .await;
+
+    let qrresult = sqlx::query(
+        "CREATE TABLE IF NOT EXISTS 'qrscan' (
+        scanid INTEGER PRIMARY KEY,
+        time INTEGER NOT NULL,
+        IP TEXT NOT NULL,
+        useragent TEXT NOT NULL,
+        version INTEGER NOT NULL
+    );")
+    .execute(sqlconn)
+    .await;
+    tracing::info!("Created database tables!");
+}
+
 // Adding a file to the database
-pub fn add_file(sqlthingy: &Pool<Sqlite>,
-                filename: String,
-                filetype: String, // Optional, only if there is a filetype.
-                expiry: i32,
-                expiry_override: Option<i32>, // Optional - for future use where you can specify expiry at upload.
-                adminkey: String,
-                filesize: i32, // I dread calculating this.
-                ip: &str, // set to the end-user IP of the upload request.
-                domain: &str, // set to the HOST header of the upload request.
-                ) {
+pub async fn add_file(
+    sqlconn: &Pool<Sqlite>,
+    filename: String,
+    filetype: String, // Optional, only if there is a filetype.
+    expiry: i32,
+    expiry_override: Option<i32>, // Optional - for future use where you can specify expiry at upload.
+    adminkey: String,
+    filesize: i32, // I dread calculating this.
+    ip: String, // set to the end-user IP of the upload request.
+    domain: &str, // set to the HOST header of the upload request.
+) -> Option<bool> {
+    tracing::debug!("add_file(We are now in add_file!)");
+
+    let result = sqlx::query(
+        "INSERT INTO files (
+        file,
+        filetype,
+        expiry,
+        expiry_override,
+        adminkey,
+        filesize,
+        ip,
+        domain)
+        VALUES ( ?,?,?,?,?,?,?,? )",
+    )
+    .bind(filename)
+    .bind(filetype)
+    .bind(expiry)
+    .bind(expiry_override)
+    .bind(adminkey)
+    .bind(filesize)
+    .bind(ip)
+    .bind(domain)
+    .execute(sqlconn)
+    .await;
+    tracing::debug!("add_file(Added file to the database)");
+    Some(true)
 }
 
 // Marking a file as deleted
-fn delete_file(sqlthingy: &Pool<Sqlite>) {
-    // implement this I guess.
+pub async fn delete_file(sqlconn: &Pool<Sqlite>, adminkey: String) {
+    tracing::debug!("delete_file(adminkey: {})", adminkey);
+    // Delete file if the adminkey matches the filename.
+    let filename = sqlx::query("SELECT file FROM files WHERE adminkey = ?")
+        .bind(&adminkey)
+        .fetch_one(sqlconn)
+        .await;
+
+    let f = filename.unwrap().try_get_raw("file").unwrap();
+
+    // tracing::info!("Deleting file: {:?} with adminkey: {}", f, adminkey);
+
+    let result = sqlx::query("DELETE FROM files WHERE adminkey = ?")
+        .bind(&adminkey)
+        .execute(sqlconn)
+        .await;
 }
 
 // Updating a files viewcount
-fn update_fileview(sqlthingy: &Pool<Sqlite>) {
+fn update_fileview(sqlconn: &Pool<Sqlite>) {
     // implement this I guess.
     // filecount = filecount + 1;
 }
 
 // Returns the unix timestamp of the last access - 0 if unviewed.
-fn get_accesss_time(sqlthingy: &Pool<Sqlite>, filename: String) -> i32 {
-
-    return 0
+fn get_accesss_time(sqlconn: &Pool<Sqlite>, filename: String) -> i32 {
+    return 0;
 }
 
 // Generating a list of files that should be deleted
@@ -56,4 +124,4 @@ fn get_total_files_live() {
 
 fn get_total_files_dead() {
     // SELECT COUNT(*) FROM files WHERE isDeleted == 1
-}
\ No newline at end of file
+}
diff --git a/src/lib.rs b/src/lib.rs
index add7bb91a378386c1da402b93e4c734f46963de2..58d22d731abf028dbe88960e23aec1a92ba95af4 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,11 +1,12 @@
 ////////
 /// This is just for the ugly functions that we don't really need in the main.rs file.
 /// I don't really know what I'm doing, so this makes it looks like im a better developer than I actually am.
-////////
-
+////////
+use std::net::SocketAddr;
 use nanoid::nanoid;
 
-use sqlx::{Sqlite, Pool};
+use salvo::{hyper::header, Request};
+use sqlx::{Pool, Sqlite};
 
 // Generate a random name for the uploaded file.
 pub fn generate_filename(filename: String) -> String {
@@ -19,8 +20,7 @@ pub fn generate_filename(filename: String) -> String {
     // This works because a file like '.bleh' separates into ["", "bleh"], while 'bleh' separates into ["bleh"]
     if v.len() == 1 {
         // Length is 1, meaning no file extension, so go ahead and generate a 'size' nanoid!
-        nanoid!(size)
-
+        nanoid!(size)
     } else {
         // Take the last 'element', and add that to the end of a nanoid.
         let newfilename = nanoid!(size) + "." + v.last().unwrap();
@@ -39,7 +39,6 @@ pub fn get_filetype(filename: String) -> Option<String> {
     if v.len() == 1 {
         // Length is 1, meaning no file extension, so lets return 'None'
         None
-
     } else {
         // Length is > 1, maning there is a file extension. so lets return it. (or there's 0, and this filename is empty somehow)
         Some(v.last().unwrap().to_string())
@@ -53,7 +52,7 @@ pub fn generate_deletionkey() -> String {
     nanoid!(size)
 }
 
-pub fn calculate_expiry(sqlthingy: &Pool<Sqlite>, filename: String, filesize: i32) -> Option<i32> {
+pub fn calculate_expiry(sqlthingy: &Pool<Sqlite>, filename: String, filesize: i32) -> i32 {
     // TODO: Get config stuffs, so we can figure out what engine mode!
 
     let engine = 1;
@@ -67,7 +66,7 @@ pub fn calculate_expiry(sqlthingy: &Pool<Sqlite>, filename: String, filesize: i3
             std::process::exit(2);
         }
     };
-    return expiry;
+    expiry.unwrap()
 }
 
 fn engine_1(sqlthingy: &Pool<Sqlite>, filename: String) -> i32 {
@@ -75,17 +74,86 @@ fn engine_1(sqlthingy: &Pool<Sqlite>, filename: String) -> i32 {
     // TODO: Read this from config
     const file_expiry_min: i32 = 7;
     // Find the last time the file was viewed.
 
-
-    return 1
+    return 1;
 }
 
 fn engine_2(sqlthingy: &Pool<Sqlite>, filename: String, filesize: i32) -> i32 {
     // Do actual calculation.
-    return 2
+    return 2;
 }
 
 fn engine_3(sqlthingy: &Pool<Sqlite>, filename: String, filesize: i32) -> i32 {
     // Do actual calculation.
-    return 3
-}
\ No newline at end of file
+    return 3;
+}
+
+pub fn guess_ip(req: &mut Request) -> String {
+    // This function returns what the best guess of the Client's IP address is.
+    // This isn't perfect, it won't magically figure out VPNs or proxies. It's
+    // just to figure out the incoming ip address, for legal and statistical reasons.
+
+    // Basically just hardcoded: prioritise Cloudflare headers, then nginx headers,
+    // then the remote_addr of the connection.
+
+    // Re-clone the headers - because the ip might be in there, sometimes.
+    let headers = req.headers().clone();
+    if headers.get("CF-Connecting-IP").is_none() {
+        // Either there was a problem with the cloudflare header, or there is no cloudflare.
+        tracing::debug!(
+            "guess_ip(No cloudflare header detected!, falling back to X-Real-IP header)"
+        );
+        if headers.get("X-Real-IP").is_none() {
+            tracing::debug!("guess_ip(No X-Real-IP detected!, falling back to remote_addr header)");
+            // Okay if that failed we just give up and return a remote_addr
+            let ip = req.remote_addr().unwrap().clone();
+            // thanks trev
+            if ip.as_ipv4().is_none() {
+                tracing::debug!("guess_ip(We ain't using ipv4.)");
+                if ip.as_ipv6().is_none() {
+                    tracing::debug!("guess_ip(We ain't using ipv6.)");
+                    if ip.as_unix().is_none() {
+                    } else {
+                        tracing::error!(
+                            "This request came from a unix socket, How the hell? {:?}",
+                            req
+                        );
+                    }
+                } else {
+                    tracing::debug!("guess_ip(we out here using ipv6.)");
+                    let ip_address = ip.as_ipv6().map(|ip| ip.ip());
+                    tracing::debug!("guess_ip(ip_address): {:?}", ip_address);
+                    return ip_address.unwrap().to_string();
+                }
+            } else {
+                tracing::debug!("guess_ip(we out here using ipv4.)");
+                let ip_address = ip.as_ipv4().map(|ip| ip.ip());
+                tracing::debug!("guess_ip(ip_address): {:?}", ip_address);
+                return ip_address.unwrap().to_string();
+            }
+        } else {
+            return headers
+                .get("X-Real-IP")
+                .unwrap()
+                .to_str()
+                .unwrap()
+                .to_string();
+        }
+    } else {
+        // wow this is ugly.
+        return headers
+            .get("CF-Connecting-IP")
+            .unwrap()
+            .to_str()
+            .unwrap()
+            .to_string();
+    }
+
+    // tracing::debug!("guess_ip(ip): {:?}", ip);
+
+    // ip.to_str().unwrap().to_string()
+    // Unable to determine ip, use 0.0.0.0
+    tracing::debug!("guess_ip(headers): {:?}", headers);
+    tracing::error!("guess_ip(Failed to guess ip, falling back to 0.0.0.0)");
+    "0.0.0.0".to_string()
+}
diff --git a/src/main.rs b/src/main.rs
index 98384f809d45b95f9e6bfd69476861d834a61018..77070c07f274380e3b4a426fab855d7cefd5b993 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1,22 +1,22 @@
 #[macro_use]
 extern crate tera;
 
-use sqlx::SqlitePool;
 use once_cell::sync::OnceCell;
 use salvo::hyper::header::HOST;
 use salvo::prelude::*;
 use salvo::serve_static::StaticDir;
+use sqlx::SqlitePool;
 
-use std::{env, fs};
 use std::fs::create_dir_all;
 use std::path::Path;
+use std::{env, fs};
 
+use tracing_subscriber::filter::{EnvFilter, LevelFilter};
 use tracing_subscriber::fmt;
 use tracing_subscriber::prelude::*;
-use tracing_subscriber::filter::{EnvFilter, LevelFilter};
 
 // Import sub-modules.
-mod lib;
 mod db;
+mod lib;
 
 static SQLITE: OnceCell<SqlitePool> = OnceCell::new();
@@ -66,16 +66,27 @@ async fn upload(req: &mut Request, res: &mut Response) {
     let path = Path::new(file.path());
     let filesize = fs::metadata(path).unwrap().len() as i32;
     tracing::debug!("upload(filesize): {:?}", filesize);
-    let expiry = lib::calculate_expiry(sqlthingy, filename, filesize);
+    let expiry = lib::calculate_expiry(sqlthingy, filename.clone(), filesize);
     tracing::debug!("upload(expiry): {:?}", expiry);
 
-    // This aint work
-    // let ip = &headers["IP"];
-
-    // Add all that cool stuff into the DB!
-    // Grab a sqlite thing from the pool.
-
-    // db::add_file(&sqlthingy, filename, filetype, expiry, expiry_override, adminkey, filesize, ip, host);
+    // Determine what ip type it is.
+    tracing::debug!("upload(headers): {:?}", headers);
+    let ip = lib::guess_ip(req);
+    tracing::debug!("upload(ip): {:?}", ip);
+
+    let expiry_override = Some(0);
+
+    db::add_file(
+        sqlthingy,
+        filename,
+        filetype,
+        expiry,
+        expiry_override,
+        adminkey,
+        filesize,
+        ip,
+        host,
+    ).await;
 
     tracing::info!("File uploaded to {}", dest);
     res.render(Text::Plain("File upload sucess!"));
@@ -106,6 +117,9 @@ async fn main() {
     // Sets the db pool to the static thingy, so we can access it /anywhere!/
     SQLITE.set(pool).unwrap();
 
+    // Create the tables if they don't already exist
+    db::create_db(SQLITE.get().unwrap()).await;
+
     // Attempt to create the files directory
     create_dir_all("files").unwrap();
     let router = Router::new()
@@ -148,4 +162,4 @@ static INDEX_HTML: &str = r#"<!DOCTYPE html>
     </form>
   </body>
 </html>
-"#;
\ No newline at end of file
+"#;
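
Note (not part of the diff above): the new `guess_ip` in `src/lib.rs` uses a fixed priority order - `CF-Connecting-IP`, then `X-Real-IP`, then the connection's remote address, then a literal `0.0.0.0`. Below is a minimal sketch of that same fallback chain written as one expression, assuming the salvo API already used in the diff (`req.headers()`, `remote_addr()`, `as_ipv4()`, `as_ipv6()`); the helper names `header_ip` and `guess_ip_flat` are hypothetical and only for illustration.

```rust
use salvo::Request;

// Hypothetical helper: read a header and return it as a String, skipping non-UTF-8 values.
fn header_ip(req: &Request, name: &str) -> Option<String> {
    req.headers()
        .get(name)
        .and_then(|value| value.to_str().ok())
        .map(|value| value.to_string())
}

// Sketch of the CF-Connecting-IP -> X-Real-IP -> remote_addr -> "0.0.0.0" priority chain.
pub fn guess_ip_flat(req: &mut Request) -> String {
    header_ip(req, "CF-Connecting-IP")
        .or_else(|| header_ip(req, "X-Real-IP"))
        .or_else(|| {
            // Fall back to the raw socket address; unix sockets carry no IP, so they fall through.
            let addr = req.remote_addr()?.clone();
            addr.as_ipv4()
                .map(|a| a.ip().to_string())
                .or_else(|| addr.as_ipv6().map(|a| a.ip().to_string()))
        })
        .unwrap_or_else(|| "0.0.0.0".to_string())
}
```

Keeping every fallback in one expression should also make the nginx/IPv6 case from the docs/info.md TODO easier to exercise in a unit test.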