@@ -5,7 +5,7 @@ use salvo::prelude::*;
use salvo::serve_static::{StaticDir, StaticFile};
use sqlx::SqlitePool;

use chrono::{DateTime, TimeZone, Utc};
use chrono::{TimeZone, Utc};
use rand::Rng;
use std::fs::create_dir_all;
use std::path::Path;

@@ -15,12 +15,23 @@ use tokio::{task, time};
use tracing_subscriber::filter::EnvFilter;
use tracing_subscriber::fmt;
use tracing_subscriber::prelude::*;
use config::Config;
use lazy_static::lazy_static;

// Import sub-modules.
mod db;
mod engine;

// Set up the global SQLite DB.
static SQLITE: OnceCell<SqlitePool> = OnceCell::new();
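// SQLITE is set once in main() after the pool is created; handlers and the cleaner task read it back with SQLITE.get().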
// Set up the config globally because I can't figure out how to pass it to functions I don't call directly.
// This is evaluated at runtime, not at compile time. \o/
lazy_static! {
    pub static ref CONFIG: Config = Config::builder()
        .add_source(config::File::with_name("config.toml"))
        .build()
        .unwrap();
}
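// NOTE: the config is loaded lazily; if config.toml is missing or unreadable, the unwrap above panics the first time CONFIG is dereferenced.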

// This is needed for templating: all the 'variables' go here!
#[derive(Content)]

@@ -37,10 +48,9 @@ async fn index(req: &mut Request, res: &mut Response) {
    // Get the headers (for Host header stuff that's needed later)
    let headers = req.headers();
    // build the path we need for the template.
    let template_with_host = "./templates/".to_owned() + headers[HOST].to_str().unwrap();
    let template_with_host = "./templates/".to_owned() + headers[HOST].to_str().unwrap_or_else(|_| "localhost:8282");
    // Now we need to set up the templating engine.
    // TODO: replace unwrap with error handling for templates not being found.
    let tpls: Ramhorns = Ramhorns::from_folder(template_with_host).unwrap();
    let tpls: Ramhorns = Ramhorns::from_folder(template_with_host).expect("Unable to find template, please place the templates correctly!");
    let rendered = tpls.get("upload.html").unwrap().render(&"");
    // Removed templating for debugging multiple template dirs - I should probably add it back in.
    res.render(Text::Html(rendered));

@@ -51,7 +61,8 @@ async fn serve_file(req: &mut Request, res: &mut Response) {
    let headers = req.headers();
    let sqlconn = SQLITE.get().unwrap();
    // Check if the filename exists in the DB
    let filename: String = req.param("file").unwrap_or_default();
    let filename: String = req.param("file").unwrap();
    let filetype: String = engine::get_filetype(filename.clone()).unwrap_or("".to_string());
    let valid = db::check_filename(sqlconn, filename.clone()).await;

    if !valid {

@@ -74,10 +85,42 @@ async fn serve_file(req: &mut Request, res: &mut Response) {
        return;
    }

    // override the mimetype if it's part of unsafe extensions
    let r#unsafe = CONFIG.get_array("operations.unsafe_extensions").expect("Couldn't find 'unsafe_extensions' in config. :(");
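    // Forcing text/plain below presumably keeps browsers from rendering or executing
    // uploads with these extensions (e.g. HTML/JS) when they are viewed directly.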
    for ext in r#unsafe {
        // compare each value with the filetype.
        if ext.clone().into_string().unwrap() == filetype.clone() {
            tracing::info!("Unsafe Extension Filtered: {:?}", ext.clone().into_string().unwrap());
            // Try overriding the content-type, otherwise throw an error.
            let addheader = res.add_header("Content-Type", "text/plain", true);
            if addheader.is_err() {
                tracing::error!("Failed overwriting Content-Type {:?}", ext.clone().into_string().unwrap());
                let template_with_host = "./templates/".to_owned() + headers[HOST].to_str().unwrap();
                // Now we need to set up the templating engine.
                // Yuck, we need to set up a struct
                let tpls: Ramhorns = Ramhorns::from_folder(template_with_host).unwrap();
                let template = TemplateStruct {
                    domain: String::from(headers[HOST].to_str().unwrap()),
                    filename: String::from(""),
                    adminkey: String::from(""),
                    message1: String::from("Error 500: Internal Server Error"),
                    message2: String::from(
                        "This shouldn't happen, but it did. Tell the admin there was a problem overriding the content-type header.",
                    ),
                };
                let rendered = tpls.get("error.html").unwrap().render(&template);
                res.set_status_code(StatusCode::INTERNAL_SERVER_ERROR);
                res.render(Text::Html(rendered));
            }
        };
    }
    // X-Accel-Redirect lets nginx serve the file directly, instead of us doing all that hard work.
    let xsend = "/files/".to_string() + &filename.to_string();
    res.add_header("X-Accel-Redirect", xsend, true).unwrap();
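    // For X-Accel-Redirect to work, nginx needs an internal location that maps /files/
    // to the storage directory, roughly (assumed config, adjust the path):
    //     location /files/ { internal; alias /path/to/uploads/; }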

    // Go through all the headers and print them out, just to check for now!
    tracing::debug!("response headers: {:?}", res.headers());

    // Get the current unix time
    let accessed = SystemTime::now()
        .duration_since(SystemTime::UNIX_EPOCH)

@@ -86,10 +129,14 @@ async fn serve_file(req: &mut Request, res: &mut Response) {
    tracing::info!("New File View: {:?}", &filename.to_string());

    db::update_fileview(sqlconn, filename.clone(), accessed).await;
    // Recalculate expiry for some enginemodes
    // Recalculate expiry for enginemode 1
    // we don't need filesize here, so it's 0.
    let filesize = 0;
    // TODO: This will recalculate no matter what, even if engine mode is 2 :/
    engine::calculate_expiry(sqlconn, filename.clone(), filesize).await;
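    // One possible fix for the TODO above (assuming the config exposes an engine-mode key,
    // e.g. "operations.engine_mode"): only call calculate_expiry when that mode is 1.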

    // TODO: Add actual file serving from the disk HERE, since salvo's built-in way breaks content-type header.

}

// This takes the adminkey in, and deletes the file that matches it in the DB.

@@ -97,7 +144,7 @@ async fn serve_file(req: &mut Request, res: &mut Response) {
async fn delete_file(req: &mut Request, res: &mut Response) {
    let headers = req.headers();
    let sqlconn = SQLITE.get().unwrap();
    let adminkey: &str = req.param("adminkey").unwrap_or_default();
    let adminkey: &str = req.param("adminkey").unwrap();
    tracing::debug!("delete_file(adminkey): {:?}", adminkey);
    // Checks if the adminkey is valid, and the file is active.
    let filename = db::check_adminkey(sqlconn, adminkey.to_string()).await;

@@ -149,10 +196,32 @@ async fn upload(req: &mut Request, res: &mut Response) {
    // Generate new filename.
    // TODO: Do all the checks to make sure we actually want to generate a new filename (needs config working)
    // Convert Option<&str> to Option<String> (and then generate a new filename for it)
    let filename = engine::generate_filename(file.name().unwrap_or("file").to_string()).await;
    // We should now check if the filename isn't already in the DB.
    let filename = engine::generate_filename(CONFIG.clone(), file.name().unwrap_or("file").to_string()).await;
    // TODO: We should now check if the filename isn't already in the DB.

    // Grab the filetype from the filename
    let filetype = engine::get_filetype(file.name().unwrap_or("file").to_string());
    // Check if the filetype is on the 'banned' list
    let banned = CONFIG.get_array("operations.banned_extensions").expect("Couldn't find 'banned_extensions' in config. :(");
    for ext in banned {
        // compare each value with the filetype.
        if ext.clone().into_string().unwrap() == filetype.clone().unwrap() {
            tracing::info!("Upload was blocked due to blocked extension: {:?}", ext.clone().into_string());
            let template_with_host = "./templates/".to_owned() + headers[HOST].to_str().unwrap();
            let tpls: Ramhorns = Ramhorns::from_folder(template_with_host).unwrap();
            let template = TemplateStruct {
                domain: String::from(headers[HOST].to_str().unwrap()),
                filename: String::from(""),
                adminkey: String::from(""),
                message1: String::from("Error 403"),
                message2: String::from("That filetype is not allowed."),
            };
            let rendered = tpls.get("error.html").unwrap().render(&template);
            res.set_status_code(StatusCode::FORBIDDEN);
            res.render(Text::Html(rendered));
        }
    }

    let adminkey = engine::generate_adminkey(sqlconn).await;

    tracing::debug!("upload(filename, adminkey): {:?}, {:?}", filename, adminkey);

@@ -252,11 +321,11 @@ async fn upload(req: &mut Request, res: &mut Response) {
#[handler]
async fn serve_static(req: &mut Request, res: &mut Response) {
    let headers = req.headers().clone();
    let host = headers[HOST].to_str().unwrap();
    let host = headers[HOST].to_str().unwrap_or_else(|_| "None");
    match req.uri().path() {
        "/services" => {
            tracing::info!("New Request: /services");
            let template_with_host = "./templates/".to_owned() + headers[HOST].to_str().unwrap();
            let template_with_host = "./templates/".to_owned() + host;
            let tpls: Ramhorns = Ramhorns::from_folder(template_with_host).unwrap();
            let template = TemplateStruct {
                domain: String::from(headers[HOST].to_str().unwrap()),

@@ -342,8 +411,8 @@ async fn main() {
    // TODO: Figure out how to make it default to INFO level.
    // TODO: Disable salvo_extra::logging for info, add it on debug level instead.
    tracing_subscriber::registry()
        .with(fmt::layer())
        .with(EnvFilter::from_env("EPHEMERAL_LOG"))
        .with(fmt::layer().compact()) // Make sure the logging is pretty.
        .with(EnvFilter::from_env("LOG")) // Grab the info from the envvar
        .init();
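    // e.g. setting the named env var to "debug" enables the tracing::debug! output used in the handlers above.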

    // Set up DB Pool!

@@ -351,11 +420,10 @@ async fn main() {
    // Sets the db pool to the static thingy, so we can access it /anywhere!/
    SQLITE.set(pool).unwrap();

    // Set up the cleaner thread!
    // Get the engine mode from the config
    let interval = 1800; // 30 Minutes
    // Will awaiting on this wait until the loop is finished? I hope not....
    cleaner_thread(interval);
    // Initialise the cleaner task
    let interval = CONFIG.get_int("operations.cleaner_interval").expect("Couldn't find 'cleaner_interval' in config. :(");
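    // cleaner_interval is in seconds (the old hardcoded default above was 1800, i.e. 30 minutes).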
    tracing::info!("interval: {}", interval);
    cleaner_thread(interval.try_into().expect("Cleaner interval was too long to fit in an i32.... wow"));

    // Create the tables if they don't already exist
    let (filesdb, qrscandb) = tokio::join!(

@@ -398,8 +466,8 @@ async fn main() {
    );

    // Read environment variables for host and port
    let host = env::var("HOST").unwrap_or_else(|_| "127.0.0.1".to_owned());
    let port = env::var("PORT").unwrap_or_else(|_| "8282".to_owned());
    let host = CONFIG.get_string("server.host").expect("Couldn't find 'host' in config. :(");
    let port = CONFIG.get_int("server.port").expect("Couldn't find 'port' in config. :(");
    let server_url = format!("{}:{}", host, port);
    tracing::info!("Listening on http://{}", server_url);
    Server::new(TcpListener::bind(&server_url))

@@ -410,7 +478,7 @@ async fn main() {
// This spawns a tokio task to run an interval timer forever.
// the interval timer runs every 'period' seconds.
pub fn cleaner_thread(period: i32) {
    let forever = task::spawn(async move {
    let _forever = task::spawn(async move {
        let mut interval = time::interval(Duration::from_secs(period.try_into().unwrap()));
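        // Note: tokio's interval yields its first tick immediately rather than after 'period' seconds.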
        let sqlconn = SQLITE.get().unwrap();
        loop {

@@ -422,10 +490,7 @@ pub fn cleaner_thread(period: i32) {
            // For each file in old_files, delete them.
            for file in old_files {
                db::delete_file(sqlconn, file.clone()).await;
                let files = engine::delete_file(file.clone()).await;
                if files.is_err() {
                    tracing::info!("Failed to delete file: {:?}", file);
                }
                engine::delete_file(file.clone()).await;
            }
            tracing::info!("Cleaner finished")
        }