Skip to content
GitLab
Explore
Sign in
Primary navigation
Search or go to…
Project
E
Ephemeral
Manage
Activity
Members
Labels
Plan
Issues
Issue boards
Milestones
Wiki
Code
Merge requests
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Snippets
Build
Pipelines
Jobs
Pipeline schedules
Artifacts
Deploy
Releases
Package registry
Model registry
Operate
Environments
Terraform modules
Monitor
Incidents
Analyze
Value stream analytics
Contributor analytics
CI/CD analytics
Repository analytics
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
Community forum
Contribute to GitLab
Provide feedback
Terms and privacy
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
Volkor Barbarian Warrior
Ephemeral
Commits
adaf5956
Unverified
Commit
adaf5956
authored
2 years ago
by
Volkor Barbarian Warrior
Browse files
Options
Downloads
Patches
Plain Diff
re-write db functions to use query macro instead
parent
e114edd4
No related branches found
Branches containing commit
No related tags found
No related merge requests found
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
src/db.rs
+85
-66
85 additions, 66 deletions
src/db.rs
src/main.rs
+9
-4
9 additions, 4 deletions
src/main.rs
with
94 additions
and
70 deletions
src/db.rs
+
85
−
66
View file @
adaf5956
...
@@ -18,8 +18,8 @@ pub async fn add_file(
...
@@ -18,8 +18,8 @@ pub async fn add_file(
)
->
Result
<
SqliteQueryResult
,
sqlx
::
Error
>
{
)
->
Result
<
SqliteQueryResult
,
sqlx
::
Error
>
{
// We need to send the filetype, but only sometimes.
// We need to send the filetype, but only sometimes.
if
filetype
.is_none
()
{
if
filetype
.is_none
()
{
// Filetype is none, so don't send the filetype in the sql query
!
// Filetype is none, so don't send the filetype in the sql query
let
result
=
sqlx
::
query
(
let
result
=
sqlx
::
query
!
(
"INSERT INTO files (
"INSERT INTO files (
file,
file,
expiry,
expiry,
...
@@ -29,24 +29,24 @@ pub async fn add_file(
...
@@ -29,24 +29,24 @@ pub async fn add_file(
ip,
ip,
domain)
domain)
VALUES ( ?,?,?,?,?,?,? )"
,
VALUES ( ?,?,?,?,?,?,? )"
,
file
,
expiry
,
adminkey
,
accessed
,
filesize
,
ip
,
domain
)
)
.bind
(
file
)
.bind
(
expiry
)
.bind
(
adminkey
)
.bind
(
accessed
)
.bind
(
filesize
)
.bind
(
ip
)
.bind
(
domain
)
.execute
(
sqlconn
)
.execute
(
sqlconn
)
.await
;
.await
;
tracing
::
debug!
(
"add_file.filetype.is_none(Added file to the database.)"
);
tracing
::
debug!
(
"add_file.filetype.is_none(Added file to the database.)"
);
result
result
}
else
{
}
else
{
// Filetype is NOT none, so send the filetype in the sql query
!
// Filetype is NOT none, so send the filetype in the sql query
let
result
=
sqlx
::
query
(
let
result
=
sqlx
::
query
!
(
"INSERT INTO files (
"INSERT INTO files (
file,
file,
filetype,
filetype,
expiry,
expiry,
adminkey,
adminkey,
accessed,
accessed,
...
@@ -54,15 +54,15 @@ pub async fn add_file(
...
@@ -54,15 +54,15 @@ pub async fn add_file(
ip,
ip,
domain)
domain)
VALUES ( ?,?,?,?,?,?,?,? )"
,
VALUES ( ?,?,?,?,?,?,?,? )"
,
file
,
filetype
,
expiry
,
adminkey
,
accessed
,
filesize
,
ip
,
domain
)
)
.bind
(
file
)
.bind
(
filetype
)
.bind
(
expiry
)
.bind
(
adminkey
)
.bind
(
accessed
)
.bind
(
filesize
)
.bind
(
ip
)
.bind
(
domain
)
.execute
(
sqlconn
)
.execute
(
sqlconn
)
.await
;
.await
;
tracing
::
debug!
(
"add_file.filetype.not_none(Added file to the database.)"
);
tracing
::
debug!
(
"add_file.filetype.not_none(Added file to the database.)"
);
...
@@ -76,14 +76,20 @@ pub async fn add_file(
...
@@ -76,14 +76,20 @@ pub async fn add_file(
// This function checks if the filename actually exists in the DB, returning true if it does, false otherwise.
// This function checks if the filename actually exists in the DB, returning true if it does, false otherwise.
// This /can/ return true if there are multiple files with the same name, as we're not really interested in checking for errors here.
// This /can/ return true if there are multiple files with the same name, as we're not really interested in checking for errors here.
pub
async
fn
check_filename
(
sqlconn
:
&
Pool
<
Sqlite
>
,
filename
:
String
)
->
bool
{
pub
async
fn
check_filename
(
sqlconn
:
&
Pool
<
Sqlite
>
,
filename
:
String
)
->
bool
{
let
result
=
sqlx
::
query
(
"SELECT COUNT(*) FROM files WHERE file = ? and isDeleted = 0"
)
let
result
=
sqlx
::
query!
(
.bind
(
&
filename
)
"SELECT COUNT(*) as count
.fetch_one
(
sqlconn
)
FROM files
.await
WHERE file = ? and isDeleted = 0"
,
.unwrap
();
filename
let
filecount
:
i32
=
result
.get
(
"COUNT(*)"
);
)
tracing
::
debug!
(
"check_file(filecount: {:?})"
,
filecount
);
.fetch_one
(
sqlconn
)
match
filecount
{
.await
.unwrap
();
tracing
::
debug!
(
"check_file(filecount: {:?})"
,
result
.count
);
// Check if there are multiple files with the same name
match
result
.count
{
0
=>
false
,
// File doesn't exist :(
0
=>
false
,
// File doesn't exist :(
1
=>
true
,
// File exists!
1
=>
true
,
// File exists!
_
=>
{
_
=>
{
...
@@ -91,25 +97,26 @@ pub async fn check_filename(sqlconn: &Pool<Sqlite>, filename: String) -> bool {
...
@@ -91,25 +97,26 @@ pub async fn check_filename(sqlconn: &Pool<Sqlite>, filename: String) -> bool {
tracing
::
info!
(
tracing
::
info!
(
"Multiple files exist for file: {}, filecount: {}"
,
"Multiple files exist for file: {}, filecount: {}"
,
filename
,
filename
,
file
count
result
.
count
);
);
true
true
}
}
}
}
}
}
// Check if the adminkey corresponds to a file, and return Some(file), or None.
// This function receives an adminkey, (and sqlpool) and returns a Some(String) with the filename corresponding to the adminkey.
// It returns files that haven't already been deleted, so this is a 'single-use' operation per file.
pub
async
fn
check_adminkey
(
sqlconn
:
&
Pool
<
Sqlite
>
,
adminkey
:
String
)
->
Option
<
String
>
{
pub
async
fn
check_adminkey
(
sqlconn
:
&
Pool
<
Sqlite
>
,
adminkey
:
String
)
->
Option
<
String
>
{
// Make sure isDeleted = 0, so we don't try to delete files that are already deleted.
let
result
=
sqlx
::
query!
(
// Making this function into a 'single use' per file.
"SELECT file FROM files WHERE adminkey = ? AND isDeleted = 0"
,
let
result
=
sqlx
::
query
(
"SELECT file FROM files WHERE adminkey = ? AND isDeleted = 0"
)
adminkey
.bind
(
&
adminkey
)
)
.fetch_one
(
sqlconn
)
.fetch_one
(
sqlconn
)
.await
;
.await
;
if
result
.is_err
()
{
if
result
.is_err
()
{
return
None
;
return
None
;
}
else
{
}
else
{
let
filename
:
String
=
result
.unwrap
()
.
get
(
"
file
"
)
;
let
filename
:
String
=
result
.unwrap
()
.file
;
tracing
::
debug!
(
"check_adminkey(filename: {:?})"
,
filename
);
tracing
::
debug!
(
"check_adminkey(filename: {:?})"
,
filename
);
if
filename
.is_empty
()
{
if
filename
.is_empty
()
{
None
None
...
@@ -119,55 +126,67 @@ pub async fn check_adminkey(sqlconn: &Pool<Sqlite>, adminkey: String) -> Option<
...
@@ -119,55 +126,67 @@ pub async fn check_adminkey(sqlconn: &Pool<Sqlite>, adminkey: String) -> Option<
}
}
}
}
// Marking a file as deleted in the DB (Doesn't delete the file on disk).
//
// Takes the filename (and sqlpool), flips the `isDeleted` flag, and
// returns Some(rows_affected), or None if the query failed.
pub async fn delete_file(sqlconn: &Pool<Sqlite>, filename: String) -> Option<u64> {
    // Log label fixed: this argument is the filename, not the adminkey.
    tracing::debug!("delete_file(filename: {})", filename);
    let result = sqlx::query!("UPDATE files SET isDeleted = 1 WHERE file = ?", filename)
        .execute(sqlconn)
        .await;
    // A failed UPDATE (e.g. connection error) becomes None; otherwise
    // report how many rows were touched.
    result.map(|r| r.rows_affected()).ok()
}
// Updating a files viewcount
// Updating a files viewcount and accesstime.
pub
async
fn
update_fileview
(
// This receives the a String with the filename, i32 unix timestamp (and sqlpool)
sqlconn
:
&
Pool
<
Sqlite
>
,
// This returns a Some(u64), with the number of rows affected.
filename
:
String
,
pub
async
fn
update_fileview
(
sqlconn
:
&
Pool
<
Sqlite
>
,
filename
:
String
,
accessed
:
i32
)
->
Option
<
u64
>
{
accessed
:
i32
,
let
result
=
sqlx
::
query!
(
)
->
SqliteQueryResult
{
"UPDATE files SET accessed = ?, views = views + 1 WHERE file = ?"
,
sqlx
::
query
(
"UPDATE files SET accessed = ?, views = views + 1 WHERE file = ?"
)
accessed
,
.bind
(
accessed
)
filename
.bind
(
filename
)
)
.execute
(
sqlconn
)
.execute
(
sqlconn
)
.await
.await
;
.unwrap
()
// TODO: Check for row affected, and give a Result
if
result
.is_err
()
{
// If error, return none
return
None
;
}
else
{
return
Some
(
result
.unwrap
()
.rows_affected
());
}
}
}
// Returns the unix timestamp of the last access - 0 if unviewed.
// Returns the unix timestamp of the last access - 0 if unviewed.
// This doesn't do the basic error handling like the above functions, this re-write it completely.
pub
async
fn
get_accesss_time
(
sqlconn
:
&
Pool
<
Sqlite
>
,
filename
:
String
)
->
i32
{
pub
async
fn
get_accesss_time
(
sqlconn
:
&
Pool
<
Sqlite
>
,
filename
:
String
)
->
i32
{
let
result
=
sqlx
::
query
(
"SELECT accessed FROM files WHERE file = ?"
)
let
result
=
sqlx
::
query!
(
"SELECT accessed FROM files WHERE file = ?"
,
filename
)
.bind
(
&
filename
)
.fetch_one
(
sqlconn
)
.fetch_one
(
sqlconn
)
.await
;
.await
;
// TODO: We should probably handle /all/ the errors, but idk how.
if
result
.is_err
()
{
if
result
.is_err
()
{
// If result is an error, very likely this is the first upload of the file, so lets return the current time!
// If result is an error, very likely this is the first upload of the file, so lets return the current time!
// If it isn't, we're in trouble.
return
SystemTime
::
now
()
return
SystemTime
::
now
()
.duration_since
(
SystemTime
::
UNIX_EPOCH
)
.duration_since
(
SystemTime
::
UNIX_EPOCH
)
.unwrap
()
.unwrap
()
.as_secs
()
as
i32
;
.as_secs
()
as
i32
;
}
else
{
// This will only panic if the i64 doesn't fit into the i32
// I guess I'll have to patch this in 2038 anyway.
let
accesstime
:
i32
=
result
.unwrap
()
.accessed
.try_into
()
.unwrap
();
tracing
::
debug!
(
"get_accesss_time(filename: {}: {:?})"
,
filename
,
accesstime
.clone
()
);
accesstime
as
i32
}
}
let
accesstime
:
i32
=
result
.unwrap
()
.get
(
"accessed"
);
tracing
::
debug!
(
"get_accesss_time(filename: {}: {:?})"
,
filename
,
accesstime
.clone
()
);
accesstime
as
i32
}
}
// Generating a list of files that should be deleted
// Generating a list of files that should be deleted
...
...
This diff is collapsed.
Click to expand it.
src/main.rs
+
9
−
4
View file @
adaf5956
...
@@ -88,7 +88,7 @@ async fn serve_file(req: &mut Request, res: &mut Response) {
...
@@ -88,7 +88,7 @@ async fn serve_file(req: &mut Request, res: &mut Response) {
return
;
return
;
}
}
// override the mimetype if it's part of unsafe extensions
// override the mimetype if it's part of unsafe extensions
let
r
#
unsafe
=
CONFIG
let
r
#
unsafe
=
CONFIG
.get_array
(
"operations.unsafe_extensions"
)
.get_array
(
"operations.unsafe_extensions"
)
.expect
(
"Couldn't find 'unsafe_extensions' in config. :("
);
.expect
(
"Couldn't find 'unsafe_extensions' in config. :("
);
...
@@ -147,7 +147,9 @@ async fn serve_file(req: &mut Request, res: &mut Response) {
...
@@ -147,7 +147,9 @@ async fn serve_file(req: &mut Request, res: &mut Response) {
// TODO: Add actual file serving from the disk HERE, since salvo's built-in way breaks content-type header.
// TODO: Add actual file serving from the disk HERE, since salvo's built-in way breaks content-type header.
// Check if nginx sendfile is enabled, because we can skip this if it is.
// Check if nginx sendfile is enabled, because we can skip this if it is.
let
nginxsendfile
=
CONFIG
.get_bool
(
"server.nginx_sendfile"
)
.expect
(
"Couldn't find 'nginx_sendfile' in config. :("
);
let
nginxsendfile
=
CONFIG
.get_bool
(
"server.nginx_sendfile"
)
.expect
(
"Couldn't find 'nginx_sendfile' in config. :("
);
if
nginxsendfile
{
if
nginxsendfile
{
// Add the header, and we're done.
// Add the header, and we're done.
...
@@ -156,7 +158,7 @@ async fn serve_file(req: &mut Request, res: &mut Response) {
...
@@ -156,7 +158,7 @@ async fn serve_file(req: &mut Request, res: &mut Response) {
res
.add_header
(
"X-Accel-Redirect"
,
xsend
,
true
)
.unwrap
();
res
.add_header
(
"X-Accel-Redirect"
,
xsend
,
true
)
.unwrap
();
// We don't really need to update the content-type header, since nginx handles that (TODO: Test this lol)
// We don't really need to update the content-type header, since nginx handles that (TODO: Test this lol)
return
return
;
}
else
{
}
else
{
// If nginx sendfile is disabled, we need to render the file directly
// If nginx sendfile is disabled, we need to render the file directly
let
filepath
=
"files/"
.to_string
()
+
&
filename
.to_string
();
let
filepath
=
"files/"
.to_string
()
+
&
filename
.to_string
();
...
@@ -164,7 +166,10 @@ async fn serve_file(req: &mut Request, res: &mut Response) {
...
@@ -164,7 +166,10 @@ async fn serve_file(req: &mut Request, res: &mut Response) {
// If the content-type header is already set, we don't need to update this
// If the content-type header is already set, we don't need to update this
if
res
.headers
()
.contains_key
(
"content-type"
)
{
if
res
.headers
()
.contains_key
(
"content-type"
)
{
tracing
::
debug!
(
"content-type exists: content-type: {:?}"
,
res
.headers
()
.get
(
"content-type"
)
.unwrap
());
tracing
::
debug!
(
"content-type exists: content-type: {:?}"
,
res
.headers
()
.get
(
"content-type"
)
.unwrap
()
);
file
.send
(
headers
,
res
)
.await
;
file
.send
(
headers
,
res
)
.await
;
// This complains about us writing into ResBody::Stream, but it means it renders as plaintext, so who cares.
// This complains about us writing into ResBody::Stream, but it means it renders as plaintext, so who cares.
res
.render
(
""
);
res
.render
(
""
);
...
...
This diff is collapsed.
Click to expand it.
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment