Commit bbe6cc9: fmt, clippy
thesuzerain committed Oct 5, 2023 (1 parent: 3cead46)
This repository has been archived by the owner on Oct 19, 2024. It is now read-only.
Showing 8 changed files with 582 additions and 242 deletions.
51 changes: 25 additions & 26 deletions src/database/models/organization_item.rs
@@ -1,4 +1,7 @@
use crate::{models::ids::base62_impl::{parse_base62, to_base62}, database::redis::RedisPool};
use crate::{
database::redis::RedisPool,
models::ids::base62_impl::{parse_base62, to_base62},
};

use super::{ids::*, TeamMember};
use serde::{Deserialize, Serialize};
@@ -117,23 +120,23 @@ impl Organization {

organization_ids.append(
&mut redis
.multi_get::<i64, _>(
ORGANIZATIONS_TITLES_NAMESPACE,
organization_strings
.iter()
.map(|x| x.to_string().to_lowercase())
.collect(),
)
.await?
.into_iter()
.flatten()
.collect()
.multi_get::<i64, _>(
ORGANIZATIONS_TITLES_NAMESPACE,
organization_strings
.iter()
.map(|x| x.to_string().to_lowercase())
.collect(),
)
.await?
.into_iter()
.flatten()
.collect(),
);

if !organization_ids.is_empty() {
let organizations = redis
.multi_get::<String, _>(ORGANIZATIONS_NAMESPACE, organization_ids)
.await?;
.multi_get::<String, _>(ORGANIZATIONS_NAMESPACE, organization_ids)
.await?;

for organization in organizations {
if let Some(organization) =
@@ -185,13 +188,13 @@ impl Organization {

for organization in organizations {
redis
.set(
ORGANIZATIONS_NAMESPACE,
organization.id.0,
serde_json::to_string(&organization)?,
None,
)
.await?;
.set(
ORGANIZATIONS_NAMESPACE,
organization.id.0,
serde_json::to_string(&organization)?,
None,
)
.await?;
redis
.set(
ORGANIZATIONS_TITLES_NAMESPACE,
@@ -316,14 +319,10 @@ impl Organization {
title: Option<String>,
redis: &RedisPool,
) -> Result<(), super::DatabaseError> {

redis.delete(ORGANIZATIONS_NAMESPACE, id.0).await?;
if let Some(title) = title {
redis
.delete(
ORGANIZATIONS_TITLES_NAMESPACE,
title.to_lowercase(),
)
.delete(ORGANIZATIONS_TITLES_NAMESPACE, title.to_lowercase())
.await?;
}

2 changes: 1 addition & 1 deletion src/database/models/project_item.rs
@@ -571,7 +571,7 @@ impl Project {
.try_filter_map(|e| async {
Ok(e.right().map(|m| {
let id = m.id;

QueryProject {
inner: Project {
id: ProjectId(id),
5 changes: 4 additions & 1 deletion src/database/models/team_item.rs
@@ -1,5 +1,8 @@
use super::{ids::*, Organization, Project};
use crate::{database::redis::RedisPool, models::teams::{OrganizationPermissions, ProjectPermissions}};
use crate::{
database::redis::RedisPool,
models::teams::{OrganizationPermissions, ProjectPermissions},
};
use itertools::Itertools;
use rust_decimal::Decimal;
use serde::{Deserialize, Serialize};
38 changes: 19 additions & 19 deletions tests/common/database.rs
@@ -22,35 +22,35 @@ pub const FRIEND_USER_ID_PARSED: i64 = 4;
pub const ENEMY_USER_ID_PARSED: i64 = 5;

// These are full-scoped PATs- as if the user was logged in (including illegal scopes).
pub const ADMIN_USER_PAT : &str = "mrp_patadmin";
pub const MOD_USER_PAT : &str = "mrp_patmoderator";
pub const USER_USER_PAT : &str = "mrp_patuser";
pub const FRIEND_USER_PAT : &str = "mrp_patfriend";
pub const ENEMY_USER_PAT : &str = "mrp_patenemy";
pub const ADMIN_USER_PAT: &str = "mrp_patadmin";
pub const MOD_USER_PAT: &str = "mrp_patmoderator";
pub const USER_USER_PAT: &str = "mrp_patuser";
pub const FRIEND_USER_PAT: &str = "mrp_patfriend";
pub const ENEMY_USER_PAT: &str = "mrp_patenemy";

// There are two test projects. They are both created by user 3 (USER_USER_ID).
// They differ only in that 'ALPHA' is a public, approved project, and 'BETA' is a private project in the queue.
// The same goes for their corresponding versions- one listed, one draft.
pub const PROJECT_ALPHA_TEAM_ID : &str = "1c";
pub const PROJECT_BETA_TEAM_ID : &str = "1d";
pub const PROJECT_ALPHA_TEAM_ID: &str = "1c";
pub const PROJECT_BETA_TEAM_ID: &str = "1d";

pub const PROJECT_ALPHA_PROJECT_ID : &str = "G8";
pub const PROJECT_BETA_PROJECT_ID : &str = "G9";
pub const PROJECT_ALPHA_PROJECT_ID: &str = "G8";
pub const PROJECT_BETA_PROJECT_ID: &str = "G9";

pub const PROJECT_ALPHA_PROJECT_SLUG : &str = "testslug";
pub const PROJECT_BETA_PROJECT_SLUG : &str = "testslug2";
pub const PROJECT_ALPHA_PROJECT_SLUG: &str = "testslug";
pub const PROJECT_BETA_PROJECT_SLUG: &str = "testslug2";

pub const PROJECT_ALPHA_VERSION_ID : &str = "Hk";
pub const PROJECT_BETA_VERSION_ID : &str = "Hl";
pub const PROJECT_ALPHA_VERSION_ID: &str = "Hk";
pub const PROJECT_BETA_VERSION_ID: &str = "Hl";

// These are threads created alongside the projects.
pub const PROJECT_ALPHA_THREAD_ID : &str = "U";
pub const PROJECT_BETA_THREAD_ID : &str = "V";
pub const PROJECT_ALPHA_THREAD_ID: &str = "U";
pub const PROJECT_BETA_THREAD_ID: &str = "V";

// These are the hashes of the files attached to their versions: they do not reflect a 'real' hash of data.
// This can be used for /version_file/ type endpoints which get a project's data from its hash.
pub const PROJECT_ALPHA_THREAD_FILE_HASH : &str = "000000000";
pub const PROJECT_BETA_THREAD_FILE_HASH : &str = "111111111";
pub const PROJECT_ALPHA_THREAD_FILE_HASH: &str = "000000000";
pub const PROJECT_BETA_THREAD_FILE_HASH: &str = "111111111";

pub struct TemporaryDatabase {
pub pool: PgPool,
@@ -65,7 +65,7 @@ impl TemporaryDatabase {
// 3. Runs migrations on the new database
// 4. (Optionally, by using create_with_dummy) adds dummy data to the database
// If a db is created with create_with_dummy, it must be cleaned up with cleanup.
// This means that dbs will only 'remain' if a test fails (for examination of the db), and will be cleaned up otherwise.
// This means that dbs will only 'remain' if a test fails (for examination of the db), and will be cleaned up otherwise.
pub async fn create() -> Self {
let temp_database_name = generate_random_database_name();
println!("Creating temporary database: {}", &temp_database_name);
@@ -120,7 +120,7 @@ impl TemporaryDatabase {

// Deletes the temporary database
// If a temporary db is created, it must be cleaned up with cleanup.
// This means that dbs will only 'remain' if a test fails (for examination of the db), and will be cleaned up otherwise.
// This means that dbs will only 'remain' if a test fails (for examination of the db), and will be cleaned up otherwise.
pub async fn cleanup(mut self) {
let database_url = dotenvy::var("DATABASE_URL").expect("No database URL");
self.pool.close().await;
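
For reference, a minimal sketch (not part of this commit) of the TemporaryDatabase lifecycle that the comments above describe. The module path, the #[actix_rt::test] wrapper, and the `users` table queried against the dummy data are assumptions; `create_with_dummy`, `cleanup`, and the public `pool` field are taken from the comments and hunks above.

use crate::common::database::TemporaryDatabase; // assumed module path within the test crate

#[actix_rt::test]
async fn temporary_database_lifecycle_sketch() {
    // Creates a uniquely named database, connects to it, runs migrations,
    // and (with create_with_dummy) seeds the dummy data described above.
    let db = TemporaryDatabase::create_with_dummy().await;

    // The PgPool field can be queried directly; the `users` table name is an assumption.
    let user_count: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM users")
        .fetch_one(&db.pool)
        .await
        .expect("dummy data should be queryable");
    assert!(user_count > 0);

    // Must be called explicitly: a database only 'remains' when a test fails
    // before reaching this point, so it can be examined afterwards.
    db.cleanup().await;
}
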
53 changes: 35 additions & 18 deletions tests/common/environment.rs
@@ -1,20 +1,20 @@
#![allow(dead_code)]

use super::database::{TemporaryDatabase, USER_USER_ID_PARSED};
use crate::common::setup;
use actix_web::{
dev::ServiceResponse,
test::{self, TestRequest},
App,
};
use chrono::Utc;
use super::database::{TemporaryDatabase, USER_USER_ID_PARSED};
use labrinth::{
database::{self, models::generate_pat_id},
models::pats::Scopes,
};
use crate::common::setup;

// A complete test environment, with a test actix app and a database.
// Must be called in an #[actix_rt::test] context. It also simulates a
// Must be called in an #[actix_rt::test] context. It also simulates a
// temporary sqlx db like #[sqlx::test] would.
// Use .call(req) on it directly to make a test call as if test::call_service(req) were being used.
pub struct TestEnvironment {
@@ -28,7 +28,10 @@ impl TestEnvironment {
let labrinth_config = setup(&db).await;
let app = App::new().configure(|cfg| labrinth::app_config(cfg, labrinth_config.clone()));
let test_app = test::init_service(app).await;
Self { test_app: Box::new(test_app), db }
Self {
test_app: Box::new(test_app),
db,
}
}
pub async fn cleanup(self) {
self.db.cleanup().await;
@@ -39,16 +42,29 @@
}
}


trait LocalService {
fn call(&self, req: actix_http::Request) -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<ServiceResponse, actix_web::Error>>>>;
fn call(
&self,
req: actix_http::Request,
) -> std::pin::Pin<
Box<dyn std::future::Future<Output = Result<ServiceResponse, actix_web::Error>>>,
>;
}
impl<S> LocalService for S
where
S: actix_web::dev::Service<actix_http::Request, Response = ServiceResponse, Error = actix_web::Error>,
S: actix_web::dev::Service<
actix_http::Request,
Response = ServiceResponse,
Error = actix_web::Error,
>,
S::Future: 'static,
{
fn call(&self, req: actix_http::Request) -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<ServiceResponse, actix_web::Error>>>> {
fn call(
&self,
req: actix_http::Request,
) -> std::pin::Pin<
Box<dyn std::future::Future<Output = Result<ServiceResponse, actix_web::Error>>>,
> {
Box::pin(self.call(req))
}
}
@@ -58,8 +74,7 @@ where
// - returns a 200-299 if the scope is present
// - returns failure and success JSON bodies for requests that are 200 (for performing non-simple follow-up tests on)
// This uses a builder format, so you can chain methods to set the parameters to non-defaults (most will probably not need to be set).
pub struct ScopeTest<'a>
{
pub struct ScopeTest<'a> {
test_env: &'a TestEnvironment,
// Scopes expected to fail on this test. By default, this is all scopes except the success scopes.
// (To ensure we have isolated the scope we are testing)
@@ -70,8 +85,7 @@ pub struct ScopeTest<'a>
expected_failure_code: u16,
}

impl<'a> ScopeTest<'a>
{
impl<'a> ScopeTest<'a> {
pub fn new(test_env: &'a TestEnvironment) -> Self {
Self {
test_env,
@@ -107,13 +121,18 @@ impl<'a> ScopeTest<'a>
// success_scopes : the scopes that we are testing that should succeed
// returns a tuple of (failure_body, success_body)
// Should return a String error on an unexpected status code, allowing unwrapping in tests.
pub async fn test<T>(&self, req_gen: T, success_scopes: Scopes) -> Result<(serde_json::Value, serde_json::Value), String>
where T: Fn() -> TestRequest
pub async fn test<T>(
&self,
req_gen: T,
success_scopes: Scopes,
) -> Result<(serde_json::Value, serde_json::Value), String>
where
T: Fn() -> TestRequest,
{

// First, create a PAT with failure scopes
let failure_scopes = self.failure_scopes.unwrap_or(Scopes::ALL ^ success_scopes);
let access_token_all_others = create_test_pat(failure_scopes, self.user_id, &self.test_env.db).await;
let access_token_all_others =
create_test_pat(failure_scopes, self.user_id, &self.test_env.db).await;

// Create a PAT with the success scopes
let access_token = create_test_pat(success_scopes, self.user_id, &self.test_env.db).await;
@@ -164,9 +183,7 @@ impl<'a> ScopeTest<'a>
serde_json::Value::Null
};
Ok((failure_body, success_body))

}

}

// Creates a PAT with the given scopes, and returns the access token
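
For reference, a sketch (not part of this commit) of how the ScopeTest builder and TestEnvironment above might be driven from a test. The route and JSON body are placeholders, the module paths are assumed, and it is assumed that ScopeTest attaches the PATs it creates to the requests produced by the generator.

use actix_web::test::TestRequest;
use labrinth::models::pats::Scopes;

use crate::common::environment::{ScopeTest, TestEnvironment}; // assumed module path

async fn collection_create_scope_sketch(test_env: &TestEnvironment) {
    // The generator is called once per PAT; ScopeTest is assumed to attach the
    // corresponding access token before dispatching the request.
    let req_gen = || {
        TestRequest::post()
            .uri("/v2/collection") // placeholder route guarded by COLLECTION_CREATE
            .set_json(serde_json::json!({
                "title": "Scope probe",
                "description": "placeholder body",
            }))
    };

    // Every scope except COLLECTION_CREATE is expected to fail with the default
    // failure code; COLLECTION_CREATE alone is expected to succeed.
    let (_failure_body, _success_body) = ScopeTest::new(test_env)
        .test(req_gen, Scopes::COLLECTION_CREATE)
        .await
        .expect("unexpected status code while probing scopes");

    // Follow-up assertions on the returned JSON bodies would go here.
}
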
18 changes: 9 additions & 9 deletions tests/pats.rs
@@ -22,7 +22,7 @@ pub async fn pat_full_test() {

// Create a PAT for a full test
let req = test::TestRequest::post()
.uri(&"/v2/pat".to_string())
.uri("/v2/pat")
.append_header(("Authorization", USER_USER_PAT))
.set_json(json!({
"scopes": Scopes::COLLECTION_CREATE, // Collection create as an easily tested example
@@ -46,7 +46,7 @@
// Get PAT again
let req = test::TestRequest::get()
.append_header(("Authorization", USER_USER_PAT))
.uri(&"/v2/pat".to_string())
.uri("/v2/pat")
.to_request();
let resp = test_env.call(req).await;
assert_eq!(resp.status().as_u16(), 200);
@@ -62,7 +62,7 @@
let token = token.to_string();
async {
let req = test::TestRequest::post()
.uri(&"/v2/collection".to_string())
.uri("/v2/collection")
.append_header(("Authorization", token))
.set_json(json!({
"title": "Test Collection 1",
@@ -167,7 +167,7 @@ pub async fn bad_pats() {

// Creating a PAT with no name should fail
let req = test::TestRequest::post()
.uri(&"/v2/pat".to_string())
.uri("/v2/pat")
.append_header(("Authorization", USER_USER_PAT))
.set_json(json!({
"scopes": Scopes::COLLECTION_CREATE, // Collection create as an easily tested example
@@ -180,7 +180,7 @@
// Name too short or too long should fail
for name in ["n", "this_name_is_too_long".repeat(16).as_str()] {
let req = test::TestRequest::post()
.uri(&"/v2/pat".to_string())
.uri("/v2/pat")
.append_header(("Authorization", USER_USER_PAT))
.set_json(json!({
"name": name,
@@ -194,7 +194,7 @@

// Creating a PAT with an expiry in the past should fail
let req = test::TestRequest::post()
.uri(&"/v2/pat".to_string())
.uri("/v2/pat")
.append_header(("Authorization", USER_USER_PAT))
.set_json(json!({
"scopes": Scopes::COLLECTION_CREATE, // Collection create as an easily tested example
@@ -212,7 +212,7 @@
continue;
}
let req = test::TestRequest::post()
.uri(&"/v2/pat".to_string())
.uri("/v2/pat")
.append_header(("Authorization", USER_USER_PAT))
.set_json(json!({
"scopes": scope.bits(),
@@ -229,7 +229,7 @@

// Create a 'good' PAT for patching
let req = test::TestRequest::post()
.uri(&"/v2/pat".to_string())
.uri("/v2/pat")
.append_header(("Authorization", USER_USER_PAT))
.set_json(json!({
"scopes": Scopes::COLLECTION_CREATE,
@@ -245,7 +245,7 @@
// Patching to a bad name should fail
for name in ["n", "this_name_is_too_long".repeat(16).as_str()] {
let req = test::TestRequest::post()
.uri(&"/v2/pat".to_string())
.uri("/v2/pat")
.append_header(("Authorization", USER_USER_PAT))
.set_json(json!({
"name": name,
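
For reference, a sketch (not part of this commit) of a complete PAT-creation request in the shape these tests use. The hunks above only confirm the "scopes" and "name" fields; the "expires" field name, its serialized format, the expected success status, and the module paths are assumptions.

use actix_web::test::TestRequest;
use labrinth::models::pats::Scopes;

use crate::common::{database::USER_USER_PAT, environment::TestEnvironment}; // assumed paths

async fn create_pat_sketch(test_env: &TestEnvironment) {
    let req = TestRequest::post()
        .uri("/v2/pat")
        .append_header(("Authorization", USER_USER_PAT))
        .set_json(serde_json::json!({
            "scopes": Scopes::COLLECTION_CREATE.bits(),
            "name": "a reasonably named pat",
            // Assumed field: bad_pats above shows that an expiry in the past is rejected.
            "expires": chrono::Utc::now() + chrono::Duration::days(30),
        }))
        .to_request();

    let resp = test_env.call(req).await;
    assert!(resp.status().is_success());
}
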
