diff --git a/.sqlx/query-da1b024c5d7c361cd38be03d486402c16cf39b8a8d7e17455fceb93b5cd520d1.json b/.sqlx/query-d9afc8f219156159f88c413d671c87d6e62e1374613ac024cc553aabe44362f4.json similarity index 50% rename from .sqlx/query-da1b024c5d7c361cd38be03d486402c16cf39b8a8d7e17455fceb93b5cd520d1.json rename to .sqlx/query-d9afc8f219156159f88c413d671c87d6e62e1374613ac024cc553aabe44362f4.json index 1d13372..aa18bea 100644 --- a/.sqlx/query-da1b024c5d7c361cd38be03d486402c16cf39b8a8d7e17455fceb93b5cd520d1.json +++ b/.sqlx/query-d9afc8f219156159f88c413d671c87d6e62e1374613ac024cc553aabe44362f4.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "SELECT u.id,\n\t u.name,\n\t u.username,\n\t u.avatar_id,\n\t u.avatar_hex,\n\t u.public_flags,\n\t u.rendered_bio AS \"rendered_bio!\"\nFROM\n\tusers u\n\t\t-- Join stories\n\t\tINNER JOIN stories AS \"u->story\"\n\t\t\t\t ON \"u->story\".user_id = u.id\n\t\t\t\t\t AND \"u->story\".deleted_at IS NULL\n\t\t\t\t\t AND \"u->story\".published_at IS NOT NULL\nWHERE\n\t\"u->story\".category::TEXT = ANY ($1)\nORDER BY\n\tu.follower_count DESC\nLIMIT 25", + "query": "SELECT id, name, username, avatar_id, avatar_hex, public_flags, rendered_bio AS \"rendered_bio!\"\nFROM (SELECT DISTINCT u.id,\n u.name,\n u.username,\n u.avatar_id,\n u.avatar_hex,\n u.public_flags,\n u.rendered_bio,\n u.follower_count\n FROM users u\n -- Join stories\n INNER JOIN stories AS \"u->story\"\n ON \"u->story\".user_id = u.id\n AND \"u->story\".deleted_at IS NULL\n AND \"u->story\".published_at IS NOT NULL\n WHERE \"u->story\".category::TEXT = ANY ($1)\n ORDER BY\n u.follower_count DESC\n LIMIT 25)", "describe": { "columns": [ { @@ -54,5 +54,5 @@ true ] }, - "hash": "da1b024c5d7c361cd38be03d486402c16cf39b8a8d7e17455fceb93b5cd520d1" + "hash": "d9afc8f219156159f88c413d671c87d6e62e1374613ac024cc553aabe44362f4" } diff --git a/geo/db/GeoLite2-City.mmdb b/geo/db/GeoLite2-City.mmdb index ccf2fe4..0c79c57 100644 Binary files a/geo/db/GeoLite2-City.mmdb and b/geo/db/GeoLite2-City.mmdb differ diff --git a/justfile b/justfile index c9c859c..c78d632 100644 --- a/justfile +++ b/justfile @@ -27,3 +27,6 @@ udeps: update_geo: docker compose -f geo/docker-compose.yaml up + +migrate: + sqlx migrate run \ No newline at end of file diff --git a/queries/me/onboarding/writers.sql b/queries/me/onboarding/writers.sql index dab4718..97e55fc 100644 --- a/queries/me/onboarding/writers.sql +++ b/queries/me/onboarding/writers.sql @@ -1,19 +1,19 @@ -SELECT u.id, - u.name, - u.username, - u.avatar_id, - u.avatar_hex, - u.public_flags, - u.rendered_bio AS "rendered_bio!" -FROM - users u - -- Join stories - INNER JOIN stories AS "u->story" - ON "u->story".user_id = u.id - AND "u->story".deleted_at IS NULL - AND "u->story".published_at IS NOT NULL -WHERE - "u->story".category::TEXT = ANY ($1) -ORDER BY - u.follower_count DESC -LIMIT 25 \ No newline at end of file +SELECT id, name, username, avatar_id, avatar_hex, public_flags, rendered_bio AS "rendered_bio!" 
+FROM (SELECT DISTINCT u.id, + u.name, + u.username, + u.avatar_id, + u.avatar_hex, + u.public_flags, + u.rendered_bio, + u.follower_count + FROM users u + -- Join stories + INNER JOIN stories AS "u->story" + ON "u->story".user_id = u.id + AND "u->story".deleted_at IS NULL + AND "u->story".published_at IS NOT NULL + WHERE "u->story".category::TEXT = ANY ($1) + ORDER BY + u.follower_count DESC + LIMIT 25) \ No newline at end of file diff --git a/session/src/storage/redis_rs.rs b/session/src/storage/redis_rs.rs index 9c26042..061ff6d 100644 --- a/session/src/storage/redis_rs.rs +++ b/session/src/storage/redis_rs.rs @@ -4,12 +4,12 @@ use anyhow::{ Error, }; use redis::{ - aio::ConnectionManager, AsyncCommands, Cmd, FromRedisValue, RedisResult, Value, + aio::ConnectionManager, }; use std::{ convert::TryInto, @@ -19,6 +19,7 @@ use std::{ use super::SessionKey; use crate::storage::{ + SessionStore, interface::{ LoadError, SaveError, @@ -26,7 +27,6 @@ use crate::storage::{ UpdateError, }, utils::generate_session_key, - SessionStore, }; /// Use Redis as session storage backend. @@ -146,17 +146,18 @@ impl SessionStore for RedisSessionStore { let session_key = generate_session_key(user_id.map(|value| value.to_string())); let cache_key = (self.configuration.cache_keygen)(session_key.as_ref()); - self.execute_command( - redis::cmd("SET") - .arg(&cache_key) - .arg(&body) - .arg("NX") // NX: only set the key if it does not already exist - .arg("EX") // EX: set expiry - .arg(&format!("{}", ttl.whole_seconds())), - ) - .await - .map_err(Into::into) - .map_err(SaveError::Other)?; + let _: () = self + .execute_command( + redis::cmd("SET") + .arg(&cache_key) + .arg(&body) + .arg("NX") // NX: only set the key if it does not already exist + .arg("EX") // EX: set expiry + .arg(&format!("{}", ttl.whole_seconds())), + ) + .await + .map_err(Into::into) + .map_err(SaveError::Other)?; Ok(session_key) } @@ -209,7 +210,8 @@ impl SessionStore for RedisSessionStore { async fn update_ttl(&self, session_key: &SessionKey, ttl: &Duration) -> Result<(), Error> { let cache_key = (self.configuration.cache_keygen)(session_key.as_ref()); - self.client + let _: () = self + .client .clone() .expire( &cache_key, @@ -218,12 +220,14 @@ impl SessionStore for RedisSessionStore { )?, ) .await?; + Ok(()) } async fn delete(&self, session_key: &SessionKey) -> Result<(), anyhow::Error> { let cache_key = (self.configuration.cache_keygen)(session_key.as_ref()); - self.execute_command(redis::cmd("DEL").arg(&[&cache_key])) + let _: () = self + .execute_command(redis::cmd("DEL").arg(&[&cache_key])) .await .map_err(Into::into) .map_err(UpdateError::Other)?; diff --git a/src/cron/cleanup_blogs/cleanup_blogs.rs b/src/cron/cleanup_blogs/cleanup_blogs.rs index dae80e9..7cbd655 100644 --- a/src/cron/cleanup_blogs/cleanup_blogs.rs +++ b/src/cron/cleanup_blogs/cleanup_blogs.rs @@ -201,7 +201,6 @@ mod tests { }, utils::delete_s3_objects_using_prefix::delete_s3_objects_using_prefix, }; - use futures::future; use nanoid::nanoid; use sqlx::PgPool; use storiny_macros::test_context; @@ -272,7 +271,6 @@ SELECT assert_eq!(result.rows_affected(), count as u64); // Upload empty objects to S3. 
- let mut put_futures = vec![]; let mut object_keys = vec![]; object_keys.append(&mut primary_font_keys); @@ -280,18 +278,16 @@ SELECT object_keys.append(&mut code_font_keys); for key in object_keys { - put_futures.push( - s3_client - .put_object() - .bucket(S3_FONTS_BUCKET) - .key(key.to_string()) - .send(), - ); + s3_client + .put_object() + .bucket(S3_FONTS_BUCKET) + .key(key.to_string()) + .send() + .await + .unwrap(); } - future::join_all(put_futures).await; - - let object_count = count_s3_objects(s3_client, S3_FONTS_BUCKET, None, None) + let object_count = count_s3_objects(s3_client, S3_FONTS_BUCKET, None) .await .unwrap(); @@ -325,7 +321,7 @@ SELECT assert!(result.is_empty()); // Objects should not be present in the bucket. - let object_count = count_s3_objects(s3_client, S3_FONTS_BUCKET, None, None) + let object_count = count_s3_objects(s3_client, S3_FONTS_BUCKET, None) .await .unwrap(); @@ -358,7 +354,7 @@ SELECT assert!(result.is_empty()); // Objects should not be present in the bucket. - let object_count = count_s3_objects(s3_client, S3_FONTS_BUCKET, None, None) + let object_count = count_s3_objects(s3_client, S3_FONTS_BUCKET, None) .await .unwrap(); @@ -408,7 +404,7 @@ WHERE id = (SELECT id FROM selected_blog) assert_eq!(result.len(), 1); // Objects should still be present in the bucket. - let object_count = count_s3_objects(s3_client, S3_FONTS_BUCKET, None, None) + let object_count = count_s3_objects(s3_client, S3_FONTS_BUCKET, None) .await .unwrap(); diff --git a/src/cron/cleanup_s3/cleanup_s3.rs b/src/cron/cleanup_s3/cleanup_s3.rs index 29bf45a..594c559 100644 --- a/src/cron/cleanup_s3/cleanup_s3.rs +++ b/src/cron/cleanup_s3/cleanup_s3.rs @@ -334,22 +334,17 @@ SELECT UNNEST($1::UUID[]), $2, $3, $4 assert_eq!(result.rows_affected(), object_keys.len() as u64); - // Upload empty objects to S3. - let mut put_futures = vec![]; - for key in object_keys { - put_futures.push( - s3_client - .put_object() - .bucket(S3_UPLOADS_BUCKET) - .key(key.to_string()) - .send(), - ); + s3_client + .put_object() + .bucket(S3_UPLOADS_BUCKET) + .key(key.to_string()) + .send() + .await + .unwrap(); } - future::join_all(put_futures).await; - - let object_count = count_s3_objects(s3_client, S3_UPLOADS_BUCKET, None, None) + let object_count = count_s3_objects(s3_client, S3_UPLOADS_BUCKET, None) .await .unwrap(); @@ -378,21 +373,17 @@ SELECT UNNEST($1::UUID[]) assert_eq!(result.rows_affected(), object_keys.len() as u64); // Upload empty objects to S3. - let mut put_futures = vec![]; - for key in object_keys { - put_futures.push( - s3_client - .put_object() - .bucket(S3_DOCS_BUCKET) - .key(key.to_string()) - .send(), - ); + s3_client + .put_object() + .bucket(S3_DOCS_BUCKET) + .key(key.to_string()) + .send() + .await + .unwrap(); } - future::join_all(put_futures).await; - - let object_count = count_s3_objects(s3_client, S3_DOCS_BUCKET, None, None) + let object_count = count_s3_objects(s3_client, S3_DOCS_BUCKET, None) .await .unwrap(); @@ -428,7 +419,7 @@ SELECT UNNEST($1::UUID[]) assert!(result.is_empty()); // Objects should not be present in the bucket. - let object_count = count_s3_objects(s3_client, S3_UPLOADS_BUCKET, None, None) + let object_count = count_s3_objects(s3_client, S3_UPLOADS_BUCKET, None) .await .unwrap(); @@ -461,7 +452,7 @@ SELECT UNNEST($1::UUID[]) assert!(result.is_empty()); // Objects should not be present in the bucket. 
- let object_count = count_s3_objects(s3_client, S3_UPLOADS_BUCKET, None, None) + let object_count = count_s3_objects(s3_client, S3_UPLOADS_BUCKET, None) .await .unwrap(); @@ -511,7 +502,7 @@ WHERE id = (SELECT id FROM selected_asset) assert_eq!(result.len(), 1); // Object should still be present in the bucket. - let object_count = count_s3_objects(s3_client, S3_UPLOADS_BUCKET, None, None) + let object_count = count_s3_objects(s3_client, S3_UPLOADS_BUCKET, None) .await .unwrap(); @@ -546,7 +537,7 @@ WHERE id = (SELECT id FROM selected_asset) assert!(result.is_empty()); // Objects should not be present in the bucket. - let object_count = count_s3_objects(s3_client, S3_DOCS_BUCKET, None, None) + let object_count = count_s3_objects(s3_client, S3_DOCS_BUCKET, None) .await .unwrap(); @@ -579,7 +570,7 @@ WHERE id = (SELECT id FROM selected_asset) assert!(result.is_empty()); // Objects should not be present in the bucket. - let object_count = count_s3_objects(s3_client, S3_DOCS_BUCKET, None, None) + let object_count = count_s3_objects(s3_client, S3_DOCS_BUCKET, None) .await .unwrap(); diff --git a/src/cron/sitemap/presets.rs b/src/cron/sitemap/presets.rs index d8f5b12..cc076f8 100644 --- a/src/cron/sitemap/presets.rs +++ b/src/cron/sitemap/presets.rs @@ -1,8 +1,8 @@ use crate::{ + S3Client, constants::buckets::S3_SITEMAPS_BUCKET, cron::sitemap::GenerateSitemapResponse, utils::deflate_bytes_gzip::deflate_bytes_gzip, - S3Client, }; use apalis::prelude::Error; use sitemap_rs::{ @@ -103,9 +103,9 @@ mod tests { use crate::{ config::get_app_config, test_utils::{ + TestContext, count_s3_objects, get_s3_client, - TestContext, }, }; use storiny_macros::test_context; @@ -151,7 +151,6 @@ mod tests { s3_client, S3_SITEMAPS_BUCKET, Some("presets.xml".to_string()), - None, ) .await .unwrap(); diff --git a/src/cron/sitemap/sitemap.rs b/src/cron/sitemap/sitemap.rs index a562949..b4fede7 100644 --- a/src/cron/sitemap/sitemap.rs +++ b/src/cron/sitemap/sitemap.rs @@ -3,11 +3,11 @@ use crate::{ cron::{ init::SharedCronJobState, sitemap::{ + GenerateSitemapResponse, presets::generate_preset_sitemap, story::generate_story_sitemap, tag::generate_tag_sitemap, user::generate_user_sitemap, - GenerateSitemapResponse, }, }, utils::{ @@ -220,13 +220,13 @@ pub async fn refresh_sitemap( mod tests { use super::*; use crate::{ + S3Client, test_utils::{ + TestContext, count_s3_objects, get_cron_job_state_for_test, get_s3_client, - TestContext, }, - S3Client, }; use sqlx::PgPool; use storiny_macros::test_context; @@ -266,7 +266,7 @@ mod tests { assert_eq!(result.unwrap().file_count, expected_sitemap_file_count); // Sitemaps should be present in the bucket. - let sitemap_count = count_s3_objects(s3_client, S3_SITEMAPS_BUCKET, None, None) + let sitemap_count = count_s3_objects(s3_client, S3_SITEMAPS_BUCKET, None) .await .unwrap(); @@ -286,7 +286,7 @@ mod tests { assert_eq!(result.unwrap().file_count, expected_sitemap_file_count); // Sitemaps should be present in the bucket. 
- let sitemap_count = count_s3_objects(s3_client, S3_SITEMAPS_BUCKET, None, None) + let sitemap_count = count_s3_objects(s3_client, S3_SITEMAPS_BUCKET, None) .await .unwrap(); diff --git a/src/cron/sitemap/story/story.rs b/src/cron/sitemap/story/story.rs index f748ea7..ae0282c 100644 --- a/src/cron/sitemap/story/story.rs +++ b/src/cron/sitemap/story/story.rs @@ -1,11 +1,11 @@ use crate::{ + S3Client, constants::buckets::S3_SITEMAPS_BUCKET, cron::sitemap::GenerateSitemapResponse, utils::{ deflate_bytes_gzip::deflate_bytes_gzip, get_sitemap_change_freq::get_sitemap_change_freq, }, - S3Client, }; use apalis::prelude::Error; use async_recursion::async_recursion; @@ -193,9 +193,9 @@ mod tests { use crate::{ config::get_app_config, test_utils::{ + TestContext, count_s3_objects, get_s3_client, - TestContext, }, utils::delete_s3_objects_using_prefix::delete_s3_objects_using_prefix, }; @@ -250,14 +250,10 @@ mod tests { ); // Sitemaps should be present in the bucket. - let sitemap_count = count_s3_objects( - s3_client, - S3_SITEMAPS_BUCKET, - Some("stories-".to_string()), - None, - ) - .await - .unwrap(); + let sitemap_count = + count_s3_objects(s3_client, S3_SITEMAPS_BUCKET, Some("stories-".to_string())) + .await + .unwrap(); assert_eq!(sitemap_count, 1); @@ -285,14 +281,10 @@ mod tests { ); // Sitemaps should be present in the bucket. - let sitemap_count = count_s3_objects( - s3_client, - S3_SITEMAPS_BUCKET, - Some("stories-".to_string()), - None, - ) - .await - .unwrap(); + let sitemap_count = + count_s3_objects(s3_client, S3_SITEMAPS_BUCKET, Some("stories-".to_string())) + .await + .unwrap(); assert_eq!(sitemap_count, 3); diff --git a/src/cron/sitemap/tag/tag.rs b/src/cron/sitemap/tag/tag.rs index 7d762e3..89e6d10 100644 --- a/src/cron/sitemap/tag/tag.rs +++ b/src/cron/sitemap/tag/tag.rs @@ -1,8 +1,8 @@ use crate::{ + S3Client, constants::buckets::S3_SITEMAPS_BUCKET, cron::sitemap::GenerateSitemapResponse, utils::deflate_bytes_gzip::deflate_bytes_gzip, - S3Client, }; use apalis::prelude::Error; use async_recursion::async_recursion; @@ -160,9 +160,9 @@ mod tests { use crate::{ config::get_app_config, test_utils::{ + TestContext, count_s3_objects, get_s3_client, - TestContext, }, utils::delete_s3_objects_using_prefix::delete_s3_objects_using_prefix, }; @@ -217,14 +217,10 @@ mod tests { ); // Sitemaps should be present in the bucket. - let sitemap_count = count_s3_objects( - s3_client, - S3_SITEMAPS_BUCKET, - Some("tags-".to_string()), - None, - ) - .await - .unwrap(); + let sitemap_count = + count_s3_objects(s3_client, S3_SITEMAPS_BUCKET, Some("tags-".to_string())) + .await + .unwrap(); assert_eq!(sitemap_count, 1); @@ -252,14 +248,10 @@ mod tests { ); // Sitemaps should be present in the bucket. 
- let sitemap_count = count_s3_objects( - s3_client, - S3_SITEMAPS_BUCKET, - Some("tags-".to_string()), - None, - ) - .await - .unwrap(); + let sitemap_count = + count_s3_objects(s3_client, S3_SITEMAPS_BUCKET, Some("tags-".to_string())) + .await + .unwrap(); assert_eq!(sitemap_count, 3); diff --git a/src/cron/sitemap/user/user.rs b/src/cron/sitemap/user/user.rs index b42f5dc..2198234 100644 --- a/src/cron/sitemap/user/user.rs +++ b/src/cron/sitemap/user/user.rs @@ -1,4 +1,5 @@ use crate::{ + S3Client, constants::{ buckets::S3_SITEMAPS_BUCKET, image_size::ImageSize, @@ -8,7 +9,6 @@ use crate::{ deflate_bytes_gzip::deflate_bytes_gzip, get_cdn_url::get_cdn_url, }, - S3Client, }; use apalis::prelude::Error; use async_recursion::async_recursion; @@ -210,9 +210,9 @@ mod tests { use crate::{ config::get_app_config, test_utils::{ + TestContext, count_s3_objects, get_s3_client, - TestContext, }, utils::delete_s3_objects_using_prefix::delete_s3_objects_using_prefix, }; @@ -274,14 +274,10 @@ mod tests { ); // Sitemaps should be present in the bucket. - let sitemap_count = count_s3_objects( - s3_client, - S3_SITEMAPS_BUCKET, - Some("users-".to_string()), - None, - ) - .await - .unwrap(); + let sitemap_count = + count_s3_objects(s3_client, S3_SITEMAPS_BUCKET, Some("users-".to_string())) + .await + .unwrap(); assert_eq!(sitemap_count, 1); @@ -316,14 +312,10 @@ mod tests { ); // Sitemaps should be present in the bucket. - let sitemap_count = count_s3_objects( - s3_client, - S3_SITEMAPS_BUCKET, - Some("users-".to_string()), - None, - ) - .await - .unwrap(); + let sitemap_count = + count_s3_objects(s3_client, S3_SITEMAPS_BUCKET, Some("users-".to_string())) + .await + .unwrap(); assert_eq!(sitemap_count, 3); diff --git a/src/middlewares/rate_limiter.rs b/src/middlewares/rate_limiter.rs index 7560225..0c3fa91 100644 --- a/src/middlewares/rate_limiter.rs +++ b/src/middlewares/rate_limiter.rs @@ -11,8 +11,8 @@ use actix_web::{ }; use async_trait::async_trait; use redis::{ - aio::ConnectionManager, AsyncCommands, + aio::ConnectionManager, }; use std::{ borrow::Cow, @@ -167,7 +167,8 @@ impl SimpleBackend for RedisBackend { async fn remove_key(&self, key: &str) -> Result<(), Self::Error> { let key = self.make_key(key); let mut con = self.connection.clone(); - con.del(key.as_ref()).await?; + let _: () = con.del(key.as_ref()).await?; + Ok(()) } } diff --git a/src/models/photo.rs b/src/models/photo.rs index eac1ec9..be73f43 100644 --- a/src/models/photo.rs +++ b/src/models/photo.rs @@ -5,6 +5,7 @@ use serde::{ #[derive(Debug, Serialize, Deserialize)] pub struct PhotoSource { + pub original: String, pub medium: String, } diff --git a/src/realms/server.rs b/src/realms/server.rs index 3adbce7..d670cf1 100644 --- a/src/realms/server.rs +++ b/src/realms/server.rs @@ -11,9 +11,11 @@ use super::{ }, }; use crate::{ + RedisPool, + S3Client, config::{ - get_app_config, Config, + get_app_config, }, constants::{ buckets::S3_DOCS_BUCKET, @@ -26,8 +28,6 @@ use crate::{ get_user_sessions::UserSession, inflate_bytes_gzip::inflate_bytes_gzip, }, - RedisPool, - S3Client, }; use aws_sdk_s3::operation::{ get_object::GetObjectError, @@ -57,8 +57,8 @@ use time::OffsetDateTime; use tokio::{ io::AsyncReadExt, signal::unix::{ - signal, SignalKind, + signal, }, sync::{ Mutex, @@ -73,6 +73,9 @@ use tracing::{ }; use uuid::Uuid; use warp::{ + Filter, + Rejection, + Reply, http::StatusCode, reply, ws::{ @@ -80,15 +83,12 @@ use warp::{ WebSocket, Ws, }, - Filter, - Rejection, - Reply, }; use yrs::{ - updates::decoder::Decode, Doc, Transact, 
Update, + updates::decoder::Decode, }; /// The maximum number of overflowing messages that are buffered in the memory for the broadcast @@ -686,6 +686,8 @@ pub async fn start_realms_server( #[cfg(test)] pub mod tests { use crate::{ + RedisPool, + S3Client, config::get_app_config, constants::{ buckets::S3_DOCS_BUCKET, @@ -694,28 +696,26 @@ pub mod tests { }, realms::{ realm::{ - RealmMap, MAX_PEERS_PER_REALM, + RealmMap, }, server::{ - start_realms_server, EnterRealmError, + start_realms_server, }, }, test_utils::{ + RedisTestContext, + TestContext, count_s3_objects, get_redis_pool, get_s3_client, - RedisTestContext, - TestContext, }, utils::{ deflate_bytes_gzip::deflate_bytes_gzip, delete_s3_objects_using_prefix::delete_s3_objects_using_prefix, get_user_sessions::UserSession, }, - RedisPool, - S3Client, }; use cookie::{ Cookie, @@ -723,13 +723,13 @@ pub mod tests { Key, }; use futures_util::{ + SinkExt, + StreamExt, future, stream::{ SplitSink, SplitStream, }, - SinkExt, - StreamExt, }; use lockable::{ AsyncLimit, @@ -744,13 +744,13 @@ pub mod tests { use storiny_macros::test_context; use tokio::net::TcpStream; use tokio_tungstenite::{ + MaybeTlsStream, + WebSocketStream, tungstenite::{ + Message, client::IntoClientRequest, handshake::client::Request, - Message, }, - MaybeTlsStream, - WebSocketStream, }; use uuid::Uuid; use yrs::{ @@ -1725,7 +1725,7 @@ WHERE .await .unwrap(); - let documents = count_s3_objects(s3_client, S3_DOCS_BUCKET, None, None) + let documents = count_s3_objects(s3_client, S3_DOCS_BUCKET, None) .await .unwrap(); @@ -1763,7 +1763,7 @@ SELECT EXISTS ( assert!(result.get::("exists")); // Should also make a copy of the original document in the object storage. - let documents = count_s3_objects(s3_client, S3_DOCS_BUCKET, None, None) + let documents = count_s3_objects(s3_client, S3_DOCS_BUCKET, None) .await .unwrap(); @@ -1830,7 +1830,7 @@ WHERE .await .unwrap(); - let documents = count_s3_objects(s3_client, S3_DOCS_BUCKET, None, None) + let documents = count_s3_objects(s3_client, S3_DOCS_BUCKET, None) .await .unwrap(); diff --git a/src/routes/v1/blogs/subscribe/subscribe.rs b/src/routes/v1/blogs/subscribe/subscribe.rs index ac771a7..626611d 100644 --- a/src/routes/v1/blogs/subscribe/subscribe.rs +++ b/src/routes/v1/blogs/subscribe/subscribe.rs @@ -1,7 +1,8 @@ use crate::{ + AppState, amqp::consumers::templated_email::{ - TemplatedEmailMessage, TEMPLATED_EMAIL_QUEUE_NAME, + TemplatedEmailMessage, }, constants::{ email_template::EmailTemplate, @@ -23,14 +24,13 @@ use crate::{ get_cdn_url::get_cdn_url, incr_subscription_limit::incr_subscription_limit, }, - AppState, }; use actix_web::{ + HttpRequest, + HttpResponse, http::StatusCode, post, web, - HttpRequest, - HttpResponse, }; use actix_web_validator::Json; use chrono::{ @@ -38,8 +38,8 @@ use chrono::{ Local, }; use deadpool_lapin::lapin::{ - options::BasicPublishOptions, BasicProperties, + options::BasicPublishOptions, }; use serde::{ Deserialize, @@ -248,10 +248,10 @@ pub fn init_routes(cfg: &mut web::ServiceConfig) { mod tests { use super::*; use crate::test_utils::{ + RedisTestContext, assert_form_error_response, assert_toast_error_response, init_app_for_test, - RedisTestContext, }; use actix_web::test; use sqlx::PgPool; @@ -380,7 +380,6 @@ VALUES ($1, $2) resource_limit::ResourceLimit, }, }; - use futures_util::future; use redis::AsyncCommands; #[test_context(RedisTestContext)] @@ -448,14 +447,13 @@ SELECT EXISTS ( ) -> sqlx::Result<()> { let redis_pool = &ctx.redis_pool; let app = init_app_for_test(post, pool, false, 
false, None).await.0; - let mut incr_futures = vec![]; for _ in 0..ResourceLimit::CreateReport.get_limit() + 1 { - incr_futures.push(incr_subscription_limit(redis_pool, "8.8.8.8")); + incr_subscription_limit(redis_pool, "8.8.8.8") + .await + .unwrap(); } - future::join_all(incr_futures).await; - let req = test::TestRequest::post() .peer_addr(SocketAddr::from(SocketAddrV4::new( Ipv4Addr::new(8, 8, 8, 8), diff --git a/src/routes/v1/me/assets/delete.rs b/src/routes/v1/me/assets/delete.rs index eecc21d..31bc507 100644 --- a/src/routes/v1/me/assets/delete.rs +++ b/src/routes/v1/me/assets/delete.rs @@ -1,4 +1,5 @@ use crate::{ + AppState, constants::buckets::S3_UPLOADS_BUCKET, error::{ AppError, @@ -6,12 +7,11 @@ use crate::{ }, middlewares::identity::identity::Identity, utils::delete_s3_objects::delete_s3_objects, - AppState, }; use actix_web::{ + HttpResponse, delete, web, - HttpResponse, }; use serde::Deserialize; use sqlx::Row; @@ -100,15 +100,15 @@ pub fn init_routes(cfg: &mut web::ServiceConfig) { mod tests { use super::*; use crate::{ + S3Client, test_utils::{ + TestContext, assert_toast_error_response, count_s3_objects, get_s3_client, init_app_for_test, - TestContext, }, utils::delete_s3_objects_using_prefix::delete_s3_objects_using_prefix, - S3Client, }; use actix_web::test; use sqlx::PgPool; @@ -169,7 +169,7 @@ mod tests { .unwrap(); // Asset should be present in the bucket. - let result = count_s3_objects(&ctx.s3_client, S3_UPLOADS_BUCKET, None, None) + let result = count_s3_objects(&ctx.s3_client, S3_UPLOADS_BUCKET, None) .await .unwrap(); @@ -220,7 +220,7 @@ SELECT EXISTS ( assert!(!result.get::("exists")); // Asset should not be present in the bucket. - let result = count_s3_objects(&ctx.s3_client, S3_UPLOADS_BUCKET, None, None) + let result = count_s3_objects(&ctx.s3_client, S3_UPLOADS_BUCKET, None) .await .unwrap(); diff --git a/src/routes/v1/me/blogs/settings/appearance/fonts/delete.rs b/src/routes/v1/me/blogs/settings/appearance/fonts/delete.rs index b65414b..fa0e560 100644 --- a/src/routes/v1/me/blogs/settings/appearance/fonts/delete.rs +++ b/src/routes/v1/me/blogs/settings/appearance/fonts/delete.rs @@ -1,4 +1,5 @@ use crate::{ + AppState, constants::buckets::S3_FONTS_BUCKET, error::{ AppError, @@ -6,12 +7,11 @@ use crate::{ }, middlewares::identity::identity::Identity, utils::delete_s3_objects::delete_s3_objects, - AppState, }; use actix_web::{ + HttpResponse, delete, web, - HttpResponse, }; use serde::Deserialize; use sqlx::Row; @@ -138,16 +138,16 @@ pub fn init_routes(cfg: &mut web::ServiceConfig) { mod tests { use super::*; use crate::{ + S3Client, test_utils::{ + TestContext, assert_response_body_text, assert_toast_error_response, count_s3_objects, get_s3_client, init_app_for_test, - TestContext, }, utils::delete_s3_objects_using_prefix::delete_s3_objects_using_prefix, - S3Client, }; use actix_web::test; use sqlx::PgPool; @@ -365,7 +365,7 @@ RETURNING id .unwrap(); // Font should be present in the bucket. - let result = count_s3_objects(&ctx.s3_client, S3_FONTS_BUCKET, None, None) + let result = count_s3_objects(&ctx.s3_client, S3_FONTS_BUCKET, None) .await .unwrap(); @@ -413,7 +413,7 @@ WHERE id = $1 assert!(result.get::, _>("font_primary").is_none()); // Font should not be present in the bucket. - let result = count_s3_objects(&ctx.s3_client, S3_FONTS_BUCKET, None, None) + let result = count_s3_objects(&ctx.s3_client, S3_FONTS_BUCKET, None) .await .unwrap(); @@ -442,7 +442,7 @@ WHERE id = $1 .unwrap(); // Font should be present in the bucket. 
- let result = count_s3_objects(&ctx.s3_client, S3_FONTS_BUCKET, None, None) + let result = count_s3_objects(&ctx.s3_client, S3_FONTS_BUCKET, None) .await .unwrap(); @@ -539,7 +539,7 @@ WHERE id = $1 assert!(result.get::, _>("font_primary").is_none()); // Font should not be present in the bucket. - let result = count_s3_objects(&ctx.s3_client, S3_FONTS_BUCKET, None, None) + let result = count_s3_objects(&ctx.s3_client, S3_FONTS_BUCKET, None) .await .unwrap(); diff --git a/src/routes/v1/me/blogs/settings/appearance/fonts/post.rs b/src/routes/v1/me/blogs/settings/appearance/fonts/post.rs index 5f9ac8d..783955e 100644 --- a/src/routes/v1/me/blogs/settings/appearance/fonts/post.rs +++ b/src/routes/v1/me/blogs/settings/appearance/fonts/post.rs @@ -904,7 +904,7 @@ WHERE id = $1 assert!(res.status().is_success()); // Font should be present in the bucket. - let result = count_s3_objects(&ctx.s3_client, S3_FONTS_BUCKET, None, None) + let result = count_s3_objects(&ctx.s3_client, S3_FONTS_BUCKET, None) .await .unwrap(); @@ -924,7 +924,7 @@ WHERE id = $1 assert!(res.status().is_success()); // Should delete the old font. - let result = count_s3_objects(&ctx.s3_client, S3_FONTS_BUCKET, None, None) + let result = count_s3_objects(&ctx.s3_client, S3_FONTS_BUCKET, None) .await .unwrap(); diff --git a/src/routes/v1/me/blogs/settings/delete_blog.rs b/src/routes/v1/me/blogs/settings/delete_blog.rs index 43198ee..e6d88d5 100644 --- a/src/routes/v1/me/blogs/settings/delete_blog.rs +++ b/src/routes/v1/me/blogs/settings/delete_blog.rs @@ -1,4 +1,5 @@ use crate::{ + AppState, constants::buckets::S3_FONTS_BUCKET, error::{ AppError, @@ -7,13 +8,12 @@ use crate::{ }, middlewares::identity::identity::Identity, utils::delete_s3_objects::delete_s3_objects, - AppState, }; use actix_http::StatusCode; use actix_web::{ + HttpResponse, post, web, - HttpResponse, }; use actix_web_validator::Json; use argon2::{ @@ -170,24 +170,24 @@ pub fn init_routes(cfg: &mut web::ServiceConfig) { mod tests { use super::*; use crate::{ + S3Client, test_utils::{ + TestContext, assert_form_error_response, assert_toast_error_response, count_s3_objects, get_s3_client, init_app_for_test, - TestContext, }, utils::delete_s3_objects_using_prefix::delete_s3_objects_using_prefix, - S3Client, }; use actix_web::test; use argon2::{ + PasswordHasher, password_hash::{ - rand_core::OsRng, SaltString, + rand_core::OsRng, }, - PasswordHasher, }; use sqlx::{ PgPool, @@ -367,7 +367,7 @@ RETURNING id .unwrap(); // Fonts should be present in the bucket. - let result = count_s3_objects(&ctx.s3_client, S3_FONTS_BUCKET, None, None) + let result = count_s3_objects(&ctx.s3_client, S3_FONTS_BUCKET, None) .await .unwrap(); @@ -423,7 +423,7 @@ SELECT EXISTS ( assert!(!result.get::("exists")); // Fonts should not be present in the bucket. 
- let result = count_s3_objects(&ctx.s3_client, S3_FONTS_BUCKET, None, None) + let result = count_s3_objects(&ctx.s3_client, S3_FONTS_BUCKET, None) .await .unwrap(); diff --git a/src/routes/v1/me/flow/onboarding/tags/tags.rs b/src/routes/v1/me/flow/onboarding/tags/tags.rs index ec70fc9..58db37a 100644 --- a/src/routes/v1/me/flow/onboarding/tags/tags.rs +++ b/src/routes/v1/me/flow/onboarding/tags/tags.rs @@ -1,13 +1,13 @@ use crate::{ + AppState, error::AppError, middlewares::identity::identity::Identity, utils::decode_uri_encoded_story_categories::decode_uri_encoded_story_categories, - AppState, }; use actix_web::{ + HttpResponse, get, web, - HttpResponse, }; use actix_web_validator::QsQuery; use serde::{ @@ -57,7 +57,7 @@ async fn get( let categories = decode_uri_encoded_story_categories(&query.encoded_categories).map_err(|error| { - warn!("unable to decode maybe invalid story categories: {error:?}"); + warn!("unable to decode story categories: {error:?}"); AppError::from("Invalid encoded categories") })?; diff --git a/src/routes/v1/me/flow/onboarding/writers/writers.rs b/src/routes/v1/me/flow/onboarding/writers/writers.rs index 13fb10e..1e545a4 100644 --- a/src/routes/v1/me/flow/onboarding/writers/writers.rs +++ b/src/routes/v1/me/flow/onboarding/writers/writers.rs @@ -1,13 +1,13 @@ use crate::{ + AppState, error::AppError, middlewares::identity::identity::Identity, utils::decode_uri_encoded_story_categories::decode_uri_encoded_story_categories, - AppState, }; use actix_web::{ + HttpResponse, get, web, - HttpResponse, }; use actix_web_validator::QsQuery; use serde::{ @@ -57,7 +57,7 @@ async fn get( let categories = decode_uri_encoded_story_categories(&query.encoded_categories).map_err(|error| { - warn!("unable to decode maybe invalid story categories: {error:?}"); + warn!("unable to decode story categories: {error:?}"); AppError::from("Invalid encoded categories") })?; diff --git a/src/routes/v1/me/gallery/post.rs b/src/routes/v1/me/gallery/post.rs index 42c14b1..449499f 100644 --- a/src/routes/v1/me/gallery/post.rs +++ b/src/routes/v1/me/gallery/post.rs @@ -1,4 +1,6 @@ use crate::{ + AppState, + S3Client, constants::{ buckets::S3_UPLOADS_BUCKET, pexels::PEXELS_API_URL, @@ -14,24 +16,22 @@ use crate::{ check_resource_limit::check_resource_limit, incr_resource_limit::incr_resource_limit, }, - AppState, - S3Client, }; use actix_http::StatusCode; use actix_web::{ + HttpResponse, post, web, - HttpResponse, }; use actix_web_validator::Json; use colors_transform::Rgb; use dominant_color::get_colors; use image::{ - imageops::FilterType, EncodableLayout, GenericImageView, ImageError, ImageFormat, + imageops::FilterType, }; use mime::IMAGE_JPEG; use serde::{ @@ -74,14 +74,12 @@ struct Response { /// Returns the URL of the photo image, with custom width and height query parameters. /// -/// # Caution -/// -/// This URL format is not documented by Pexels, but their public API follows this format. -/// -/// * `photo_id` - The ID of the photo resource. -fn get_photo_image_url(photo_id: &str) -> String { +/// * `original_url` - The original URL of the photo resource without query parameters. +fn get_photo_image_url(original_url: &str) -> String { format!( - "https://images.pexels.com/photos/{photo_id}/pexels-photo-{photo_id}.jpeg?auto=compress&cs=tinysrgb&h=2048&w=2048" + "{}{}auto=compress&cs=tinysrgb&h=2048&w=2048", + original_url, + if original_url.contains("?") { "&" } else { "?" 
} ) } @@ -176,7 +174,7 @@ async fn post( )?; let image_response = reqwest_client - .get(get_photo_image_url(&photo_id.to_string())) + .get(get_photo_image_url(&photo.src.original)) .timeout(Duration::from_secs(30)) // 30 seconds download timeout .send() .await @@ -408,7 +406,10 @@ pub fn init_routes(cfg: &mut web::ServiceConfig) { mod tests { use super::*; use crate::{ + RedisPool, test_utils::{ + RedisTestContext, + TestContext, assert_toast_error_response, exceed_resource_limit, get_redis_pool, @@ -416,11 +417,8 @@ mod tests { get_s3_client, init_app_for_test, res_to_string, - RedisTestContext, - TestContext, }, utils::delete_s3_objects_using_prefix::delete_s3_objects_using_prefix, - RedisPool, }; use actix_http::StatusCode; use actix_web::test; diff --git a/src/routes/v1/public/reports.rs b/src/routes/v1/public/reports.rs index 5822c8b..6e11f60 100644 --- a/src/routes/v1/public/reports.rs +++ b/src/routes/v1/public/reports.rs @@ -1,4 +1,5 @@ use crate::{ + AppState, constants::report_type::REPORT_TYPE_VEC, error::{ AppError, @@ -10,14 +11,13 @@ use crate::{ check_report_limit::check_report_limit, incr_report_limit::incr_report_limit, }, - AppState, }; use actix_web::{ + HttpRequest, + HttpResponse, http::StatusCode, post, web, - HttpRequest, - HttpResponse, }; use actix_web_validator::Json; use serde::{ @@ -139,13 +139,12 @@ mod tests { resource_limit::ResourceLimit, }, test_utils::{ + RedisTestContext, assert_form_error_response, init_app_for_test, - RedisTestContext, }, }; use actix_web::test; - use futures_util::future; use redis::AsyncCommands; use sqlx::{ PgPool, @@ -283,14 +282,11 @@ WHERE entity_id = $1 let user_id = user_id.unwrap(); let user_id_str = user_id.to_string(); - let mut incr_futures = vec![]; for _ in 0..ResourceLimit::CreateReport.get_limit() + 1 { - incr_futures.push(incr_report_limit(redis_pool, &user_id_str)); + incr_report_limit(redis_pool, &user_id_str).await.unwrap(); } - future::join_all(incr_futures).await; - let req = test::TestRequest::post() .cookie(cookie.unwrap()) .uri("/v1/public/reports") diff --git a/src/test_utils/count_s3_objects.rs b/src/test_utils/count_s3_objects.rs index 82695f7..3bde9d3 100644 --- a/src/test_utils/count_s3_objects.rs +++ b/src/test_utils/count_s3_objects.rs @@ -7,37 +7,34 @@ use async_recursion::async_recursion; /// * `client` - The S3 client instance. /// * `bucket_name` - The name of the target bucket containing the objects. /// * `prefix` - An optional string prefix to match keys and limit the objects. -/// * `continuation_token` - (Private) A token used during recursion if there are more than 1000 -/// keys with the provided prefix. 
#[async_recursion] pub async fn count_s3_objects( client: &S3Client, bucket_name: &str, prefix: Option, - continuation_token: Option, ) -> anyhow::Result { let mut num_objects: u32 = 0; + let mut continuation_token: Option = None; + + loop { + let response = client + .list_objects_v2() + .bucket(bucket_name) + .max_keys(1000) + .set_prefix(prefix.clone()) + .set_continuation_token(continuation_token.clone()) + .send() + .await + .map_err(|error| error.into_service_error()) + .map_err(|error| anyhow!("unable to list objects: {:?}", error))?; + + num_objects += response.contents().len() as u32; - let list_objects_result = client - .list_objects_v2() - .bucket(bucket_name) - .max_keys(1_000_i32) - .set_prefix(prefix.clone()) - .set_continuation_token(continuation_token) - .send() - .await - .map_err(|error| error.into_service_error()) - .map_err(|error| anyhow!("unable to list objects: {:?}", error))?; - - num_objects += list_objects_result.contents().len() as u32; - - // Recurse until there are no more keys left - if list_objects_result.is_truncated.unwrap_or_default() { - let next_continuation_token = list_objects_result.next_continuation_token.clone(); - drop(list_objects_result); - - num_objects += - count_s3_objects(client, bucket_name, prefix, next_continuation_token).await?; + if response.is_truncated.unwrap_or(false) { + continuation_token = response.next_continuation_token.clone(); + } else { + break; + } } Ok(num_objects) @@ -49,12 +46,11 @@ mod tests { use crate::{ constants::buckets::S3_BASE_BUCKET, test_utils::{ - get_s3_client, TestContext, + get_s3_client, }, utils::delete_s3_objects_using_prefix::delete_s3_objects_using_prefix, }; - use futures::future; use storiny_macros::test_context; struct LocalTestContext { @@ -99,14 +95,10 @@ mod tests { assert!(result.is_ok()); - let result = count_s3_objects( - s3_client, - S3_BASE_BUCKET, - Some("test-fruits/".to_string()), - None, - ) - .await - .unwrap(); + let result = + count_s3_objects(s3_client, S3_BASE_BUCKET, Some("test-fruits/".to_string())) + .await + .unwrap(); assert_eq!(result, 1_u32); } @@ -115,26 +107,22 @@ mod tests { #[tokio::test] async fn can_count_more_than_1000_objects_with_prefix(ctx: &mut LocalTestContext) { let s3_client = &ctx.s3_client; - let mut put_futures = vec![]; // Insert 1200 objects for i in 0..1_200 { - put_futures.push( - s3_client - .put_object() - .bucket(S3_BASE_BUCKET) - .key(format!("test-integers/{}", i)) - .send(), - ); + s3_client + .put_object() + .bucket(S3_BASE_BUCKET) + .key(format!("test-integers/{}", i)) + .send() + .await + .unwrap(); } - future::join_all(put_futures).await; - let result = count_s3_objects( s3_client, S3_BASE_BUCKET, Some("test-integers/".to_string()), - None, ) .await .unwrap(); @@ -167,14 +155,10 @@ mod tests { assert!(result.is_ok()); - let result = count_s3_objects( - s3_client, - S3_BASE_BUCKET, - Some("test-trees/".to_string()), - None, - ) - .await - .unwrap(); + let result = + count_s3_objects(s3_client, S3_BASE_BUCKET, Some("test-trees/".to_string())) + .await + .unwrap(); assert_eq!(result, 1_u32); } diff --git a/src/test_utils/exceed_resource_limit.rs b/src/test_utils/exceed_resource_limit.rs index fb92d06..cd8cbce 100644 --- a/src/test_utils/exceed_resource_limit.rs +++ b/src/test_utils/exceed_resource_limit.rs @@ -1,9 +1,8 @@ use crate::{ + RedisPool, constants::resource_limit::ResourceLimit, utils::incr_resource_limit::incr_resource_limit, - RedisPool, }; -use futures::future; /// Exceeds the resource limit for the provided resource type and user ID 
(used only for tests). /// @@ -15,13 +14,11 @@ pub async fn exceed_resource_limit( resource_limit: ResourceLimit, user_id: i64, ) { - let mut incr_futures = vec![]; - for _ in 0..resource_limit.get_limit() + 1 { - incr_futures.push(incr_resource_limit(redis_pool, resource_limit, user_id)); + incr_resource_limit(redis_pool, resource_limit, user_id) + .await + .unwrap(); } - - future::join_all(incr_futures).await; } #[cfg(test)] diff --git a/src/test_utils/exceed_resource_lock_attempts.rs b/src/test_utils/exceed_resource_lock_attempts.rs index a477a98..cb1c631 100644 --- a/src/test_utils/exceed_resource_lock_attempts.rs +++ b/src/test_utils/exceed_resource_lock_attempts.rs @@ -1,9 +1,8 @@ use crate::{ + RedisPool, constants::resource_lock::ResourceLock, utils::incr_resource_lock_attempts::incr_resource_lock_attempts, - RedisPool, }; -use futures::future; /// Exceeds the attempts for a locked resource. /// @@ -15,17 +14,11 @@ pub async fn exceed_resource_lock_attempts( resource_lock: ResourceLock, identifier: &str, ) { - let mut incr_futures = vec![]; - for _ in 0..resource_lock.get_max_attempts() { - incr_futures.push(incr_resource_lock_attempts( - redis_pool, - resource_lock, - identifier, - )); + incr_resource_lock_attempts(redis_pool, resource_lock, identifier) + .await + .unwrap(); } - - future::join_all(incr_futures).await; } #[cfg(test)] diff --git a/src/utils/check_report_limit.rs b/src/utils/check_report_limit.rs index 7f57249..a8b3f41 100644 --- a/src/utils/check_report_limit.rs +++ b/src/utils/check_report_limit.rs @@ -1,9 +1,9 @@ use crate::{ + RedisPool, constants::{ redis_namespaces::RedisNamespace, resource_limit::ResourceLimit, }, - RedisPool, }; use anyhow::anyhow; use redis::AsyncCommands; @@ -39,7 +39,6 @@ mod tests { test_utils::RedisTestContext, utils::incr_report_limit::incr_report_limit, }; - use futures::future; use storiny_macros::test_context; mod serial { @@ -71,16 +70,13 @@ mod tests { #[tokio::test] async fn can_return_false_when_exceeding_a_report_limit(ctx: &mut RedisTestContext) { let redis_pool = &ctx.redis_pool; - let mut incr_futures = vec![]; // Exceed the report limit. Do not use [crate::test_utils::exceed_report_limit] as // it depends on [check_report_limit]. for _ in 0..ResourceLimit::CreateReport.get_limit() + 1 { - incr_futures.push(incr_report_limit(redis_pool, "::1")); + incr_report_limit(redis_pool, "::1").await.unwrap(); } - future::join_all(incr_futures).await; - let result = check_report_limit(redis_pool, "::1").await.unwrap(); assert!(!result); diff --git a/src/utils/check_resource_limit.rs b/src/utils/check_resource_limit.rs index 9ac5429..bb81ed7 100644 --- a/src/utils/check_resource_limit.rs +++ b/src/utils/check_resource_limit.rs @@ -1,9 +1,9 @@ use crate::{ + RedisPool, constants::{ redis_namespaces::RedisNamespace, resource_limit::ResourceLimit, }, - RedisPool, }; use anyhow::anyhow; use redis::AsyncCommands; @@ -44,7 +44,6 @@ mod tests { test_utils::RedisTestContext, utils::incr_resource_limit::incr_resource_limit, }; - use futures::future; use storiny_macros::test_context; mod serial { @@ -83,20 +82,15 @@ mod tests { #[tokio::test] async fn can_return_false_when_exceeding_a_resource_limit(ctx: &mut RedisTestContext) { let redis_pool = &ctx.redis_pool; - let mut incr_futures = vec![]; // Exceed the resource limit. Do not use [crate::test_utils::exceed_resource_limit] as // it depends on [check_resource_limit]. 
for _ in 0..ResourceLimit::CreateStory.get_limit() + 1 { - incr_futures.push(incr_resource_limit( - redis_pool, - ResourceLimit::CreateStory, - 1_i64, - )); + incr_resource_limit(redis_pool, ResourceLimit::CreateStory, 1_i64) + .await + .unwrap(); } - future::join_all(incr_futures).await; - let result = check_resource_limit(redis_pool, ResourceLimit::CreateStory, 1_i64) .await .unwrap(); diff --git a/src/utils/check_subscription_limit.rs b/src/utils/check_subscription_limit.rs index ad96c47..77d43d7 100644 --- a/src/utils/check_subscription_limit.rs +++ b/src/utils/check_subscription_limit.rs @@ -1,9 +1,9 @@ use crate::{ + RedisPool, constants::{ redis_namespaces::RedisNamespace, resource_limit::ResourceLimit, }, - RedisPool, }; use anyhow::anyhow; use redis::AsyncCommands; @@ -39,7 +39,6 @@ mod tests { test_utils::RedisTestContext, utils::incr_subscription_limit::incr_subscription_limit, }; - use futures::future; use storiny_macros::test_context; mod serial { @@ -71,17 +70,14 @@ mod tests { #[tokio::test] async fn can_return_false_when_exceeding_a_subscription_limit(ctx: &mut RedisTestContext) { let redis_pool = &ctx.redis_pool; - let mut incr_futures = vec![]; // Exceed the subscription limit. Do not use // [crate::test_utils::exceed_subscription_limit] as it depends on // [check_subscription_limit]. for _ in 0..ResourceLimit::SubscribeUnregistered.get_limit() + 1 { - incr_futures.push(incr_subscription_limit(redis_pool, "::1")); + incr_subscription_limit(redis_pool, "::1").await.unwrap(); } - future::join_all(incr_futures).await; - let result = check_subscription_limit(redis_pool, "::1").await.unwrap(); assert!(!result); diff --git a/src/utils/delete_s3_objects_using_prefix.rs b/src/utils/delete_s3_objects_using_prefix.rs index 0cf7ce2..0fe67b5 100644 --- a/src/utils/delete_s3_objects_using_prefix.rs +++ b/src/utils/delete_s3_objects_using_prefix.rs @@ -67,7 +67,6 @@ mod tests { }, }; use aws_sdk_s3::operation::get_object::GetObjectError; - use futures::future; use storiny_macros::test_context; struct LocalTestContext { @@ -139,21 +138,18 @@ mod tests { #[tokio::test] async fn can_delete_more_than_1000_objects_with_prefix(ctx: &mut LocalTestContext) { let s3_client = &ctx.s3_client; - let mut put_futures = vec![]; // Insert 1200 objects for i in 0..1_200 { - put_futures.push( - s3_client - .put_object() - .bucket(S3_BASE_BUCKET) - .key(format!("test-integers/{}", i)) - .send(), - ); + s3_client + .put_object() + .bucket(S3_BASE_BUCKET) + .key(format!("test-integers/{}", i)) + .send() + .await + .unwrap(); } - future::join_all(put_futures).await; - let result = delete_s3_objects_using_prefix( s3_client, S3_BASE_BUCKET, diff --git a/src/utils/is_resource_locked.rs b/src/utils/is_resource_locked.rs index d389cae..0ed4741 100644 --- a/src/utils/is_resource_locked.rs +++ b/src/utils/is_resource_locked.rs @@ -1,6 +1,6 @@ use crate::{ - constants::resource_lock::ResourceLock, RedisPool, + constants::resource_lock::ResourceLock, }; use anyhow::anyhow; use redis::AsyncCommands; @@ -34,7 +34,6 @@ pub async fn is_resource_locked( mod tests { use super::*; use crate::test_utils::RedisTestContext; - use futures::future; use storiny_macros::test_context; mod serial { @@ -73,19 +72,14 @@ mod tests { #[tokio::test] async fn can_return_true_for_a_locked_resource(ctx: &mut RedisTestContext) { let redis_pool = &ctx.redis_pool; - let mut incr_futures = vec![]; // Exceed the attempts. 
for _ in 0..ResourceLock::Signup.get_max_attempts() + 1 { - incr_futures.push(incr_resource_lock_attempts( - redis_pool, - ResourceLock::Signup, - "::1", - )); + incr_resource_lock_attempts(redis_pool, ResourceLock::Signup, "::1") + .await + .unwrap(); } - future::join_all(incr_futures).await; - let result = is_resource_locked(redis_pool, ResourceLock::Signup, "::1") .await .unwrap(); diff --git a/src/utils/redis_async_transaction.rs b/src/utils/redis_async_transaction.rs index 927d99a..2d64061 100644 --- a/src/utils/redis_async_transaction.rs +++ b/src/utils/redis_async_transaction.rs @@ -7,10 +7,10 @@ macro_rules! async_transaction { ($conn:expr, $keys:expr, $body:expr) => { loop { - redis::cmd("WATCH").arg($keys).query_async($conn).await?; + let _: () = redis::cmd("WATCH").arg($keys).query_async($conn).await?; if let Some(response) = $body { - redis::cmd("UNWATCH").query_async($conn).await?; + let _: () = redis::cmd("UNWATCH").query_async($conn).await?; break response; } } diff --git a/src/utils/reset_resource_lock.rs b/src/utils/reset_resource_lock.rs index 6d535e7..51d3b69 100644 --- a/src/utils/reset_resource_lock.rs +++ b/src/utils/reset_resource_lock.rs @@ -19,7 +19,8 @@ pub async fn reset_resource_lock( anyhow!("unable to acquire a connection from the Redis pool: {error:?}") })?; - conn.del(format!("{}:{identifier}", resource_lock)) + let _: () = conn + .del(format!("{}:{identifier}", resource_lock)) .await .map_err(|error| anyhow!("unable to delete the record from Redis: {error:?}"))?;
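
Notes on the recurring changes above (annotations for review; nothing below is part of the patch itself):

* `queries/me/onboarding/writers.sql` and its regenerated `.sqlx` entry: the `INNER JOIN` on `stories` yields one row per matching story, so a writer with several published stories in the selected categories could fill more than one slot of the old `LIMIT 25`. Wrapping the join in a `SELECT DISTINCT` subquery deduplicates writers before the limit applies, and the inner `ORDER BY follower_count DESC` still decides which rows survive. Note that the outer `SELECT` carries no `ORDER BY` of its own, so the final row order is technically unspecified, even though Postgres preserves the subquery's order in practice.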
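* The `let _: () = ...` additions in `session/src/storage/redis_rs.rs`, `src/middlewares/rate_limiter.rs`, `src/utils/redis_async_transaction.rs`, and `src/utils/reset_resource_lock.rs` are all the same fix: the redis crate's command helpers are generic over the reply's `FromRedisValue` target type, so a reply that is discarded outright leaves that type ambiguous. Pinning it to `()` keeps the error check from `?` while ignoring the value. A minimal sketch of the pattern, with an illustrative function name that does not appear in the patch:

      use redis::{AsyncCommands, aio::ConnectionManager};

      // Sketch only: discard a Redis reply while still propagating errors.
      async fn remove_key(conn: &mut ConnectionManager, key: &str) -> redis::RedisResult<()> {
          // `del` is generic over the reply type, so an ignored reply has to
          // be pinned to a concrete `FromRedisValue` impl; `()` does that.
          let _: () = conn.del(key).await?;
          Ok(())
      }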
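* `count_s3_objects` no longer threads a private `continuation_token` parameter through recursive calls; it now loops on `next_continuation_token` internally, which is why the trailing `None` argument disappears from every call site in this diff. (The `#[async_recursion]` attribute and its import remain as unchanged context lines even though the function no longer recurses.)
* Test helpers that used to collect `put_object` and `incr_*` futures into `future::join_all` now await each call and `.unwrap()` it. `join_all`'s `Vec` of results was previously discarded, so a failed request could pass unnoticed; the sequential form fails the test at the first error.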
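* `src/routes/v1/me/gallery/post.rs` stops reconstructing the Pexels image URL from the photo ID (a URL shape the removed comment described as undocumented) and instead appends the sizing query parameters to the `src.original` URL returned by the API, which is why `PhotoSource` in `src/models/photo.rs` gains the `original` field.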