Skip to content
This repository was archived by the owner on Jan 31, 2026. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Binary file modified geo/db/GeoLite2-City.mmdb
Binary file not shown.
3 changes: 3 additions & 0 deletions justfile
Original file line number Diff line number Diff line change
Expand Up @@ -27,3 +27,6 @@ udeps:

update_geo:
docker compose -f geo/docker-compose.yaml up

migrate:
sqlx migrate run
38 changes: 19 additions & 19 deletions queries/me/onboarding/writers.sql
Original file line number Diff line number Diff line change
@@ -1,19 +1,19 @@
-- Onboarding writer suggestions: the top 25 users by follower count who
-- have at least one published, non-deleted story in any of the requested
-- categories ($1 is a TEXT[] of category names).
-- NOTE(review): the INNER JOIN on stories produces one row per matching
-- story, and there is no DISTINCT here, so a user with several matching
-- stories appears multiple times in the result set.
SELECT u.id,
u.name,
u.username,
u.avatar_id,
u.avatar_hex,
u.public_flags,
-- sqlx override: assert this computed column is non-null
u.rendered_bio AS "rendered_bio!"
FROM
users u
-- Join stories
INNER JOIN stories AS "u->story"
ON "u->story".user_id = u.id
AND "u->story".deleted_at IS NULL
AND "u->story".published_at IS NOT NULL
WHERE
"u->story".category::TEXT = ANY ($1)
ORDER BY
u.follower_count DESC
LIMIT 25
-- Onboarding writer suggestions: the top 25 DISTINCT users by follower
-- count who have at least one published, non-deleted story in any of the
-- requested categories ($1 is a TEXT[] of category names). The DISTINCT
-- collapses the row multiplication caused by the stories join.
SELECT id, name, username, avatar_id, avatar_hex, public_flags, rendered_bio AS "rendered_bio!"
FROM (SELECT DISTINCT u.id,
             u.name,
             u.username,
             u.avatar_id,
             u.avatar_hex,
             u.public_flags,
             u.rendered_bio,
             u.follower_count
      FROM users u
      -- Join stories
      INNER JOIN stories AS "u->story"
      ON "u->story".user_id = u.id
      AND "u->story".deleted_at IS NULL
      AND "u->story".published_at IS NOT NULL
      WHERE "u->story".category::TEXT = ANY ($1)
      ORDER BY
          u.follower_count DESC
      -- PostgreSQL requires derived tables in FROM to carry an alias.
      LIMIT 25) AS top_writers
-- Row order coming out of a subquery is not guaranteed to survive into the
-- outer query; re-apply the ordering here so callers reliably get the
-- writers sorted by follower count.
ORDER BY follower_count DESC
34 changes: 19 additions & 15 deletions session/src/storage/redis_rs.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,12 +4,12 @@ use anyhow::{
Error,
};
use redis::{
aio::ConnectionManager,
AsyncCommands,
Cmd,
FromRedisValue,
RedisResult,
Value,
aio::ConnectionManager,
};
use std::{
convert::TryInto,
Expand All @@ -19,14 +19,14 @@ use std::{

use super::SessionKey;
use crate::storage::{
SessionStore,
interface::{
LoadError,
SaveError,
SessionState,
UpdateError,
},
utils::generate_session_key,
SessionStore,
};

/// Use Redis as session storage backend.
Expand Down Expand Up @@ -146,17 +146,18 @@ impl SessionStore for RedisSessionStore {
let session_key = generate_session_key(user_id.map(|value| value.to_string()));
let cache_key = (self.configuration.cache_keygen)(session_key.as_ref());

self.execute_command(
redis::cmd("SET")
.arg(&cache_key)
.arg(&body)
.arg("NX") // NX: only set the key if it does not already exist
.arg("EX") // EX: set expiry
.arg(&format!("{}", ttl.whole_seconds())),
)
.await
.map_err(Into::into)
.map_err(SaveError::Other)?;
let _: () = self
.execute_command(
redis::cmd("SET")
.arg(&cache_key)
.arg(&body)
.arg("NX") // NX: only set the key if it does not already exist
.arg("EX") // EX: set expiry
.arg(&format!("{}", ttl.whole_seconds())),
)
.await
.map_err(Into::into)
.map_err(SaveError::Other)?;

Ok(session_key)
}
Expand Down Expand Up @@ -209,7 +210,8 @@ impl SessionStore for RedisSessionStore {
async fn update_ttl(&self, session_key: &SessionKey, ttl: &Duration) -> Result<(), Error> {
let cache_key = (self.configuration.cache_keygen)(session_key.as_ref());

self.client
let _: () = self
.client
.clone()
.expire(
&cache_key,
Expand All @@ -218,12 +220,14 @@ impl SessionStore for RedisSessionStore {
)?,
)
.await?;

Ok(())
}

async fn delete(&self, session_key: &SessionKey) -> Result<(), anyhow::Error> {
let cache_key = (self.configuration.cache_keygen)(session_key.as_ref());
self.execute_command(redis::cmd("DEL").arg(&[&cache_key]))
let _: () = self
.execute_command(redis::cmd("DEL").arg(&[&cache_key]))
.await
.map_err(Into::into)
.map_err(UpdateError::Other)?;
Expand Down
26 changes: 11 additions & 15 deletions src/cron/cleanup_blogs/cleanup_blogs.rs
Original file line number Diff line number Diff line change
Expand Up @@ -201,7 +201,6 @@ mod tests {
},
utils::delete_s3_objects_using_prefix::delete_s3_objects_using_prefix,
};
use futures::future;
use nanoid::nanoid;
use sqlx::PgPool;
use storiny_macros::test_context;
Expand Down Expand Up @@ -272,26 +271,23 @@ SELECT
assert_eq!(result.rows_affected(), count as u64);

// Upload empty objects to S3.
let mut put_futures = vec![];
let mut object_keys = vec![];

object_keys.append(&mut primary_font_keys);
object_keys.append(&mut secondary_font_keys);
object_keys.append(&mut code_font_keys);

for key in object_keys {
put_futures.push(
s3_client
.put_object()
.bucket(S3_FONTS_BUCKET)
.key(key.to_string())
.send(),
);
s3_client
.put_object()
.bucket(S3_FONTS_BUCKET)
.key(key.to_string())
.send()
.await
.unwrap();
}

future::join_all(put_futures).await;

let object_count = count_s3_objects(s3_client, S3_FONTS_BUCKET, None, None)
let object_count = count_s3_objects(s3_client, S3_FONTS_BUCKET, None)
.await
.unwrap();

Expand Down Expand Up @@ -325,7 +321,7 @@ SELECT
assert!(result.is_empty());

// Objects should not be present in the bucket.
let object_count = count_s3_objects(s3_client, S3_FONTS_BUCKET, None, None)
let object_count = count_s3_objects(s3_client, S3_FONTS_BUCKET, None)
.await
.unwrap();

Expand Down Expand Up @@ -358,7 +354,7 @@ SELECT
assert!(result.is_empty());

// Objects should not be present in the bucket.
let object_count = count_s3_objects(s3_client, S3_FONTS_BUCKET, None, None)
let object_count = count_s3_objects(s3_client, S3_FONTS_BUCKET, None)
.await
.unwrap();

Expand Down Expand Up @@ -408,7 +404,7 @@ WHERE id = (SELECT id FROM selected_blog)
assert_eq!(result.len(), 1);

// Objects should still be present in the bucket.
let object_count = count_s3_objects(s3_client, S3_FONTS_BUCKET, None, None)
let object_count = count_s3_objects(s3_client, S3_FONTS_BUCKET, None)
.await
.unwrap();

Expand Down
51 changes: 21 additions & 30 deletions src/cron/cleanup_s3/cleanup_s3.rs
Original file line number Diff line number Diff line change
Expand Up @@ -334,22 +334,17 @@ SELECT UNNEST($1::UUID[]), $2, $3, $4

assert_eq!(result.rows_affected(), object_keys.len() as u64);

// Upload empty objects to S3.
let mut put_futures = vec![];

for key in object_keys {
put_futures.push(
s3_client
.put_object()
.bucket(S3_UPLOADS_BUCKET)
.key(key.to_string())
.send(),
);
s3_client
.put_object()
.bucket(S3_UPLOADS_BUCKET)
.key(key.to_string())
.send()
.await
.unwrap();
}

future::join_all(put_futures).await;

let object_count = count_s3_objects(s3_client, S3_UPLOADS_BUCKET, None, None)
let object_count = count_s3_objects(s3_client, S3_UPLOADS_BUCKET, None)
.await
.unwrap();

Expand Down Expand Up @@ -378,21 +373,17 @@ SELECT UNNEST($1::UUID[])
assert_eq!(result.rows_affected(), object_keys.len() as u64);

// Upload empty objects to S3.
let mut put_futures = vec![];

for key in object_keys {
put_futures.push(
s3_client
.put_object()
.bucket(S3_DOCS_BUCKET)
.key(key.to_string())
.send(),
);
s3_client
.put_object()
.bucket(S3_DOCS_BUCKET)
.key(key.to_string())
.send()
.await
.unwrap();
}

future::join_all(put_futures).await;

let object_count = count_s3_objects(s3_client, S3_DOCS_BUCKET, None, None)
let object_count = count_s3_objects(s3_client, S3_DOCS_BUCKET, None)
.await
.unwrap();

Expand Down Expand Up @@ -428,7 +419,7 @@ SELECT UNNEST($1::UUID[])
assert!(result.is_empty());

// Objects should not be present in the bucket.
let object_count = count_s3_objects(s3_client, S3_UPLOADS_BUCKET, None, None)
let object_count = count_s3_objects(s3_client, S3_UPLOADS_BUCKET, None)
.await
.unwrap();

Expand Down Expand Up @@ -461,7 +452,7 @@ SELECT UNNEST($1::UUID[])
assert!(result.is_empty());

// Objects should not be present in the bucket.
let object_count = count_s3_objects(s3_client, S3_UPLOADS_BUCKET, None, None)
let object_count = count_s3_objects(s3_client, S3_UPLOADS_BUCKET, None)
.await
.unwrap();

Expand Down Expand Up @@ -511,7 +502,7 @@ WHERE id = (SELECT id FROM selected_asset)
assert_eq!(result.len(), 1);

// Object should still be present in the bucket.
let object_count = count_s3_objects(s3_client, S3_UPLOADS_BUCKET, None, None)
let object_count = count_s3_objects(s3_client, S3_UPLOADS_BUCKET, None)
.await
.unwrap();

Expand Down Expand Up @@ -546,7 +537,7 @@ WHERE id = (SELECT id FROM selected_asset)
assert!(result.is_empty());

// Objects should not be present in the bucket.
let object_count = count_s3_objects(s3_client, S3_DOCS_BUCKET, None, None)
let object_count = count_s3_objects(s3_client, S3_DOCS_BUCKET, None)
.await
.unwrap();

Expand Down Expand Up @@ -579,7 +570,7 @@ WHERE id = (SELECT id FROM selected_asset)
assert!(result.is_empty());

// Objects should not be present in the bucket.
let object_count = count_s3_objects(s3_client, S3_DOCS_BUCKET, None, None)
let object_count = count_s3_objects(s3_client, S3_DOCS_BUCKET, None)
.await
.unwrap();

Expand Down
5 changes: 2 additions & 3 deletions src/cron/sitemap/presets.rs
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
use crate::{
S3Client,
constants::buckets::S3_SITEMAPS_BUCKET,
cron::sitemap::GenerateSitemapResponse,
utils::deflate_bytes_gzip::deflate_bytes_gzip,
S3Client,
};
use apalis::prelude::Error;
use sitemap_rs::{
Expand Down Expand Up @@ -103,9 +103,9 @@ mod tests {
use crate::{
config::get_app_config,
test_utils::{
TestContext,
count_s3_objects,
get_s3_client,
TestContext,
},
};
use storiny_macros::test_context;
Expand Down Expand Up @@ -151,7 +151,6 @@ mod tests {
s3_client,
S3_SITEMAPS_BUCKET,
Some("presets.xml".to_string()),
None,
)
.await
.unwrap();
Expand Down
Loading
Loading