Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
22 changes: 17 additions & 5 deletions client/pyroclient/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -153,7 +153,7 @@ def update_last_image(self, media: bytes) -> Response:
return requests.patch(
urljoin(self._route_prefix, ClientRoute.CAMERAS_IMAGE),
headers=self.headers,
files={"file": ("logo.png", media, "image/png")},
files={"file": ("logo.jpg", media, "image/jpeg")},
timeout=self.timeout,
)

Expand Down Expand Up @@ -245,7 +245,7 @@ def update_pose_image(self, pose_id: int, media: bytes) -> Response:
return requests.patch(
urljoin(self._route_prefix, ClientRoute.POSES_IMAGE.format(pose_id=pose_id)),
headers=self.headers,
files={"file": ("image.png", media, "image/png")},
files={"file": ("image.jpg", media, "image/jpeg")},
timeout=self.timeout,
)

Expand Down Expand Up @@ -344,6 +344,7 @@ def create_detection(
media: bytes,
bboxes: List[Tuple[float, float, float, float, float]],
pose_id: int,
crop: bytes | None = None,
) -> Response:
"""Notify the detection of a wildfire on the picture taken by a camera.

Expand All @@ -356,6 +357,7 @@ def create_detection(
media: byte data of the picture
bboxes: list of tuples where each tuple is a relative coordinate in order xmin, ymin, xmax, ymax, conf
pose_id: pose_id of the detection
crop: optional byte data of a cropped picture associated with the detection

Returns:
HTTP response
Expand All @@ -366,12 +368,15 @@ def create_detection(
"bboxes": _dump_bbox_to_json(bboxes),
}
data["pose_id"] = str(pose_id)
files: Dict[str, Tuple[str, bytes, str]] = {"file": ("frame.jpg", media, "image/jpeg")}
if crop is not None:
files["crop"] = ("crop.jpg", crop, "image/jpeg")
return requests.post(
urljoin(self._route_prefix, ClientRoute.DETECTIONS_CREATE),
headers=self.headers,
data=data,
timeout=self.timeout,
files={"file": ("logo.png", media, "image/png")},
files=files,
)

def get_detection_url(self, detection_id: int) -> Response:
Expand Down Expand Up @@ -469,7 +474,13 @@ def fetch_latest_sequences(self) -> Response:
timeout=self.timeout,
)

def fetch_sequences_detections(
    self,
    sequence_id: int,
    limit: int = 10,
    desc: bool = True,
    with_crop: bool = True,
) -> Response:
    """List the detections that belong to a given sequence.

    >>> from pyroclient import client

    Args:
        sequence_id: ID of the associated sequence entry
        limit: maximum number of detections to fetch
        desc: whether to order the detections by created_at in descending order
        with_crop: whether to include the crop_url for detections that have a crop

    Returns:
        HTTP response
    """
    # Resolve the per-sequence route first, then issue a single GET carrying the
    # paging/ordering options as query parameters.
    endpoint = urljoin(self._route_prefix, ClientRoute.SEQUENCES_FETCH_DETECTIONS.format(seq_id=sequence_id))
    query = {"limit": limit, "desc": desc, "with_crop": with_crop}
    return requests.get(endpoint, headers=self.headers, params=query, timeout=self.timeout)

Expand Down
24 changes: 14 additions & 10 deletions src/app/api/api_v1/endpoints/detections.py
Original file line number Diff line number Diff line change
Expand Up @@ -353,6 +353,7 @@ async def create_detection(
),
pose_id: int = Form(..., gt=0, description="pose id of the detection"),
file: UploadFile = File(..., alias="file"),
crop_file: Optional[UploadFile] = File(None, alias="crop"),
detections: DetectionCRUD = Depends(get_detection_crud),
webhooks: WebhookCRUD = Depends(get_webhook_crud),
organizations: OrganizationCRUD = Depends(get_organization_crud),
Expand All @@ -371,8 +372,7 @@ async def create_detection(
detail="xmin & ymin are expected to be respectively smaller than xmax & ymax",
)

# Upload media
bucket_key = await upload_file(file, token_payload.organization_id, token_payload.sub)
# Authorize before any S3 upload to avoid orphan objects on 403
pose = cast(Pose, await poses.get(pose_id, strict=True))
if pose.camera_id != token_payload.sub:
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Access forbidden.")
Expand All @@ -381,6 +381,14 @@ async def create_detection(
if not bbox_strings:
raise HTTPException(status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, detail="Invalid bbox format.")

# Upload media
bucket_key = await upload_file(file, token_payload.organization_id, token_payload.sub)
crop_bucket_key: Optional[str] = None
if crop_file is not None:
crop_bucket_key = await upload_file(
crop_file, token_payload.organization_id, token_payload.sub, key_prefix="crop_"
)

created: List[Detection] = []
camera = cast(Camera, await cameras.get(token_payload.sub, strict=True))

Expand All @@ -393,6 +401,7 @@ async def create_detection(
camera_id=token_payload.sub,
pose_id=pose_id,
bucket_key=bucket_key,
crop_bucket_key=crop_bucket_key,
bbox=single_bboxes,
others_bboxes=others_bboxes,
)
Expand Down Expand Up @@ -529,17 +538,12 @@ async def get_detection_url(
# Check in DB
detection = cast(Detection, await detections.get(detection_id, strict=True))

if UserRole.ADMIN in token_payload.scopes:
camera = cast(Camera, await cameras.get(detection.camera_id, strict=True))
bucket = s3_service.get_bucket(s3_service.resolve_bucket_name(camera.organization_id))
return DetectionUrl(url=bucket.get_public_url(detection.bucket_key))

camera = cast(Camera, await cameras.get(detection.camera_id, strict=True))
if token_payload.organization_id != camera.organization_id:
if UserRole.ADMIN not in token_payload.scopes and token_payload.organization_id != camera.organization_id:
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Access forbidden.")
# Check in bucket
bucket = s3_service.get_bucket(s3_service.resolve_bucket_name(camera.organization_id))
return DetectionUrl(url=bucket.get_public_url(detection.bucket_key))
crop_url = bucket.get_public_url(detection.crop_bucket_key) if detection.crop_bucket_key else None
return DetectionUrl(url=bucket.get_public_url(detection.bucket_key), crop_url=crop_url)


@router.get("/", status_code=status.HTTP_200_OK, summary="Fetch all the detections")
Expand Down
18 changes: 12 additions & 6 deletions src/app/api/api_v1/endpoints/sequences.py
Original file line number Diff line number Diff line change
Expand Up @@ -105,6 +105,10 @@ async def fetch_sequence_detections(
sequence_id: int = Path(..., gt=0),
limit: int = Query(10, description="Maximum number of detections to fetch", ge=1, le=100),
desc: bool = Query(True, description="Whether to order the detections by created_at in descending order"),
with_crop: bool = Query(
True,
description="If true, presign and include crop_url for detections that have a crop. Set to false to skip the extra S3 head requests when crops are not needed.",
),
cameras: CameraCRUD = Depends(get_camera_crud),
detections: DetectionCRUD = Depends(get_detection_crud),
sequences: SequenceCRUD = Depends(get_sequence_crud),
Expand All @@ -118,17 +122,19 @@ async def fetch_sequence_detections(

# Get the bucket of the camera's organization
bucket = s3_service.get_bucket(s3_service.resolve_bucket_name(camera.organization_id))
fetched = await detections.fetch_all(
filters=("sequence_id", sequence_id),
order_by="created_at",
order_desc=desc,
limit=limit,
)
return [
DetectionWithUrl(
**DetectionRead(**elt.model_dump()).model_dump(),
url=bucket.get_public_url(elt.bucket_key),
crop_url=(bucket.get_public_url(elt.crop_bucket_key) if with_crop and elt.crop_bucket_key else None),
)
for elt in await detections.fetch_all(
filters=("sequence_id", sequence_id),
order_by="created_at",
order_desc=desc,
limit=limit,
)
for elt in fetched
]


Expand Down
1 change: 1 addition & 0 deletions src/app/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,7 @@ class Detection(SQLModel, table=True):
pose_id: int = Field(..., foreign_key="poses.id", nullable=False)
sequence_id: Union[int, None] = Field(None, foreign_key="sequences.id", nullable=True)
bucket_key: str
crop_bucket_key: Union[str, None] = Field(default=None, nullable=True)
bbox: str = Field(..., min_length=2, max_length=settings.MAX_BBOX_STR_LENGTH_SINGLE, nullable=False)
others_bboxes: Union[str, None] = Field(default=None, max_length=settings.MAX_BBOX_STR_LENGTH_OTHERS, nullable=True)
created_at: datetime = Field(default_factory=datetime.utcnow, nullable=False)
Expand Down
3 changes: 3 additions & 0 deletions src/app/schemas/detections.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ class DetectionCreate(BaseModel):
camera_id: int = Field(..., gt=0)
pose_id: int = Field(..., gt=0)
bucket_key: str
crop_bucket_key: Optional[str] = None
bbox: str = Field(
...,
min_length=2,
Expand All @@ -41,6 +42,7 @@ class DetectionCreate(BaseModel):

class DetectionUrl(BaseModel):
    """Response schema carrying temporary access URLs for one detection's media.

    ``crop_url`` stays ``None`` when the detection has no associated crop.
    """

    url: str = Field(..., description="temporary URL to access the media content")
    crop_url: Optional[str] = Field(None, description="temporary URL to access the cropped media content, if any")


class DetectionRead(Detection):
Expand All @@ -49,6 +51,7 @@ class DetectionRead(Detection):

class DetectionWithUrl(Detection):
    """Detection record extended with temporary access URLs for its media.

    ``crop_url`` stays ``None`` when the detection has no crop or when the
    caller opted out of crop URLs.
    """

    url: str = Field(..., description="temporary URL to access the media content")
    crop_url: Optional[str] = Field(None, description="temporary URL to access the cropped media content, if any")


class DetectionSequence(BaseModel):
Expand Down
7 changes: 4 additions & 3 deletions src/app/services/storage.py
Original file line number Diff line number Diff line change
Expand Up @@ -155,7 +155,7 @@ def resolve_bucket_name(organization_id: int) -> str:
return f"{settings.SERVER_NAME}-alert-api-{organization_id!s}"


async def upload_file(file: UploadFile, organization_id: int, camera_id: int) -> str:
async def upload_file(file: UploadFile, organization_id: int, camera_id: int, key_prefix: str = "") -> str:
"""Upload a file to S3 storage and return the public URL"""
# Concatenate the first 8 chars (to avoid system interactions issues) of SHA256 hash with file extension
sha_hash = hashlib.sha256(file.file.read()).hexdigest()
Expand All @@ -165,8 +165,9 @@ async def upload_file(file: UploadFile, organization_id: int, camera_id: int) ->
await file.seek(0)
# guess_extension will return none if this fails
extension = guess_extension(magic.from_buffer(file.file.read(), mime=True)) or ""
# Concatenate timestamp & hash
bucket_key = f"{camera_id}-{datetime.utcnow().strftime('%Y%m%d%H%M%S')}-{sha_hash[:8]}{extension}"
# Concatenate timestamp & hash; key_prefix lets callers segregate distinct uploads in the
# same request (e.g. frame vs crop) so identical bytes don't collide on the same key.
bucket_key = f"{key_prefix}{camera_id}-{datetime.utcnow().strftime('%Y%m%d%H%M%S')}-{sha_hash[:8]}{extension}"
# Reset byte position of the file (cf. https://fastapi.tiangolo.com/tutorial/request-files/#uploadfile)
await file.seek(0)
bucket_name = s3_service.resolve_bucket_name(organization_id)
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
"""Add crop_bucket_key to detections.

Revision ID: 7f1c4d2a9b3e
Revises: 9700bbccb2f1
Create Date: 2026-04-26 12:00:00.000000

"""

from typing import Sequence, Union

import sqlalchemy as sa
import sqlmodel
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "7f1c4d2a9b3e"
down_revision: Union[str, None] = "9700bbccb2f1"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    """Add the nullable ``crop_bucket_key`` column to the detections table."""
    # Nullable so existing detection rows (which have no crop) remain valid.
    crop_column = sa.Column("crop_bucket_key", sqlmodel.sql.sqltypes.AutoString(), nullable=True)
    op.add_column("detections", crop_column)


def downgrade() -> None:
    """Drop the ``crop_bucket_key`` column from the detections table."""
    op.drop_column("detections", "crop_bucket_key")
4 changes: 4 additions & 0 deletions src/tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -149,6 +149,7 @@
"pose_id": 1,
"sequence_id": 1,
"bucket_key": "my_file",
"crop_bucket_key": None,
"bbox": "[(.1,.1,.7,.8,.9)]",
"others_bboxes": None,
"created_at": datetime.strptime("2023-11-07T15:08:19.226673", dt_format),
Expand All @@ -159,6 +160,7 @@
"pose_id": 1,
"sequence_id": 1,
"bucket_key": "my_file",
"crop_bucket_key": None,
"bbox": "[(.1,.1,.7,.8,.9)]",
"others_bboxes": None,
"created_at": datetime.strptime("2023-11-07T15:18:19.226673", dt_format),
Expand All @@ -169,6 +171,7 @@
"pose_id": 1,
"sequence_id": 1,
"bucket_key": "my_file",
"crop_bucket_key": None,
"bbox": "[(.1,.1,.7,.8,.9)]",
"others_bboxes": None,
"created_at": datetime.strptime("2023-11-07T15:28:19.226673", dt_format),
Expand All @@ -179,6 +182,7 @@
"pose_id": 3,
"sequence_id": 2,
"bucket_key": "my_file",
"crop_bucket_key": None,
"bbox": "[(.1,.1,.7,.8,.9)]",
"others_bboxes": None,
"created_at": datetime.strptime("2023-11-07T16:08:19.226673", dt_format),
Expand Down
Loading
Loading