
Commit

re-initialize TODOs with new action
GondekNP committed Dec 5, 2024
1 parent 6eb396a commit 40ec289
Showing 13 changed files with 628 additions and 635 deletions.
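Most of the change is a marker rewrite: existing TODO comments, whether plain "# TODO:" notes or issue-numbered ones like "# TODO [#17]:", are re-tagged with a category label so the repository's todo-to-issue GitHub Action can pick them up again, and a handful of stale TODOs are simply deleted. A minimal sketch of the convention as it appears in the changed lines (the labels !feat, !urgent, !smelly, and !magic are taken from the diff; the exact behaviour of the action is an assumption, not something this commit documents):

# TODO(!feat): Short, issue-style title picked up by the todo-to-issue action
# Lines immediately below the marker become the body of the generated issue.
# After the action files an issue, it can write a reference back into the code:
# Issue URL: https://github.com/SchmidtDSE/burn-severity-mapping-poc/issues/48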
2 changes: 1 addition & 1 deletion .deployment/tofu/modules/titiler/main.tf
@@ -98,7 +98,7 @@ resource "google_artifact_registry_repository" "burn-backend" {
location = "us-central1"
}

## TODO: Both this and the burn-backend service should either require authenticated
## TODO(!feat): Both this and the burn-backend service should either require authenticated
## invocations or at least have a rate limit

# Allow unauthenticated invocations
11 changes: 5 additions & 6 deletions src/burn_backend/lib/derive_boundary.py
@@ -269,7 +269,7 @@ def derive_boundary(
pipeline,
):

## TODO: Some part of the spectral index process is creating a buffer of NaN
## TODO(!smelly): Some part of the spectral index process is creating a buffer of NaN
## at the outside edge of the metric layer - not an issue to replace with 0 in this case
## but zeros inside the image will be erroneously identified as unburned islands which is
## a big problem.
@@ -279,11 +279,10 @@ def derive_boundary(
interior_nan_filled, metric_values_exist_binary
)

# TODO: This is a new issue that should be picked up by the github action todo-to-issue
# Test test, hello world

# TODO(!urgent): This is a new issue that should be picked up by the github action todo-to-issue and its real serious!
# Issue URL: https://github.com/SchmidtDSE/burn-severity-mapping-poc/issues/48
# TODO (!feat): Improve (and investigate) handling of internal NaNs within derived boundary
# Internal NaNs are a problem because they may be interpreted as unburned islands, unless we handle
# them directly. So far so good, it appears we only get these at the edges of the boundary where we
# may have interpolation issues, so this is very conservative, but a little hacky.

if no_interior_nan_detected:
# In this case, we aren't missing interior unburned islands, but we still want the original
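An aside on the NaN handling described in the TODOs above: the spectral-index stack carries a buffer of NaN at the outer edge of the metric layer (safe to replace with 0) plus occasional interior NaNs that would be misread as unburned islands. One way to tell the two apart is a hole-filling pass over the valid-data mask, sketched here as a hypothetical helper (the function name and the use of scipy are illustrative assumptions, not part of this commit):

import numpy as np
from scipy import ndimage

def split_nan_regions(metric: np.ndarray):
    # Boolean mask of pixels that hold real metric values
    valid = ~np.isnan(metric)
    # Filling holes in the valid mask closes interior NaN islands but leaves
    # the NaN buffer that touches the image edge open
    filled = ndimage.binary_fill_holes(valid)
    interior_nan = filled & ~valid  # NaNs fully enclosed by valid data
    edge_nan = ~filled              # NaN buffer connected to the image edge
    return interior_nan, edge_nan

Interior-NaN pixels found this way can be filled from their neighbours (or flagged) before thresholding, while the edge buffer can safely be zeroed, in the spirit of the conservative handling the comment describes.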
10 changes: 5 additions & 5 deletions src/burn_backend/lib/query_sentinel.py
@@ -73,7 +73,7 @@ def __init__(
self.crs = crs
self.buffer = buffer

# TODO [#17]: Settle on standards for storing polygons
# TODO(!feat): Settle on standards for storing polygons
# Oscillating between geojsons and geopandas dataframes, which is a bit messy. Should pick one and stick with it.
self.geojson_boundary = None
self.bbox = None
@@ -99,7 +99,7 @@ def set_boundary(self, geojson_boundary):
None
"""
boundary_gpd = gpd.GeoDataFrame.from_features(geojson_boundary)
# TODO [#7]: Generalize Sentinel2Client to accept any CRS
# TODO(!feat): Generalize Sentinel2Client to accept any CRS
# This is hard-coded to assume 4326 - when we draw an AOI, we will change this logic depending on what makes frontend sense
if not boundary_gpd.crs:
geojson_boundary = boundary_gpd.set_crs("EPSG:4326")
@@ -147,7 +147,7 @@ def get_items(self, date_range, from_bbox=True, max_items=None):
"""
date_range_fmt = "{}/{}".format(date_range[0], date_range[1])

# TODO [#14]: Cloud cover response to smoke
# TODO(!feat): Cloud cover response to smoke
# Right now we don't give any mind to smoke occlusion, but we should, considering we will have bias if smoke occludes our imagery

query = {
@@ -266,7 +266,7 @@ def reduce_time_range(self, range_stack):
xarray.DataArray: The reduced range stack.
"""

# TODO [#30]: Think about best practice for reducing time dimension pre/post fire
# TODO(!feat): Think about best practice for reducing time dimension pre/post fire
# This will probably get a bit more sophisticated, but for now, just take the median
# We will probably run into issues of cloud occlusion, and for really long fire events,
# we might want to look into time-series effects of greenup, drying, etc, in the adjacent
@@ -426,7 +426,7 @@ def derive_boundary_flood_fill(

seed_indices = list(zip(*np.where(metric_layer["seed"].values[0, :, :])))

## TODO: Seed indices are essentially required at the moment, but this is an artifact
## TODO(!smelly): Seed indices are essentially required at the moment, but this is an artifact
## of flood fill segmentation, so this Pipeline should be more flexible in the future.
pipeline = Pipeline(
thresholding_strategy=OtsuThreshold(),
2 changes: 1 addition & 1 deletion src/burn_backend/routers/analyze/spectral_burn_metrics.py
@@ -44,7 +44,7 @@ class AnaylzeBurnPOSTBody(BaseModel):
final: bool = True


# TODO [#5]: Decide on / implement cloud tasks or other async batch
# TODO(!feat): Decide on / implement cloud tasks or other async batch
# This is a long running process, and users probably don't mind getting an email notification
# or something similar when the process is complete. Esp if the frontend remains static.
@router.post(
1 change: 0 additions & 1 deletion src/burn_backend/routers/batch/batch_analyze_and_fetch.py
@@ -140,7 +140,6 @@ def main(
}
geojson_name = "drawn_aoi_boundary" if derive_boundary else "boundary"

## TODO [#34]: Should probably define a class for batch analysis and fetch
job_status = {
"submitted": str(submission_time),
"fire_event_name": fire_event_name,
18 changes: 5 additions & 13 deletions src/burn_backend/routers/refine/flood_fill_segmentation.py
@@ -38,7 +38,7 @@ class FloodFillSegmentationPOSTBody(BaseModel):
affiliation: str


# TODO [#5]: Decide on / implement cloud tasks or other async batch
# TODO(!feat): Decide on / implement cloud tasks or other async batch
# This is a long running process, and users probably don't mind getting an email notification
# or something similar when the process is complete. Esp if the frontend remains static.
@router.post(
@@ -66,7 +66,9 @@ async def refine_flood_fill_segmentation(
"""
sentry_sdk.set_context("fire-event", {"request": body})

# TODO: No idea where this is getting converted to a dict...
# TODO(!smelly): geojson interpreted inconsistently as string or dict by FastAPI in burn-backend?
# Likely caused by a frontend issue with the request body - need to investigate

# geojson_seed_points = json.loads(body.geojson)
user_edits_geojson = body.geojson

@@ -89,21 +91,13 @@ async def main(
logger,
cloud_static_io_client,
):
## NOTE: derive_boundary is accepted for now to maintain compatibility with the frontend,
## but will shortly be a different endpoint

logger.info(f"Received flood-fill-segmentation request for {fire_event_name}")

try:
# create a Sentinel2Client instance, without initializing, since we are
# getting what we need from the cogs already generated
geo_client = Sentinel2Client()

## TODO: Since we are running serverless, and don't have a live database, we are
## required to re-construct the metrics stack from the existing files, in the case
## where the user has identified fire boundaries from our intermediate rbr output.
## This is not ideal, but not sure there is a better solution without using something
## like redis or another live cache.
metric_layers = []
for metric_name in ["nbr_prefire", "nbr_postfire", "dnbr", "rdnbr", "rbr"]:
with tempfile.NamedTemporaryFile(suffix=".tif", delete=False) as tmp:
@@ -131,12 +125,10 @@
inplace=True,
)

# save the derived boundary to the FTP server
# save the derived boundary to the S3 bucket
with tempfile.NamedTemporaryFile(suffix=".geojson", delete=False) as tmp:
tmp_geojson = tmp.name

## TODO: This is a little circuitous but, was getting errors trying to
## use .to_json() and saving the result
geo_client.geojson_boundary.to_file(tmp_geojson, driver="GeoJSON")
derived_boundary_json = json.load(open(tmp_geojson))

2 changes: 1 addition & 1 deletion src/common/lib/backend_dependencies.py
@@ -97,7 +97,7 @@ def init_sentry(logger: Logger = Depends(get_cloud_logger)):
"""
logger.info("Initializing Sentry client")

## TODO [#28]: Move sentry to environment variable if we keep sentry
## TODO(!magic): Move sentry to environment variable if we keep sentry
sentry_sdk.init(
dsn="https://3660129e232b3c796208a5e46945d838@o4506701219364864.ingest.sentry.io/4506701221199872",
# Set traces_sample_rate to 1.0 to capture 100%
6 changes: 1 addition & 5 deletions src/common/util/cloud_static_io.py
@@ -280,9 +280,6 @@ def upload_cogs(self, metrics_stack, fire_event_name, affiliation, final=True):
ds.build_overviews([2, 4, 8, 16, 32], Resampling.nearest)
ds.update_tags(ns="rio_overview", resampling="nearest")

## TODO: Stupid solution to not having control over GDAL's cache
## We need to be able to invalidate it after we crop, but we can't do that
## and if we disable it entirely, performance is terrible.
if not final:
band_name = f"intermediate_{band_name}"

@@ -328,8 +325,7 @@ def upload_rap_estimates(self, rap_estimates, fire_event_name, affiliation):
"""
with tempfile.TemporaryDirectory() as tmpdir:
for band_name in rap_estimates.band.to_index():
# TODO [#23]: This is the same logic as in upload_cogs. Refactor to avoid duplication
# Save the band as a local COG
# TODO(!feat): Refactor upload_cogs and upload_rap_estimates to share code
local_cog_path = os.path.join(tmpdir, f"{band_name}.tif")
band_cog = rap_estimates.sel(band=band_name).rio
band_cog.to_raster(local_cog_path, driver="GTiff")
1 change: 0 additions & 1 deletion src/titiler/routers/pages/map.py
@@ -46,7 +46,6 @@ def serve_map(

s3_bucket_name = os.getenv("S3_BUCKET_NAME")

## TODO [#21]: Use Tofu Output to construct hardcoded cog and geojson urls (in case we change s3 bucket name)
cog_url = f"https://{s3_bucket_name}.s3.us-east-2.amazonaws.com/public/{affiliation}/{fire_event_name}/{burn_metric}.tif"
burn_boundary_geojson_url = f"https://{s3_bucket_name}.s3.us-east-2.amazonaws.com/public/{affiliation}/{fire_event_name}/boundary.geojson"
ecoclass_geojson_url = f"https://{s3_bucket_name}.s3.us-east-2.amazonaws.com/public/{affiliation}/{fire_event_name}/ecoclass_dominant_cover.geojson"
3 changes: 2 additions & 1 deletion src/titiler/routers/pages/upload.py
@@ -20,7 +20,8 @@ def upload(
endpoint_titiler: str = Depends(get_endpoint_titiler),
endpoint_burn_backend: str = Depends(get_endpoint_burn_backend),
):
## TODO: These thresholds should be configurable, and probably should use the same
## TODO(!feat): Reuse UI elements from Map for Upload page
## these thresholds should be configurable, and probably should use the same
## frontend elements as the threshold sliders within the map. Going to punt on that for now,
## since the map needs a refactor in the vein of the upload refactor.
cog_tileserver_url_prefix = (