Skip to content

Commit

Permalink
Update theropoda.py
Browse files Browse the repository at this point in the history
  • Loading branch information
vieiramesquita authored Jun 30, 2024
1 parent 2eabe63 commit cad040d
Showing 1 changed file with 4 additions and 76 deletions.
80 changes: 4 additions & 76 deletions theropoda.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,11 +49,6 @@
from joblib import Parallel, delayed
from loguru import logger
import sqlite3
from skmap.misc import date_range, ttprint
from trend_analysis import run as trend_run
from skmap import parallel
import argparse
import shutil

logger.add("log_do_.log", rotation="500 MB")

Expand Down Expand Up @@ -181,11 +176,11 @@ def reduceData(img):
.set('satelite',img.get('satelite')) #Sapacraft plataform name (i.e. Sentinel 2A or 2B)
.set('MGRS_TILE',img.get('MGRS_TILE')) #Reference tile grid
.set('AREA_HA',ee.Feature(geometry).area(1).divide(10000)) #Choosed polygon ID Field
#.set('NDVI_mean',ee.Number(ee.Dictionary(series).get('NDVI_mean'))) #NDVI pixel average for the polygon
.set('NDVI_mean',ee.Number(ee.Dictionary(series).get('NDVI_mean'))) #NDVI pixel average for the polygon
.set('NDVI_median',ee.Number(ee.Dictionary(series).get('NDVI_median'))) #NDVI pixel median for the polygon
#.set('NDVI_min',ee.Number(ee.Dictionary(series).get('NDVI_min'))) #NDVI pixel minimum value for the polygon
#.set('NDVI_max',ee.Number(ee.Dictionary(series).get('NDVI_max'))) #NDVI pixel maximum value for the polygon
#.set('NDVI_stdDev',ee.Number(ee.Dictionary(series).get('NDVI_stdDev'))) #NDVI pixel Standard Deviation for the polygon
.set('NDVI_min',ee.Number(ee.Dictionary(series).get('NDVI_min'))) #NDVI pixel minimum value for the polygon
.set('NDVI_max',ee.Number(ee.Dictionary(series).get('NDVI_max'))) #NDVI pixel maximum value for the polygon
.set('NDVI_stdDev',ee.Number(ee.Dictionary(series).get('NDVI_stdDev'))) #NDVI pixel Standard Deviation for the polygon
.set('Pixel_Count',ee.Number(ee.Dictionary(series).get('NDVI_count'))) #Number of pixels cloudless and shadowless used for estimatives
.set('Total_Pixels',ee.Number(ee.Dictionary(series).get('full_count'))) #Total number of pixels inside the polygon
.set('Pixel_Size',pixel_size) #Size of the pixel used
Expand Down Expand Up @@ -496,70 +491,3 @@ def run(asset,id_field,output_name,colab_folder):

logger.success(f'The average processing time was {round(pd.DataFrame(time_list).mean()[0],2)} seconds')
logger.success(f'Processing finished. All the work took {round(time.time() - start_time,3)} seconds to complete')

#from google.colab import drive
#drive.mount('/content/drive/')

if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='Toolkit created to extract Time Series information from Sentinel 2 stored in Earth Engine, perform gap filling and trend analysis image.')

    parser.add_argument('--asset', type=str, required=True, help='The asset name or path')
    parser.add_argument('--id_field', type=str, required=True, help='The ID field name')
    parser.add_argument('--output_name', type=str, required=True, help='The output file name')

    args = parser.parse_args()

    asset = args.asset #'users/vieiramesquita/LAPIG_FieldSamples/lapig_goias_fieldwork_2022_50m' #Earth Engine Vector Asset
    id_field = args.id_field #'ID_POINTS' #Vector collumn used as ID (use unique identifiers!)

    # The SQLite database is named after the last path segment of the asset.
    db = asset.split('/')[-1]

    db_name = db + '.db'

    colab_folder = ''
    output_name = args.output_name #db_name

    # Touch the database file so it exists before downstream code opens it.
    conn = sqlite3.connect(db_name)
    conn.close()

    #Check if polygon list file exists; build it on first run only.
    if os.path.exists(os.path.join(colab_folder, db + '_polygonList.txt')) is False:
        build_id_list(asset, id_field, colab_folder)

    # Extract the NDVI time series into the SQLite output database.
    run(asset, id_field, output_name, colab_folder)

    input_file = output_name
    start_date_trend, end_date_trend = '2019-01-01', '2024-01-01'
    output_file_trends = f'{output_name[:-3]}_trend_analysis.pq'

    ################################
    ## SQLITE access
    ################################
    ttprint(f"Preparing {output_name}")
    con = sqlite3.connect(output_name)
    cur = con.cursor()
    # Index the polygon-id column so the per-polygon queries below are fast.
    cur.execute(f"CREATE INDEX IF NOT EXISTS restoration_id_pol ON restoration ({id_field})")
    con.commit()

    ################################
    ## Common data structures
    ################################
    ttprint(f"Preparing polygon ids")

    # One row per polygon: its id, observation date range, and observation count.
    idx_sql = f"SELECT {id_field}, MIN(date) min_date, MAX(date) max_date, COUNT(*) count FROM restoration GROUP BY 1 ORDER BY 1"
    idx = pd.read_sql_query(idx_sql, con=con)
    con.close()  # last use of the connection; close it instead of leaking it

    # Regular 5-day time grid over the trend-analysis window (Feb 29 skipped
    # so every year has the same number of steps).
    dt_5days = list(date_range(start_date_trend, end_date_trend, date_unit='days', date_step=5, ignore_29feb=True))
    season_size = int(len(dt_5days) / 5)

    # One argument tuple per polygon for the parallel trend-analysis jobs.
    args = [(output_name, r[f'{id_field}'], dt_5days, season_size, id_field, output_file_trends) for _, r in idx.iterrows()]

    ttprint(f"Starting trend analysis on {len(args)} polygons")
    # Drain the generator so every job actually executes.
    for id_pol in parallel.job(trend_run, args, joblib_args={'backend': 'multiprocessing'}):
        continue

    # Consolidate the intermediate .pq output into the final .parquet file.
    df2conv = pd.read_parquet(output_file_trends)
    df2conv.to_parquet(f'{output_name[:-3]}_trend_analysis.parquet')
    df2conv = None

    # BUG FIX: the original called shutil.rmtree(df2conv) right after setting
    # df2conv = None, which raises TypeError on every run. The intent is to
    # remove the intermediate trend-analysis output, so delete that path
    # (it may be a directory of per-polygon parquet parts, or a single file).
    if os.path.isdir(output_file_trends):
        shutil.rmtree(output_file_trends)
    elif os.path.exists(output_file_trends):
        os.remove(output_file_trends)

0 comments on commit cad040d

Please sign in to comment.