From aca1e0cb85ac4a69ee58e69585174959e63e2a30 Mon Sep 17 00:00:00 2001
From: sambles
Date: Mon, 30 Jan 2023 13:26:30 +0000
Subject: [PATCH 01/33] Added OED validation on file upload, and updated ods-tools package to 3.0.1 (#724)

* test file return format conv

This reverts commit 8c0de7ef51942caaa80b52e186fa5b10db2cc5a7.

* Draft option to validate oed files on upload
* validate on upload - wip
* Note for later
* Draft serializer to return portolio validation status
* WIP GET validated files
* POST portfolio validate
* Add validation url param
* read validation option, url/settings.py
* Set ods-tools 3.0.1
* fix unittests
* Fix validation on parquet file uploads
* Fix handling of invalid data upload
* Fix typos and missing docs strings
* remove dup func
* Turn valadation on by default
* Add validation unit testing
* Add test_all_exposure__are_valid
* set set_portolio_valid as portfolio method instead of function
* Move VALIDATION_CONFIG to settings.py
* Update Swagger to show validate on upload default=true
---
 requirements-server.txt                       |   2 +-
 requirements-worker.txt                       |   2 +-
 requirements.txt                              |   2 +-
 .../0005_relatedfile_oed_validated.py         |  18 +
 src/server/oasisapi/files/models.py           |  16 +
 src/server/oasisapi/files/serializers.py      |  48 +-
 src/server/oasisapi/files/views.py            |  37 +-
 src/server/oasisapi/portfolios/models.py      |  39 +-
 src/server/oasisapi/portfolios/serializers.py |  38 ++
 .../portfolios/tests/test_portfolio.py        | 426 ++++++++++++++++++
 src/server/oasisapi/portfolios/viewsets.py    | 119 +++--
 src/server/oasisapi/schemas/custom_swagger.py |  26 +-
 src/server/oasisapi/settings.py               |  11 +
 13 files changed, 708 insertions(+), 76 deletions(-)
 create mode 100644 src/server/oasisapi/files/migrations/0005_relatedfile_oed_validated.py

diff --git a/requirements-server.txt b/requirements-server.txt
index 1f0d42ff7..537b47060 100644
--- a/requirements-server.txt
+++ b/requirements-server.txt
@@ -165,7 +165,7 @@ numpy==1.23.4
     #   pyarrow
 oauthlib==3.2.2
     # via requests-oauthlib
-ods-tools==2.3.2
+ods-tools==3.0.1
     # via -r requirements-server.in
 packaging==21.3
     # via drf-yasg
diff --git a/requirements-worker.txt b/requirements-worker.txt
index f48357b48..7b55ed3cb 100644
--- a/requirements-worker.txt
+++ b/requirements-worker.txt
@@ -146,7 +146,7 @@ oasislmf[extra]==1.27.0
     # via -r requirements-worker.in
 oauthlib==3.2.2
     # via requests-oauthlib
-ods-tools==3.0.0
+ods-tools==3.0.1
     # via oasislmf
 packaging==21.3
     # via
diff --git a/requirements.txt b/requirements.txt
index ac8a2195f..e81bba17f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -282,7 +282,7 @@ oasislmf[extra]==1.27.0
     # via -r ./requirements-worker.in
 oauthlib==3.2.2
     # via requests-oauthlib
-ods-tools==3.0.0
+ods-tools==3.0.1
     # via
     #   -r ./requirements-server.in
     #   oasislmf
 packaging==21.3
     # via
diff --git a/src/server/oasisapi/files/migrations/0005_relatedfile_oed_validated.py b/src/server/oasisapi/files/migrations/0005_relatedfile_oed_validated.py
new file mode 100644
index 000000000..6e89014c9
--- /dev/null
+++ b/src/server/oasisapi/files/migrations/0005_relatedfile_oed_validated.py
@@ -0,0 +1,18 @@
+# Generated by Django 3.2.16 on 2023-01-16 11:16
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('files', '0004_remove_relatedfile_aws_location'),
+    ]
+
+    operations = [
+        migrations.AddField(
+            model_name='relatedfile',
+            name='oed_validated',
+            field=models.BooleanField(default=False, editable=False),
+        ),
+    ]
diff --git a/src/server/oasisapi/files/models.py b/src/server/oasisapi/files/models.py
index
86150ebe7..9a2f69acc 100644 --- a/src/server/oasisapi/files/models.py +++ b/src/server/oasisapi/files/models.py @@ -1,5 +1,8 @@ import os +import io + from uuid import uuid4 +import pandas as pd from django.conf import settings from django.db import models @@ -7,6 +10,18 @@ from model_utils.models import TimeStampedModel + +def related_file_to_df(RelatedFile): + if not RelatedFile: + return None + + RelatedFile.file.seek(0) + if RelatedFile.content_type == 'application/octet-stream': + return pd.read_parquet(io.BytesIO(RelatedFile.read())) + else: + return pd.read_csv(io.BytesIO(RelatedFile.read())) + + def random_file_name(instance, filename): if instance.store_as_filename: return filename @@ -61,6 +76,7 @@ class RelatedFile(TimeStampedModel): # filehash_md5 = models.CharField(max_length=255, editable=False, default="", blank=True) content_type = models.CharField(max_length=255) store_as_filename = models.BooleanField(default=False, blank=True, null=True) + oed_validated = models.BooleanField(default=False, editable=False) def __str__(self): return 'File_{}'.format(self.file) diff --git a/src/server/oasisapi/files/serializers.py b/src/server/oasisapi/files/serializers.py index f71f94ad8..045fdba8a 100644 --- a/src/server/oasisapi/files/serializers.py +++ b/src/server/oasisapi/files/serializers.py @@ -1,13 +1,15 @@ import logging import hashlib import io -import ods_tools +import pandas as pd +from ods_tools.oed.exposure import OedExposure from rest_framework import serializers from rest_framework.exceptions import ValidationError from django.core.files.uploadedfile import UploadedFile +from django.conf import settings as django_settings -from .models import RelatedFile +from .models import RelatedFile, related_file_to_df logger = logging.getLogger('root') @@ -17,6 +19,13 @@ 'application/vnd.ms-excel': 'text/csv' } +EXPOSURE_ARGS = { + 'accounts_file': 'account', + 'location_file': 'location', + 'reinsurance_info_file': 'ri_info', + 'reinsurance_scope_file': 'ri_scope' +} + def md5_filehash(in_memory_file, chunk_size=4096): hasher_md5 = hashlib.md5() @@ -36,21 +45,39 @@ class Meta: # 'filehash_md5', ) - def __init__(self, *args, content_types=None, parquet_storage=False, **kwargs): + def __init__(self, *args, content_types=None, parquet_storage=False, field=None, oed_validate=None, **kwargs): self.content_types = content_types or [] self.parquet_storage = parquet_storage + self.oed_field = field + self.oed_validate = oed_validate if oed_validate is not None else django_settings.PORTFOLIO_UPLOAD_VALIDATION super(RelatedFileSerializer, self).__init__(*args, **kwargs) def validate(self, attrs): - # Covert to parquet if option is on and file is CSV - if self.parquet_storage and attrs['file'].content_type == 'text/csv': + run_validation = self.oed_validate and self.oed_field in EXPOSURE_ARGS + convert_to_parquet = self.parquet_storage and attrs['file'].content_type == 'text/csv' + + # Create dataframe from file upload + if run_validation or convert_to_parquet: try: - attrs['file'].seek(0) - temp_df = ods_tools.read_csv(io.BytesIO(attrs['file'].read())) + uploaded_data_df = related_file_to_df(attrs['file']) + except Exception as e: + raise ValidationError('Failed to read uploaded data [{}]'.format(e)) - # Create new UploadedFile object + # Run OED Validation + if run_validation: + uploaded_exposure = OedExposure(**{ + EXPOSURE_ARGS[self.oed_field]: uploaded_data_df, + 'validation_config': django_settings.PORTFOLIO_VALIDATION_CONFIG + }) + oed_validation_errors = uploaded_exposure.check() + if 
len(oed_validation_errors) > 0: + raise ValidationError(detail=[(error['name'], error['msg']) for error in oed_validation_errors]) + + # Convert 'CSV' upload to 'parquet' + if convert_to_parquet: + try: f = io.open(attrs['file'].name + '.parquet', 'wb+') - temp_df.to_parquet(f) + uploaded_data_df.to_parquet(f) in_memory_file = UploadedFile( file=f, name=f.name, @@ -65,9 +92,10 @@ def validate(self, attrs): attrs['creator'] = self.context['request'].user attrs['content_type'] = attrs['file'].content_type attrs['filename'] = attrs['file'].name - # attrs['filehash_md5'] = md5_filehash(self.context['request'].FILES['file']) + attrs['oed_validated'] = self.oed_validate return super(RelatedFileSerializer, self).validate(attrs) + def validate_file(self, value): mapped_content_type = CONTENT_TYPE_MAPPING.get(value.content_type, value.content_type) if self.content_types and mapped_content_type not in self.content_types: diff --git a/src/server/oasisapi/files/views.py b/src/server/oasisapi/files/views.py index 602ddc6fe..dc2c078cd 100644 --- a/src/server/oasisapi/files/views.py +++ b/src/server/oasisapi/files/views.py @@ -1,14 +1,23 @@ import json import io -import ods_tools +import pandas as pd +from ods_tools.oed.exposure import OedExposure from django.core.files.uploadedfile import UploadedFile from django.http import StreamingHttpResponse, Http404, QueryDict +from django.conf import settings as django_settings from rest_framework.response import Response from .serializers import RelatedFileSerializer +EXPOSURE_ARGS = { + 'accounts_file': 'account', + 'location_file': 'location', + 'reinsurance_info_file': 'ri_info', + 'reinsurance_scope_file': 'ri_scope' +} + def _delete_related_file(parent, field): """ Delete an attached RelatedFile model @@ -39,9 +48,16 @@ def _handle_get_related_file(parent, field, file_format): # Parquet format requested and data stored as csv if file_format == 'parquet' and f.content_type == 'text/csv': output_buffer = io.BytesIO() - df = ods_tools.read_csv(io.BytesIO(f.file.read())) + + # Load DataFrame and pass to ods-tools exposure class + exposure = OedExposure(**{ + EXPOSURE_ARGS[field]: pd.read_csv(io.BytesIO(f.file.read())) + }) + + df = getattr(exposure, EXPOSURE_ARGS[field]).dataframe df.to_parquet(output_buffer, index=False) output_buffer.seek(0) + response = StreamingHttpResponse(output_buffer, content_type='application/octet-stream') response['Content-Disposition'] = 'attachment; filename="{}{}"'.format(download_name, '.parquet') return response @@ -49,9 +65,16 @@ def _handle_get_related_file(parent, field, file_format): # CSV format requested and data stored as Parquet if file_format == 'csv' and f.content_type == 'application/octet-stream': output_buffer = io.BytesIO() - df = ods_tools.read_parquet(io.BytesIO(f.file.read())) + + exposure = OedExposure(**{ + EXPOSURE_ARGS[field]: pd.read_parquet(io.BytesIO(f.file.read())), + 'check_oed': False, + }) + + df = getattr(exposure, EXPOSURE_ARGS[field]).dataframe df.to_csv(output_buffer, index=False) output_buffer.seek(0) + response = StreamingHttpResponse(output_buffer, content_type='text/csv') response['Content-Disposition'] = 'attachment; filename="{}{}"'.format(download_name, '.csv') return response @@ -62,8 +85,8 @@ def _handle_get_related_file(parent, field, file_format): return response -def _handle_post_related_file(parent, field, request, content_types, parquet_storage): - serializer = RelatedFileSerializer(data=request.data, content_types=content_types, context={'request': request}, 
parquet_storage=parquet_storage) +def _handle_post_related_file(parent, field, request, content_types, parquet_storage, oed_validate): + serializer = RelatedFileSerializer(data=request.data, content_types=content_types, context={'request': request}, parquet_storage=parquet_storage, field=field, oed_validate=oed_validate) serializer.is_valid(raise_exception=True) instance = serializer.create(serializer.validated_data) @@ -134,14 +157,14 @@ def _json_read_from_file(parent, field): else: return Response(json.load(f)) -def handle_related_file(parent, field, request, content_types, parquet_storage=False): +def handle_related_file(parent, field, request, content_types, parquet_storage=False, oed_validate=None): method = request.method.lower() if method == 'get': requested_format = request.GET.get('file_format', None) return _handle_get_related_file(parent, field, file_format=requested_format) elif method == 'post': - return _handle_post_related_file(parent, field, request, content_types, parquet_storage) + return _handle_post_related_file(parent, field, request, content_types, parquet_storage, oed_validate) elif method == 'delete': return _handle_delete_related_file(parent, field) diff --git a/src/server/oasisapi/portfolios/models.py b/src/server/oasisapi/portfolios/models.py index 0992c875e..78947f38c 100644 --- a/src/server/oasisapi/portfolios/models.py +++ b/src/server/oasisapi/portfolios/models.py @@ -8,8 +8,11 @@ from django.dispatch import receiver from model_utils.models import TimeStampedModel from rest_framework.reverse import reverse +from rest_framework.exceptions import ValidationError -from ..files.models import RelatedFile +from ..files.models import RelatedFile, related_file_to_df + +from ods_tools.oed.exposure import OedExposure class Portfolio(TimeStampedModel): @@ -42,9 +45,37 @@ def get_absolute_reinsurance_info_file_url(self, request=None): def get_absolute_reinsurance_scope_file_url(self, request=None): return reverse('portfolio-reinsurance-scope-file', kwargs={'version': 'v1', 'pk': self.pk}, request=request) - def get_absolute_storage_url(self, request=None): + def get_absolute_storage_url(self, request=None): return reverse('portfolio-storage-links', kwargs={'version': 'v1', 'pk': self.pk}, request=request) + def set_portolio_valid(self): + oed_files = [ + 'accounts_file', + 'location_file', + 'reinsurance_info_file', + 'reinsurance_scope_file', + ] + for ref in oed_files: + file_ref = getattr(self, ref) + if file_ref: + file_ref.oed_validated = True + file_ref.save() + + def run_oed_validation(self): + portfolio_exposure = OedExposure( + location=related_file_to_df(self.location_file), + account=related_file_to_df(self.accounts_file), + ri_info=related_file_to_df(self.reinsurance_info_file), + ri_scope=related_file_to_df(self.reinsurance_scope_file), + validation_config=settings.PORTFOLIO_VALIDATION_CONFIG) + validation_errors = portfolio_exposure.check() + + # Set validation fields to true or raise exception + if validation_errors: + raise ValidationError(detail=[(error['name'], error['msg']) for error in validation_errors]) + else: + self.set_portolio_valid() + class PortfolioStatus(TimeStampedModel): @@ -55,12 +86,12 @@ def __str__(self): def delete_connected_files(sender, instance, **kwargs): """ Post delete handler to clear out any dangaling analyses files """ - files_for_removal = [ + files_for_removal = [ 'accounts_file', 'location_file', 'reinsurance_info_file', 'reinsurance_scope_file', - ] + ] for ref in files_for_removal: file_ref = getattr(instance, ref) if 
file_ref: diff --git a/src/server/oasisapi/portfolios/serializers.py b/src/server/oasisapi/portfolios/serializers.py index 58aa4ae4b..4392ad271 100644 --- a/src/server/oasisapi/portfolios/serializers.py +++ b/src/server/oasisapi/portfolios/serializers.py @@ -370,3 +370,41 @@ def create(self, validated_data): if 'request' in self.context: data['creator'] = self.context.get('request').user return super(CreateAnalysisSerializer, self).create(data) + + + +class PortfolioValidationSerializer(serializers.ModelSerializer): + accounts_validated = serializers.SerializerMethodField() + location_validated = serializers.SerializerMethodField() + reinsurance_info_validated = serializers.SerializerMethodField() + reinsurance_scope_validated = serializers.SerializerMethodField() + + + class Meta: + model = Portfolio + fields = ( + 'location_validated', + 'accounts_validated', + 'reinsurance_info_validated', + 'reinsurance_scope_validated', + ) + + @swagger_serializer_method(serializer_or_field=serializers.CharField) # should it be BooleanField ? + def get_location_validated(self, instance): + if instance.location_file: + return instance.location_file.oed_validated + + @swagger_serializer_method(serializer_or_field=serializers.CharField) + def get_accounts_validated(self, instance): + if instance.accounts_file: + return instance.accounts_file.oed_validated + + @swagger_serializer_method(serializer_or_field=serializers.CharField) + def get_reinsurance_info_validated(self, instance): + if instance.reinsurance_info_file: + return instance.reinsurance_info_file.oed_validated + + @swagger_serializer_method(serializer_or_field=serializers.CharField) + def get_reinsurance_scope_validated(self, instance): + if instance.reinsurance_scope_file: + return instance.reinsurance_scope_file.oed_validated diff --git a/src/server/oasisapi/portfolios/tests/test_portfolio.py b/src/server/oasisapi/portfolios/tests/test_portfolio.py index dc11d7bee..e7a3de411 100644 --- a/src/server/oasisapi/portfolios/tests/test_portfolio.py +++ b/src/server/oasisapi/portfolios/tests/test_portfolio.py @@ -872,3 +872,429 @@ def test_reinsurance_info_file_is_uploaded_as_parquet___file_can_be_retrieved(se prq_return_data = pd.read_parquet(io.BytesIO(parquet_response.content)) pd.testing.assert_frame_equal(prq_return_data, test_data) + + + + + +LOCATION_DATA_VALID = """PortNumber,AccNumber,LocNumber,IsTenant,BuildingID,CountryCode,Latitude,Longitude,StreetAddress,PostalCode,OccupancyCode,ConstructionCode,LocPerilsCovered,BuildingTIV,OtherTIV,ContentsTIV,BITIV,LocCurrency,OEDVersion +1,A11111,10002082046,1,1,GB,52.76698052,-0.895469856,1 ABINGDON ROAD,LE13 0HL,1050,5000,WW1,220000,0,0,0,GBP,2.0.0 +1,A11111,10002082047,1,1,GB,52.76697956,-0.89536613,2 ABINGDON ROAD,LE13 0HL,1050,5000,WW1,790000,0,0,0,GBP,2.0.0 +1,A11111,10002082048,1,1,GB,52.76697845,-0.895247587,3 ABINGDON ROAD,LE13 0HL,1050,5000,WW1,160000,0,0,0,GBP,2.0.0 +1,A11111,10002082049,1,1,GB,52.76696096,-0.895473908,4 ABINGDON ROAD,LE13 0HL,1050,5000,WW1,30000,0,0,0,GBP,2.0.0 +""" + +ACCOUNT_DATA_VALID = """PortNumber,AccNumber,AccCurrency,PolNumber,PolPerilsCovered,PolInceptionDate,PolExpiryDate,LayerNumber,LayerParticipation,LayerLimit,LayerAttachment,OEDVersion +1,A11111,GBP,Layer1,WW1,2018-01-01,2018-12-31,1,0.3,5000000,500000,2.0.0 +1,A11111,GBP,Layer2,WW1,2018-01-01,2018-12-31,2,0.3,100000000,5500000,2.0.0 +""" + +INFO_DATA_VALID = 
"""ReinsNumber,ReinsLayerNumber,ReinsName,ReinsPeril,ReinsInceptionDate,ReinsExpiryDate,CededPercent,RiskLimit,RiskAttachment,OccLimit,OccAttachment,PlacedPercent,ReinsCurrency,InuringPriority,ReinsType,RiskLevel,UseReinsDates,OEDVersion +1,1,ABC QS,WW1,2018-01-01,2018-12-31,1,0,0,0,0,1,GBP,1,SS,LOC,N,2.0.0 +""" + +SCOPE_DATA_VALID = """ReinsNumber,PortNumber,AccNumber,PolNumber,LocGroup,LocNumber,CedantName,ProducerName,LOB,CountryCode,ReinsTag,CededPercent,OEDVersion +1,1,A11111,,,10002082047,,,,,,0.1,2.0.0 +1,1,A11111,,,10002082048,,,,,,0.2,2.0.0 +""" + +LOCATION_DATA_INVALID = """Port,AccNumber,LocNumb,IsTenant,BuildingID,CountryCode,Latitude,Longitude,Street,PostalCode,OccupancyCode,ConstructionCode,LocPerilsCovered,BuildingTIV,OtherTIV,ContentsTIV,BITIV,LocCurrency,OEDVersion +1,A11111,10002082046,1,1,GB,52.76698052,-0.895469856,1 ABINGDON ROAD,LE13 0HL,1050,5000,WW1,220000,0,0,0,GBP,2.0.0 +1,A11111,10002082047,1,1,GB,52.76697956,-0.89536613,2 ABINGDON ROAD,LE13 0HL,1050,5000,XXYA,790000,0,0,0,GBP,2.0.0 +1,A11111,10002082048,1,1,GB,52.76697845,,3 ABINGDON ROAD,LE13 0HL,1050,5000,WW1,160000,0,0,0,GBP,2.0.0 +1,A11111,10002082049,1,1,GB,52.76696096,-0.895473908,4 ABINGDON ROAD,LE13 0HL,1050,-1,WW1,30000,0,0,0,GBP,2.0.0 +""" + +class PortfolioValidation(WebTestMixin, TestCase): + + def test_all_exposure__are_valid(self): + content_type='text/csv' + loc_data = pd.read_csv(io.StringIO(LOCATION_DATA_VALID)) + acc_data = pd.read_csv(io.StringIO(ACCOUNT_DATA_VALID)) + inf_data = pd.read_csv(io.StringIO(INFO_DATA_VALID)) + scp_data = pd.read_csv(io.StringIO(SCOPE_DATA_VALID)) + + loc_file_content= loc_data.to_csv(index=False).encode('utf-8') + acc_file_content= acc_data.to_csv(index=False).encode('utf-8') + inf_file_content= inf_data.to_csv(index=False).encode('utf-8') + scp_file_content= scp_data.to_csv(index=False).encode('utf-8') + + with TemporaryDirectory() as d: + with override_settings(MEDIA_ROOT=d): + user = fake_user() + portfolio = fake_portfolio() + + self.app.post( + portfolio.get_absolute_location_file_url(), + headers={ + 'Authorization': 'Bearer {}'.format(AccessToken.for_user(user)) + }, + upload_files=( + ('file', 'file{}'.format(mimetypes.guess_extension(content_type)), loc_file_content), + ), + ) + self.app.post( + portfolio.get_absolute_accounts_file_url(), + headers={ + 'Authorization': 'Bearer {}'.format(AccessToken.for_user(user)) + }, + upload_files=( + ('file', 'file{}'.format(mimetypes.guess_extension(content_type)), acc_file_content), + ), + ) + self.app.post( + portfolio.get_absolute_reinsurance_info_file_url(), + headers={ + 'Authorization': 'Bearer {}'.format(AccessToken.for_user(user)) + }, + upload_files=( + ('file', 'file{}'.format(mimetypes.guess_extension(content_type)), inf_file_content), + ), + ) + self.app.post( + portfolio.get_absolute_reinsurance_scope_file_url(), + headers={ + 'Authorization': 'Bearer {}'.format(AccessToken.for_user(user)) + }, + upload_files=( + ('file', 'file{}'.format(mimetypes.guess_extension(content_type)), scp_file_content), + ), + ) + + validate_response = self.app.get( + portfolio.get_absolute_url() + 'validate/', + headers={ + 'Authorization': 'Bearer {}'.format(AccessToken.for_user(user)) + }, + ) + + # Get current validate status - Not yet run + self.assertEqual(200, validate_response.status_code) + self.assertEqual(validate_response.json, { + 'location_validated': False, + 'accounts_validated': False, + 'reinsurance_info_validated': False, + 'reinsurance_scope_validated': False}) + + + # Run validate - check is valid + 
validate_response = self.app.post( + portfolio.get_absolute_url() + 'validate/', + headers={ + 'Authorization': 'Bearer {}'.format(AccessToken.for_user(user)) + }, + ) + self.assertEqual(200, validate_response.status_code) + self.assertEqual(validate_response.json, { + 'location_validated': True, + 'accounts_validated': True, + 'reinsurance_info_validated': True, + 'reinsurance_scope_validated': True}) + + + def test_location_file__is_valid(self): + content_type='text/csv' + test_data = pd.read_csv(io.StringIO(LOCATION_DATA_VALID)) + file_content= test_data.to_csv(index=False).encode('utf-8') + + with TemporaryDirectory() as d: + with override_settings(MEDIA_ROOT=d): + user = fake_user() + portfolio = fake_portfolio() + + self.app.post( + portfolio.get_absolute_location_file_url(), + headers={ + 'Authorization': 'Bearer {}'.format(AccessToken.for_user(user)) + }, + upload_files=( + ('file', 'file{}'.format(mimetypes.guess_extension(content_type)), file_content), + ), + ) + + validate_response = self.app.get( + portfolio.get_absolute_url() + 'validate/', + headers={ + 'Authorization': 'Bearer {}'.format(AccessToken.for_user(user)) + }, + ) + + # Get current validate status - Not yet run + self.assertEqual(200, validate_response.status_code) + self.assertEqual(validate_response.json, { + 'location_validated': False, + 'accounts_validated': None, + 'reinsurance_info_validated': None, + 'reinsurance_scope_validated': None}) + + + # Run validate - check is valid + validate_response = self.app.post( + portfolio.get_absolute_url() + 'validate/', + headers={ + 'Authorization': 'Bearer {}'.format(AccessToken.for_user(user)) + }, + ) + self.assertEqual(200, validate_response.status_code) + self.assertEqual(validate_response.json, { + 'location_validated': True, + 'accounts_validated': None, + 'reinsurance_info_validated': None, + 'reinsurance_scope_validated': None}) + + + def test_location_file__is_invalid__response_is_400(self): + content_type='text/csv' + test_data = pd.read_csv(io.StringIO(LOCATION_DATA_INVALID)) + file_content= test_data.to_csv(index=False).encode('utf-8') + + with TemporaryDirectory() as d: + with override_settings(MEDIA_ROOT=d): + user = fake_user() + portfolio = fake_portfolio() + + self.app.post( + portfolio.get_absolute_location_file_url(), + headers={ + 'Authorization': 'Bearer {}'.format(AccessToken.for_user(user)) + }, + upload_files=( + ('file', 'file{}'.format(mimetypes.guess_extension(content_type)), file_content), + ), + ) + validate_response = self.app.get( + portfolio.get_absolute_url() + 'validate/', + headers={ + 'Authorization': 'Bearer {}'.format(AccessToken.for_user(user)) + }, + ) + + # Get current validate status - Not yet run + self.assertEqual(200, validate_response.status_code) + self.assertEqual(validate_response.json, { + 'location_validated': False, + 'accounts_validated': None, + 'reinsurance_info_validated': None, + 'reinsurance_scope_validated': None}) + + # Run validate - check is invalid + validate_response = self.app.post( + portfolio.get_absolute_url() + 'validate/', + headers={ + 'Authorization': 'Bearer {}'.format(AccessToken.for_user(user)) + }, + expect_errors=True, + ) + self.assertEqual(400, validate_response.status_code) + self.assertEqual(validate_response.json, [ + ['location', 'missing required column PortNumber'], + ['location', 'missing required column LocNumber'], + ['location', "column 'Port' is not a valid oed field"], + ['location', "column 'LocNumb' is not a valid oed field"], + ['location', "column 'Street' is not a valid oed 
field"], + ['location', 'LocPerilsCovered has invalid perils.\n AccNumber LocPerilsCovered\n1 A11111 XXYA'] + ]) + + def test_account_file__is_invalid__response_is_400(self): + content_type='text/csv' + test_data = pd.read_csv(io.StringIO(LOCATION_DATA_VALID)) + file_content= test_data.to_csv(index=False).encode('utf-8') + + with TemporaryDirectory() as d: + with override_settings(MEDIA_ROOT=d): + user = fake_user() + portfolio = fake_portfolio() + + self.app.post( + portfolio.get_absolute_accounts_file_url(), + headers={ + 'Authorization': 'Bearer {}'.format(AccessToken.for_user(user)) + }, + upload_files=( + ('file', 'file{}'.format(mimetypes.guess_extension(content_type)), file_content), + ), + ) + + validate_response = self.app.get( + portfolio.get_absolute_url() + 'validate/', + headers={ + 'Authorization': 'Bearer {}'.format(AccessToken.for_user(user)) + }, + ) + + # Get current validate status - Not yet run + self.assertEqual(200, validate_response.status_code) + self.assertEqual(validate_response.json, { + 'location_validated': None, + 'accounts_validated': False, + 'reinsurance_info_validated': None, + 'reinsurance_scope_validated': None}) + + + # Run validate - check is valid + validate_response = self.app.post( + portfolio.get_absolute_url() + 'validate/', + headers={ + 'Authorization': 'Bearer {}'.format(AccessToken.for_user(user)) + }, + expect_errors=True, + ) + self.assertEqual(400, validate_response.status_code) + self.assertEqual(validate_response.json, [ + ['account', 'missing required column AccCurrency'], + ['account', 'missing required column PolNumber'], + ['account', 'missing required column PolPerilsCovered'], + ['account', "column 'LocNumber' is not a valid oed field"], + ['account', "column 'IsTenant' is not a valid oed field"], + ['account', "column 'BuildingID' is not a valid oed field"], + ['account', "column 'CountryCode' is not a valid oed field"], + ['account', "column 'Latitude' is not a valid oed field"], + ['account', "column 'Longitude' is not a valid oed field"], + ['account', "column 'StreetAddress' is not a valid oed field"], + ['account', "column 'PostalCode' is not a valid oed field"], + ['account', "column 'OccupancyCode' is not a valid oed field"], + ['account', "column 'ConstructionCode' is not a valid oed field"], + ['account', "column 'LocPerilsCovered' is not a valid oed field"], + ['account', "column 'BuildingTIV' is not a valid oed field"], + ['account', "column 'OtherTIV' is not a valid oed field"], + ['account', "column 'ContentsTIV' is not a valid oed field"], + ['account', "column 'BITIV' is not a valid oed field"], + ['account', "column 'LocCurrency' is not a valid oed field"] + ]) + + + def test_reinsurance_info_file__is_invalid__response_is_400(self): + content_type='text/csv' + test_data = pd.read_csv(io.StringIO(LOCATION_DATA_VALID)) + file_content= test_data.to_csv(index=False).encode('utf-8') + + with TemporaryDirectory() as d: + with override_settings(MEDIA_ROOT=d): + user = fake_user() + portfolio = fake_portfolio() + + self.app.post( + portfolio.get_absolute_reinsurance_info_file_url(), + headers={ + 'Authorization': 'Bearer {}'.format(AccessToken.for_user(user)) + }, + upload_files=( + ('file', 'file{}'.format(mimetypes.guess_extension(content_type)), file_content), + ), + ) + + validate_response = self.app.get( + portfolio.get_absolute_url() + 'validate/', + headers={ + 'Authorization': 'Bearer {}'.format(AccessToken.for_user(user)) + }, + ) + + # Get current validate status - Not yet run + self.assertEqual(200, 
validate_response.status_code) + self.assertEqual(validate_response.json, { + 'location_validated': None, + 'accounts_validated': None, + 'reinsurance_info_validated': False, + 'reinsurance_scope_validated': None}) + + + # Run validate - check is valid + validate_response = self.app.post( + portfolio.get_absolute_url() + 'validate/', + headers={ + 'Authorization': 'Bearer {}'.format(AccessToken.for_user(user)) + }, + expect_errors=True, + ) + self.assertEqual(400, validate_response.status_code) + self.assertEqual(validate_response.json, [ + ['ri_info', 'missing required column ReinsNumber'], + ['ri_info', 'missing required column ReinsPeril'], + ['ri_info', 'missing required column PlacedPercent'], + ['ri_info', 'missing required column ReinsCurrency'], + ['ri_info', 'missing required column InuringPriority'], + ['ri_info', 'missing required column ReinsType'], + ['ri_info', 'missing required column RiskLevel'], + ['ri_info', "column 'PortNumber' is not a valid oed field"], + ['ri_info', "column 'AccNumber' is not a valid oed field"], + ['ri_info', "column 'LocNumber' is not a valid oed field"], + ['ri_info', "column 'IsTenant' is not a valid oed field"], + ['ri_info', "column 'BuildingID' is not a valid oed field"], + ['ri_info', "column 'CountryCode' is not a valid oed field"], + ['ri_info', "column 'Latitude' is not a valid oed field"], + ['ri_info', "column 'Longitude' is not a valid oed field"], + ['ri_info', "column 'StreetAddress' is not a valid oed field"], + ['ri_info', "column 'PostalCode' is not a valid oed field"], + ['ri_info', "column 'OccupancyCode' is not a valid oed field"], + ['ri_info', "column 'ConstructionCode' is not a valid oed field"], + ['ri_info', "column 'LocPerilsCovered' is not a valid oed field"], + ['ri_info', "column 'BuildingTIV' is not a valid oed field"], + ['ri_info', "column 'OtherTIV' is not a valid oed field"], + ['ri_info', "column 'ContentsTIV' is not a valid oed field"], + ['ri_info', "column 'BITIV' is not a valid oed field"], + ['ri_info', "column 'LocCurrency' is not a valid oed field"] + ]) + + def test_reinsurance_scope_file__is_invalid__response_is_400(self): + content_type='text/csv' + test_data = pd.read_csv(io.StringIO(LOCATION_DATA_VALID)) + file_content= test_data.to_csv(index=False).encode('utf-8') + + with TemporaryDirectory() as d: + with override_settings(MEDIA_ROOT=d): + user = fake_user() + portfolio = fake_portfolio() + + self.app.post( + portfolio.get_absolute_reinsurance_scope_file_url(), + headers={ + 'Authorization': 'Bearer {}'.format(AccessToken.for_user(user)) + }, + upload_files=( + ('file', 'file{}'.format(mimetypes.guess_extension(content_type)), file_content), + ), + ) + + validate_response = self.app.get( + portfolio.get_absolute_url() + 'validate/', + headers={ + 'Authorization': 'Bearer {}'.format(AccessToken.for_user(user)) + }, + ) + + # Get current validate status - Not yet run + self.assertEqual(200, validate_response.status_code) + self.assertEqual(validate_response.json, { + 'location_validated': None, + 'accounts_validated': None, + 'reinsurance_info_validated': None, + 'reinsurance_scope_validated': False}) + + + # Run validate - check is valid + validate_response = self.app.post( + portfolio.get_absolute_url() + 'validate/', + headers={ + 'Authorization': 'Bearer {}'.format(AccessToken.for_user(user)) + }, + expect_errors=True, + ) + self.assertEqual(400, validate_response.status_code) + self.assertEqual(validate_response.json,[ + ['ri_scope', 'missing required column ReinsNumber'], + ['ri_scope', "column 
'IsTenant' is not a valid oed field"], + ['ri_scope', "column 'BuildingID' is not a valid oed field"], + ['ri_scope', "column 'Latitude' is not a valid oed field"], + ['ri_scope', "column 'Longitude' is not a valid oed field"], + ['ri_scope', "column 'StreetAddress' is not a valid oed field"], + ['ri_scope', "column 'PostalCode' is not a valid oed field"], + ['ri_scope', "column 'OccupancyCode' is not a valid oed field"], + ['ri_scope', "column 'ConstructionCode' is not a valid oed field"], + ['ri_scope', "column 'LocPerilsCovered' is not a valid oed field"], + ['ri_scope', "column 'BuildingTIV' is not a valid oed field"], + ['ri_scope', "column 'OtherTIV' is not a valid oed field"], + ['ri_scope', "column 'ContentsTIV' is not a valid oed field"], + ['ri_scope', "column 'BITIV' is not a valid oed field"], + ['ri_scope', "column 'LocCurrency' is not a valid oed field"] + ]) diff --git a/src/server/oasisapi/portfolios/viewsets.py b/src/server/oasisapi/portfolios/viewsets.py index 436e8fdc6..f37e53b61 100644 --- a/src/server/oasisapi/portfolios/viewsets.py +++ b/src/server/oasisapi/portfolios/viewsets.py @@ -17,13 +17,14 @@ from ..files.views import handle_related_file from ..files.serializers import RelatedFileSerializer from .models import Portfolio -from ..schemas.custom_swagger import FILE_RESPONSE, FILE_FORMAT_PARAM +from ..schemas.custom_swagger import FILE_RESPONSE, FILE_FORMAT_PARAM, FILE_VALIDATION_PARAM from ..schemas.serializers import StorageLinkSerializer from .serializers import ( PortfolioSerializer, CreateAnalysisSerializer, PortfolioStorageSerializer, - PortfolioListSerializer + PortfolioListSerializer, + PortfolioValidationSerializer ) @@ -105,9 +106,10 @@ def get_serializer_class(self): return PortfolioListSerializer elif self.action in ['set_storage_links', 'storage_links']: return PortfolioStorageSerializer + elif self.action in ['validate']: + return PortfolioValidationSerializer elif self.action in [ 'accounts_file', 'location_file', 'reinsurance_info_file', 'reinsurance_scope_file', - 'set_accounts_file', 'set_location_file', 'set_reinsurance_info_file', 'set_reinsurance_scope_file', ]: return RelatedFileSerializer else: @@ -116,7 +118,10 @@ def get_serializer_class(self): @property def parser_classes(self): - if getattr(self, 'action', None) in ['set_accounts_file', 'set_location_file', 'set_reinsurance_info_file', 'set_reinsurance_scope_file']: + method = self.request.method.lower() + upload_views = ['accounts_file', 'location_file', 'reinsurance_info_file', 'reinsurance_scope_file'] + + if method == 'post' and getattr(self, 'action', None) in upload_views: return [MultiPartParser] else: return api_settings.DEFAULT_PARSER_CLASSES @@ -156,85 +161,113 @@ def storage_links(self, request, pk=None, version=None): @swagger_auto_schema(methods=['get'], responses={200: FILE_RESPONSE}, manual_parameters=[FILE_FORMAT_PARAM]) - @action(methods=['get', 'delete'], detail=True) + @swagger_auto_schema(methods=['post'], manual_parameters=[FILE_VALIDATION_PARAM]) + @action(methods=['get', 'post', 'delete'], detail=True) def accounts_file(self, request, pk=None, version=None): """ get: Gets the portfolios `accounts_file` contents - delete: - Disassociates the portfolios `accounts_file` with the portfolio - """ - return handle_related_file(self.get_object(), 'accounts_file', request, self.supported_mime_types) - - @accounts_file.mapping.post - def set_accounts_file(self, request, pk=None, version=None): - """ post: Sets the portfolios `accounts_file` contents + + delete: + 
Disassociates the portfolios `accounts_file` with the portfolio """ - store_as_parquet=django_settings.PORTFOLIO_PARQUET_STORAGE - return handle_related_file(self.get_object(), 'accounts_file', request, self.supported_mime_types, store_as_parquet) + method = request.method.lower() + if method == 'post': + store_as_parquet = django_settings.PORTFOLIO_PARQUET_STORAGE + oed_validate = request.GET.get('validate', 'false').lower() == 'true' + else: + store_as_parquet = None + oed_validate = None + return handle_related_file(self.get_object(), 'accounts_file', request, self.supported_mime_types, store_as_parquet, oed_validate) @swagger_auto_schema(methods=['get'], responses={200: FILE_RESPONSE}, manual_parameters=[FILE_FORMAT_PARAM]) - @action(methods=['get', 'delete'], detail=True) + @swagger_auto_schema(methods=['post'], manual_parameters=[FILE_VALIDATION_PARAM]) + @action(methods=['get', 'post', 'delete'], detail=True) def location_file(self, request, pk=None, version=None): """ get: Gets the portfolios `location_file` contents - delete: - Disassociates the portfolios `location_file` contents - """ - return handle_related_file(self.get_object(), 'location_file', request, self.supported_mime_types) - - @location_file.mapping.post - def set_location_file(self, request, pk=None, version=None): - """ post: Sets the portfolios `location_file` contents + + delete: + Disassociates the portfolios `location_file` contents """ - store_as_parquet=django_settings.PORTFOLIO_PARQUET_STORAGE - return handle_related_file(self.get_object(), 'location_file', request, self.supported_mime_types, store_as_parquet) + method = request.method.lower() + if method == 'post': + store_as_parquet = django_settings.PORTFOLIO_PARQUET_STORAGE + oed_validate = request.GET.get('validate', 'false').lower() == 'true' + else: + store_as_parquet = None + oed_validate = None + return handle_related_file(self.get_object(), 'location_file', request, self.supported_mime_types, store_as_parquet, oed_validate) @swagger_auto_schema(methods=['get'], responses={200: FILE_RESPONSE}, manual_parameters=[FILE_FORMAT_PARAM]) - @action(methods=['get', 'delete'], detail=True) + @swagger_auto_schema(methods=['post'], manual_parameters=[FILE_VALIDATION_PARAM]) + @action(methods=['get', 'post', 'delete'], detail=True) def reinsurance_info_file(self, request, pk=None, version=None): """ get: Gets the portfolios `reinsurance_info_file` contents + post: + Sets the portfolios `reinsurance_info_file` contents + delete: Disassociates the portfolios `reinsurance_info_file` contents """ - return handle_related_file(self.get_object(), 'reinsurance_info_file', request, self.supported_mime_types) + method = request.method.lower() + if method == 'post': + store_as_parquet = django_settings.PORTFOLIO_PARQUET_STORAGE + oed_validate = request.GET.get('validate', 'false').lower() == 'true' + else: + store_as_parquet = None + oed_validate = None + return handle_related_file(self.get_object(), 'reinsurance_info_file', request, self.supported_mime_types, store_as_parquet, oed_validate) - @reinsurance_info_file.mapping.post - def set_reinsurance_info_file(self, request, pk=None, version=None): - """ - post: - Sets the portfolios `reinsurance_info_file` contents - """ - store_as_parquet=django_settings.PORTFOLIO_PARQUET_STORAGE - return handle_related_file(self.get_object(), 'reinsurance_info_file', request, self.supported_mime_types, store_as_parquet) @swagger_auto_schema(methods=['get'], responses={200: FILE_RESPONSE}, manual_parameters=[FILE_FORMAT_PARAM]) - 
@action(methods=['get', 'delete'], detail=True) + @swagger_auto_schema(methods=['post'], manual_parameters=[FILE_VALIDATION_PARAM]) + @action(methods=['get', 'post', 'delete'], detail=True) def reinsurance_scope_file(self, request, pk=None, version=None): """ get: Gets the portfolios `reinsurance_scope_file` contents + post: + Sets the portfolios `reinsurance_scope_file` contents + delete: Disassociates the portfolios `reinsurance_scope_file` contents """ - return handle_related_file(self.get_object(), 'reinsurance_scope_file', request, self.supported_mime_types) + method = request.method.lower() + if method == 'post': + store_as_parquet = django_settings.PORTFOLIO_PARQUET_STORAGE + oed_validate = request.GET.get('validate', 'false').lower() == 'true' + else: + store_as_parquet = None + oed_validate = None + return handle_related_file(self.get_object(), 'reinsurance_scope_file', request, self.supported_mime_types, store_as_parquet, oed_validate) + - @reinsurance_scope_file.mapping.post - def set_reinsurance_scope_file(self, request, pk=None, version=None): + @action(methods=['get', 'post'], detail=True) + def validate(self, request, pk=None, version=None): """ + get: + Return OED validation status for each attached file + post: - Sets the portfolios `reinsurance_scope_file` contents + Run OED validation on the connected exposure files """ - store_as_parquet=django_settings.PORTFOLIO_PARQUET_STORAGE - return handle_related_file(self.get_object(), 'reinsurance_scope_file', request, self.supported_mime_types, store_as_parquet) + method = request.method.lower() + instance = self.get_object() + + if method == 'post': + instance.run_oed_validation() + + serializer = self.get_serializer(instance) + return Response(serializer.data) diff --git a/src/server/oasisapi/schemas/custom_swagger.py b/src/server/oasisapi/schemas/custom_swagger.py index ec876dbc0..a161adbc2 100644 --- a/src/server/oasisapi/schemas/custom_swagger.py +++ b/src/server/oasisapi/schemas/custom_swagger.py @@ -3,6 +3,7 @@ 'HEALTHCHECK', 'TOKEN_REFRESH_HEADER', 'FILE_FORMAT_PARAM', + 'FILE_VALIDATION_PARAM', ] from drf_yasg import openapi @@ -40,15 +41,15 @@ required=["version", "config"], properties={ "version": Schema( - title='Server version', + title='Server version', description="Version of oasis platform", - read_only=True, - type='string', + read_only=True, + type='string', ), "config": Schema( - title='Server config', + title='Server config', description="Oasis server public configuration", - type='object', + type='object', ) } ) @@ -62,9 +63,16 @@ ) FILE_FORMAT_PARAM = openapi.Parameter( - 'file_format', - openapi.IN_QUERY, - description="File format returned, default is `csv`", - type=openapi.TYPE_STRING, + 'file_format', + openapi.IN_QUERY, + description="File format returned, default is `csv`", + type=openapi.TYPE_STRING, enum=['csv', 'parquet'] ) + +FILE_VALIDATION_PARAM = openapi.Parameter( + 'validate', + openapi.IN_QUERY, + description="Validate OED files on upload, default `True`", + type=openapi.TYPE_BOOLEAN, +) diff --git a/src/server/oasisapi/settings.py b/src/server/oasisapi/settings.py index 2def6a7fe..28bbd5acc 100644 --- a/src/server/oasisapi/settings.py +++ b/src/server/oasisapi/settings.py @@ -265,6 +265,17 @@ # storage selector for exposure files PORTFOLIO_PARQUET_STORAGE = iniconf.settings.getboolean('server', 'PORTFOLIO_PARQUET_STORAGE', fallback=False) +PORTFOLIO_UPLOAD_VALIDATION = iniconf.settings.getboolean('server', 'PORTFOLIO_UPLOAD_VALIDATION', fallback=True) +PORTFOLIO_VALIDATION_CONFIG = [ + 
{'name': 'required_fields', 'on_error': 'return'}, + {'name': 'unknown_column', 'on_error': 'return'}, + {'name': 'valid_values', 'on_error': 'return'}, + {'name': 'perils', 'on_error': 'return'}, + {'name': 'occupancy_code', 'on_error': 'return'}, + {'name': 'construction_code', 'on_error': 'return'}, + {'name': 'country_and_area_code', 'on_error': 'return'}, +] + # limit analyses logs access to admin accounts RESTRICT_SYSTEM_LOGS = iniconf.settings.getboolean('server', 'RESTRICT_SYSTEM_LOGS', fallback=False) From c8542c9bfc098979d0fa291b5a12e376317fc1a2 Mon Sep 17 00:00:00 2001 From: sambles Date: Mon, 30 Jan 2023 14:06:07 +0000 Subject: [PATCH 02/33] Add Python code quality scanning (#732) * Add code-quality.yml * Always run both code scans on fail * Run Autopep8 * missed some autopep * Remove unused imports * Fix Autopep8 from #724 --- .github/workflows/code-quality.yml | 39 +++ scripts/update-changelog.py | 100 ++++---- scripts/update-requirements.py | 5 +- src/common/shared.py | 4 +- src/conf/iniconf.py | 5 +- .../backends/azure_storage.py | 43 ++-- src/model_execution_worker/storage_manager.py | 7 +- src/model_execution_worker/tasks.py | 31 ++- src/server/oasisapi/analyses/models.py | 65 ++--- src/server/oasisapi/analyses/serializers.py | 6 +- src/server/oasisapi/analyses/tasks.py | 29 ++- .../analyses/tests/test_analysis_api.py | 77 +++--- .../analyses/tests/test_analysis_model.py | 3 +- .../analyses/tests/test_analysis_tasks.py | 14 +- src/server/oasisapi/analyses/viewsets.py | 19 +- src/server/oasisapi/analysis_models/admin.py | 7 +- src/server/oasisapi/analysis_models/models.py | 8 +- .../oasisapi/analysis_models/serializers.py | 9 +- .../tests/test_analysis_model.py | 90 ++++--- .../oasisapi/analysis_models/viewsets.py | 14 +- src/server/oasisapi/asgi.py | 2 +- src/server/oasisapi/auth/serializers.py | 5 +- src/server/oasisapi/data_files/models.py | 2 +- .../data_files/tests/test_data_files.py | 2 +- src/server/oasisapi/data_files/viewsets.py | 2 +- src/server/oasisapi/files/models.py | 65 +++-- src/server/oasisapi/files/serializers.py | 2 - src/server/oasisapi/files/upload.py | 2 +- src/server/oasisapi/files/views.py | 9 +- src/server/oasisapi/info/tests/test_info.py | 2 +- src/server/oasisapi/info/views.py | 6 +- src/server/oasisapi/portfolios/models.py | 29 ++- src/server/oasisapi/portfolios/serializers.py | 9 +- .../portfolios/tests/test_portfolio.py | 229 +++++++++--------- src/server/oasisapi/portfolios/viewsets.py | 10 +- src/server/oasisapi/schemas/custom_swagger.py | 1 - src/server/oasisapi/schemas/serializers.py | 10 +- src/server/oasisapi/settings.py | 36 ++- tests/integration/api_integration.py | 14 +- tests/test_tasks.py | 34 +-- 40 files changed, 534 insertions(+), 512 deletions(-) create mode 100644 .github/workflows/code-quality.yml diff --git a/.github/workflows/code-quality.yml b/.github/workflows/code-quality.yml new file mode 100644 index 000000000..1464bdba9 --- /dev/null +++ b/.github/workflows/code-quality.yml @@ -0,0 +1,39 @@ +# This workflow performs code quality checks like: +# - PEP8: the workflow fails if code is not PEP8 compliant +# - flake8: the problems identified by flake 8 are listed but the workflow +# presently doesn't fail if flake reports errors. 
+ +name: Code Quality + +on: [push, pull_request] + +env: + max_line_length: 150 + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: "3.10" + + - name: install pip tools + run: | + python -m pip install --upgrade pip + pip install --upgrade pip-tools + + - name: install code quality tools + run: pip install --upgrade autopep8 flake8 + + - name: Run (partial) flake8 + if: ${{ ! cancelled() }} + run: flake8 --select F401,F522,F524,F541 --show-source src/ + + - name: check PEP8 compliance + if: ${{ ! cancelled() }} + id: autopep8 + run: | + autopep8 --diff --exit-code --recursive --max-line-length ${{ env.max_line_length }} --ignore E402 . diff --git a/scripts/update-changelog.py b/scripts/update-changelog.py index e015afa6d..b5648f7e7 100755 --- a/scripts/update-changelog.py +++ b/scripts/update-changelog.py @@ -15,7 +15,7 @@ from pydriller import Repository as RepositoryMining logging.basicConfig(level=logging.INFO) -## extract text between markers in Pull requesuts +# extract text between markers in Pull requesuts START_PR_MARKER = '\r\n' END_PR_MARKER = '' DEFAULT_PR_TITLE = '### Release notes feature title' @@ -32,6 +32,7 @@ class ReleaseNotesBuilder(object): ## install requirments 'pip install github pydriller click' """ + def __init__(self, github_token=None, github_user='OasisLMF'): """ :param github_token: Github Oauth Token @@ -81,7 +82,6 @@ def _get_commit_refs(self, repo_url, local_path, from_tag, to_tag): commit_list = sum(commit_list, []) return set(map(lambda cm: int(cm[1:]), commit_list)) - def _get_github_pull_requests(self, github, commit_refs): """ All pull requests have issues but not all issue have pull requests @@ -101,7 +101,6 @@ def _get_github_pull_requests(self, github, commit_refs): self.logger.info("Filtered github refereces to Pull Requests: {}".format([pr.number for pr in pull_requeusts])) return pull_requeusts - def _get_linked_issues(self, pr_number, repo_url): """ there is no direct way to find which issues are linked to a PR via the github API (yet) @@ -112,8 +111,8 @@ def _get_linked_issues(self, pr_number, repo_url): try: r = requests.get(f"{repo_url}/pull/{pr_number}") soup = BeautifulSoup(r.text, 'html.parser') - issueForm = soup.find("form", { "aria-label": re.compile('Link issues')}) - issue_urls_found = [ re.findall(r'\d+', i["href"]) for i in issueForm.find_all("a")] + issueForm = soup.find("form", {"aria-label": re.compile('Link issues')}) + issue_urls_found = [re.findall(r'\d+', i["href"]) for i in issueForm.find_all("a")] except Exception as e: self.logger.warning(f"Error fetching linked issue for PR-{pr_number}, {e}") @@ -121,7 +120,6 @@ def _get_linked_issues(self, pr_number, repo_url): self.logger.info("PR-{} linked issues: {}".format(pr_number, issue_refs)) return set(map(int, issue_refs)) - def _check_gh_rate_limit(self): resp = requests.get( 'https://api.github.com/rate_limit', @@ -129,7 +127,6 @@ def _check_gh_rate_limit(self): resp.raise_for_status() return resp.json() - def _get_tag(self, repo_name, idx=0): resp = requests.get( f'https://api.github.com/repos/{self.github_user}/{repo_name}/tags', @@ -175,7 +172,6 @@ def _find_milestone(self, repo_name, title): return milestone.get('number') return -1 - def load_data(self, repo_name, local_path=None, tag_from=None, tag_to=None): """ Create a dict of PyGithub objects based on the references found in commit @@ -218,7 +214,6 @@ def load_data(self, repo_name, local_path=None, 
tag_from=None, tag_to=None): else: logger.warning(f'repo_path: ".git" folder not found in {repo_path}, fallback to fresh clone') - # Load repository data github = Github(login_or_token=self.github_token).get_repo(f'{self.github_user}/{repo_name}') @@ -309,9 +304,9 @@ def create_changelog(self, github_data, format_markdown=False): pr['linked_issues'][0].title, )) else: - # Case 2: PR has multiple linked issues + # Case 2: PR has multiple linked issues changelog_lines.append("* [{}]({}) - {}".format( - ', '.join([f'#{issue.number}' for issue in pr['linked_issues']]), + ', '.join([f'#{issue.number}' for issue in pr['linked_issues']]), pr['pull_request'].html_url, pr['pull_request'].title )) @@ -354,7 +349,6 @@ def release_plat_header(self, tag_platform=None, tag_oasislmf=None, tag_oasisui= plat_header.append('\n') return plat_header - def extract_pr_content(self, github_data): """ Extract release note text between two markers in the Pull_request's body @@ -374,7 +368,7 @@ def extract_pr_content(self, github_data): # skip PR if release note tags are missing continue - release_desc = pr_body[idx_start+len(START_PR_MARKER):idx_end].strip() + release_desc = pr_body[idx_start + len(START_PR_MARKER):idx_end].strip() if len(release_desc) < 1: # skip PR if tags contain an empty string continue @@ -393,18 +387,16 @@ def extract_pr_content(self, github_data): pr['pull_request'].html_url) title = [release_desc.split('\r\n')[0] + pr_link] body = release_desc.split('\r\n')[1:] - release_note_content.append("\r\n".join(title + body) ) + release_note_content.append("\r\n".join(title + body)) release_note_content.append('\n\n') else: release_note_content.append(release_desc) release_note_content.append('\n\n') - - has_content = True return has_content, release_note_content - def create_release_notes(self, github_data): + def create_release_notes(self, github_data): """ release notes """ release_notes = [] @@ -430,12 +422,13 @@ def check_rate_limit(github_token): rate_limit_info = noteBuilder._check_gh_rate_limit() logger.info(json.dumps(rate_limit_info, indent=4)) + @cli.command() -@click.option('--repo', type=click.Choice(['ktools', 'OasisLMF', 'OasisPlatform', 'OasisUI'], case_sensitive=True), required=True) -@click.option('--output-path', type=click.Path(exists=False), default='./CHANGELOG.rst', help='changelog output path') -@click.option('--local-repo-path', type=click.Path(exists=False), default=None, help=' Path to local git repository, used to skip clone step (optional) ') -@click.option('--from-tag', required=True, help='Github tag to track changes from' ) -@click.option('--to-tag', required=True, help='Github tag to track changes to') +@click.option('--repo', type=click.Choice(['ktools', 'OasisLMF', 'OasisPlatform', 'OasisUI'], case_sensitive=True), required=True) +@click.option('--output-path', type=click.Path(exists=False), default='./CHANGELOG.rst', help='changelog output path') +@click.option('--local-repo-path', type=click.Path(exists=False), default=None, help=' Path to local git repository, used to skip clone step (optional) ') +@click.option('--from-tag', required=True, help='Github tag to track changes from') +@click.option('--to-tag', required=True, help='Github tag to track changes to') @click.option('--github-token', default=None, help='Github OAuth token') @click.option('--apply-milestone', is_flag=True, help='Add issues to Github milestone, (requires Github OAuth token)') def build_changelog(repo, from_tag, to_tag, github_token, output_path, apply_milestone, local_repo_path): @@ 
-451,7 +444,7 @@ def build_changelog(repo, from_tag, to_tag, github_token, output_path, apply_mil repo_data = noteBuilder.load_data(repo_name=repo, local_path=local_repo_path, tag_from=from_tag, tag_to=to_tag) changelog_data = noteBuilder.create_changelog(repo_data) changelog_path = os.path.abspath(output_path) - logger.info("CHANGELOG OUTPUT: \n" + "".join(changelog_data)) + logger.info("CHANGELOG OUTPUT: \n" + "".join(changelog_data)) # Add milestones if apply_milestone: @@ -470,18 +463,18 @@ def build_changelog(repo, from_tag, to_tag, github_token, output_path, apply_mil # new file or stub cl.seek(0) header = [f'{repo} Changelog\n'] - header.append( (len(header[0])-1) * '='+'\n') + header.append((len(header[0]) - 1) * '=' + '\n') header.append('\n') cl.writelines(header + changelog_data) logger.info(f'Written Changelog to new file: "{changelog_path}"') @cli.command() -@click.option('--repo', type=click.Choice(['ktools', 'OasisLMF', 'OasisUI'], case_sensitive=True), required=True) -@click.option('--output-path', type=click.Path(exists=False), default='./RELEASE.md', help='Release notes output path') -@click.option('--local-repo-path', type=click.Path(exists=False), default=None, help=' Path to local git repository, used to skip clone step (optional) ') -@click.option('--from-tag', required=True, help='Github tag to track changes from' ) -@click.option('--to-tag', required=True, help='Github tag to track changes to') +@click.option('--repo', type=click.Choice(['ktools', 'OasisLMF', 'OasisUI'], case_sensitive=True), required=True) +@click.option('--output-path', type=click.Path(exists=False), default='./RELEASE.md', help='Release notes output path') +@click.option('--local-repo-path', type=click.Path(exists=False), default=None, help=' Path to local git repository, used to skip clone step (optional) ') +@click.option('--from-tag', required=True, help='Github tag to track changes from') +@click.option('--to-tag', required=True, help='Github tag to track changes to') @click.option('--github-token', default=None, help='Github OAuth token') def build_release(repo, from_tag, to_tag, github_token, output_path, local_repo_path): logger = logging.getLogger() @@ -495,7 +488,7 @@ def build_release(repo, from_tag, to_tag, github_token, output_path, local_repo_ repo_data = noteBuilder.load_data(repo_name=repo, local_path=local_repo_path, tag_from=from_tag, tag_to=to_tag) release_notes = noteBuilder.create_changelog(repo_data, format_markdown=True) release_notes += noteBuilder.create_release_notes(repo_data) - logger.info("RELEASE NOTES OUTPUT: \n" + "".join(release_notes)) + logger.info("RELEASE NOTES OUTPUT: \n" + "".join(release_notes)) # Write lines to target file release_notes_path = os.path.abspath(output_path) @@ -504,22 +497,21 @@ def build_release(repo, from_tag, to_tag, github_token, output_path, local_repo_ logger.info(f'Written Release notes to new file: "{release_notes_path}"') - @cli.command() -@click.option('--platform-repo-path', type=click.Path(exists=False), default=None, help=' Path to local git repository, used to skip clone step (optional) ') -@click.option('--platform-from-tag', default=None, help='Github tag to track changes from' ) -@click.option('--platform-to-tag', default=None, help='Github tag to track changes to') -@click.option('--lmf-repo-path', type=click.Path(exists=False), default=None, help=' Path to local git repository, used to skip clone step (optional) ') -@click.option('--lmf-from-tag', default=None, help='Github tag to track changes from' ) 
-@click.option('--lmf-to-tag', default=None, help='Github tag to track changes to') -@click.option('--ktools-repo-path', type=click.Path(exists=False), default=None, help=' Path to local git repository, used to skip clone step (optional) ') -@click.option('--ktools-from-tag', default=None, help='Github tag to track changes from' ) -@click.option('--ktools-to-tag', default=None, help='Github tag to track changes to') -@click.option('--github-token', default=None, help='Github OAuth token') -@click.option('--output-path', type=click.Path(exists=False), default='./RELEASE.md', help='Release notes output path') +@click.option('--platform-repo-path', type=click.Path(exists=False), default=None, help=' Path to local git repository, used to skip clone step (optional) ') +@click.option('--platform-from-tag', default=None, help='Github tag to track changes from') +@click.option('--platform-to-tag', default=None, help='Github tag to track changes to') +@click.option('--lmf-repo-path', type=click.Path(exists=False), default=None, help=' Path to local git repository, used to skip clone step (optional) ') +@click.option('--lmf-from-tag', default=None, help='Github tag to track changes from') +@click.option('--lmf-to-tag', default=None, help='Github tag to track changes to') +@click.option('--ktools-repo-path', type=click.Path(exists=False), default=None, help=' Path to local git repository, used to skip clone step (optional) ') +@click.option('--ktools-from-tag', default=None, help='Github tag to track changes from') +@click.option('--ktools-to-tag', default=None, help='Github tag to track changes to') +@click.option('--github-token', default=None, help='Github OAuth token') +@click.option('--output-path', type=click.Path(exists=False), default='./RELEASE.md', help='Release notes output path') def build_release_platform(platform_repo_path, platform_from_tag, - platform_to_tag , + platform_to_tag, lmf_repo_path, lmf_from_tag, lmf_to_tag, @@ -528,18 +520,17 @@ def build_release_platform(platform_repo_path, ktools_to_tag, github_token, output_path): - """ Create the OasisPlatform release notes """ logger = logging.getLogger() noteBuilder = ReleaseNotesBuilder(github_token=github_token) - plat_from = platform_from_tag if platform_from_tag else noteBuilder._get_tag(repo_name='OasisPlatform', idx=1) - plat_to = platform_to_tag if platform_to_tag else noteBuilder._get_tag(repo_name='OasisPlatform', idx=0) - lmf_from = lmf_from_tag if lmf_from_tag else noteBuilder._get_tag(repo_name='OasisLMF', idx=1) - lmf_to = lmf_to_tag if lmf_to_tag else noteBuilder._get_tag(repo_name='OasisLMF', idx=0) - ktools_from = ktools_from_tag if ktools_from_tag else noteBuilder._get_tag(repo_name='ktools', idx=1) - ktools_to = ktools_to_tag if ktools_to_tag else noteBuilder._get_tag(repo_name='ktools', idx=0) + plat_from = platform_from_tag if platform_from_tag else noteBuilder._get_tag(repo_name='OasisPlatform', idx=1) + plat_to = platform_to_tag if platform_to_tag else noteBuilder._get_tag(repo_name='OasisPlatform', idx=0) + lmf_from = lmf_from_tag if lmf_from_tag else noteBuilder._get_tag(repo_name='OasisLMF', idx=1) + lmf_to = lmf_to_tag if lmf_to_tag else noteBuilder._get_tag(repo_name='OasisLMF', idx=0) + ktools_from = ktools_from_tag if ktools_from_tag else noteBuilder._get_tag(repo_name='ktools', idx=1) + ktools_to = ktools_to_tag if ktools_to_tag else noteBuilder._get_tag(repo_name='ktools', idx=0) ui_to = noteBuilder._get_tag(repo_name='OasisUI', idx=0) # Load github data @@ -549,7 +540,7 @@ def 
build_release_platform(platform_repo_path, # Add title release_notes_data = [f'Oasis Release v{plat_to} \n'] - release_notes_data.append((len(release_notes_data[0])-1) * '='+'\n') + release_notes_data.append((len(release_notes_data[0]) - 1) * '=' + '\n') release_notes_data.append('\n') # Print docker images and components @@ -561,8 +552,8 @@ def build_release_platform(platform_repo_path, # Load Change logs release_notes_data += ["# Changelogs \n", "\n"] - release_notes_data += noteBuilder.create_changelog(plat_data, format_markdown=True) - release_notes_data += noteBuilder.create_changelog(lmf_data, format_markdown=True) + release_notes_data += noteBuilder.create_changelog(plat_data, format_markdown=True) + release_notes_data += noteBuilder.create_changelog(lmf_data, format_markdown=True) release_notes_data += noteBuilder.create_changelog(ktools_data, format_markdown=True) # Extract Feature notes from PR's @@ -570,7 +561,7 @@ def build_release_platform(platform_repo_path, release_notes_data += noteBuilder.create_release_notes(plat_data) release_notes_data += noteBuilder.create_release_notes(lmf_data) release_notes_data += noteBuilder.create_release_notes(ktools_data) - logger.info("RELEASE NOTES OUTPUT: \n" + "".join(release_notes_data)) + logger.info("RELEASE NOTES OUTPUT: \n" + "".join(release_notes_data)) # Write lines to target file release_notes_path = os.path.abspath(output_path) @@ -578,5 +569,6 @@ def build_release_platform(platform_repo_path, rn.writelines(release_notes_data) logger.info(f'Written Release notes to new file: "{release_notes_path}"') + if __name__ == '__main__': cli() diff --git a/scripts/update-requirements.py b/scripts/update-requirements.py index 380c68683..dbfa95558 100644 --- a/scripts/update-requirements.py +++ b/scripts/update-requirements.py @@ -1,7 +1,4 @@ - - - server = json.load(open('server-results.sarif')) -[v for v in server['runs'][0]['results'] if 'Fixed Version' in v['message']['text'] ] +[v for v in server['runs'][0]['results'] if 'Fixed Version' in v['message']['text']] diff --git a/src/common/shared.py b/src/common/shared.py index 7717c4dc0..a581c585e 100644 --- a/src/common/shared.py +++ b/src/common/shared.py @@ -1,5 +1,6 @@ import logging + def set_aws_log_level(log_level): # Set log level for s3boto3 try: @@ -13,9 +14,10 @@ def set_aws_log_level(log_level): logging.getLogger('s3transfer').setLevel(LOG_LEVEL) logging.getLogger('urllib3').setLevel(LOG_LEVEL) + def set_azure_log_level(log_level): try: LOG_LEVEL = getattr(logging, log_level.upper()) except AttributeError: LOG_LEVEL = logging.WARNING - logging.getLogger('azure').setLevel(LOG_LEVEL) + logging.getLogger('azure').setLevel(LOG_LEVEL) diff --git a/src/conf/iniconf.py b/src/conf/iniconf.py index 5db547eca..b97ba5b0e 100644 --- a/src/conf/iniconf.py +++ b/src/conf/iniconf.py @@ -76,14 +76,13 @@ def get_timedelta(self, section, option, **kwargs): kwargs.setdefault('vars', self._get_section_env_vars(section)) kwargs_string = super(Settings, self).get(section, option, **kwargs) try: - kwargs = {k.split('=')[0].strip():int(k.split('=')[1]) + kwargs = {k.split('=')[0].strip(): int(k.split('=')[1]) for k in kwargs_string.split(',')} except (TypeError, IndexError): - kwargs = {k.split('=')[0].strip():int(k.split('=')[1]) + kwargs = {k.split('=')[0].strip(): int(k.split('=')[1]) for k in kwargs['fallback'].split(',')} return timedelta(**kwargs) - def getboolean(self, section, option, **kwargs): kwargs.setdefault('vars', self._get_section_env_vars(section)) return super(Settings, 
self).getboolean(section, option, **kwargs) diff --git a/src/model_execution_worker/backends/azure_storage.py b/src/model_execution_worker/backends/azure_storage.py index 6a0aa915f..4c9e7615c 100755 --- a/src/model_execution_worker/backends/azure_storage.py +++ b/src/model_execution_worker/backends/azure_storage.py @@ -3,44 +3,41 @@ import tempfile from azure.core.exceptions import ResourceNotFoundError -from azure.storage.blob import ( - BlobClient, BlobSasPermissions, BlobServiceClient, ContentSettings, - generate_blob_sas, -) +from azure.storage.blob import BlobServiceClient from ...common.shared import set_azure_log_level from ..storage_manager import BaseStorageConnector class AzureObjectStore(BaseStorageConnector): -# https://docs.microsoft.com/en-us/azure/storage/blobs/storage-quickstart-blobs-python + # https://docs.microsoft.com/en-us/azure/storage/blobs/storage-quickstart-blobs-python def __init__(self, settings): self._service_client = None self._client = None # Required - self.account_name = settings.get('worker', 'AZURE_ACCOUNT_NAME') - self.account_key = settings.get('worker', 'AZURE_ACCOUNT_KEY') - self.azure_container = settings.get('worker', 'AZURE_CONTAINER') + self.account_name = settings.get('worker', 'AZURE_ACCOUNT_NAME') + self.account_key = settings.get('worker', 'AZURE_ACCOUNT_KEY') + self.azure_container = settings.get('worker', 'AZURE_CONTAINER') # Optional - self.location = settings.get('worker', 'AZURE_LOCATION', fallback='') - self.connection_string = settings.get('worker', 'AZURE_CONNECTION_STRING', fallback=None) - self.shared_container = settings.get('worker', 'AZURE_SHARED_CONTAINER', fallback=True) - self.azure_ssl = settings.get('worker', 'AZURE_SSL', fallback=True) - self.upload_max_conn = settings.get('worker', 'AZURE_UPLOAD_MAX_CONN', fallback=2) - self.timeout = settings.get('worker', 'AZURE_CONNECTION_TIMEOUT_SECS', fallback=20) - self.max_memory_size = settings.get('worker', 'AZURE_BLOB_MAX_MEMORY_SIZE', fallback=2*1024*1024) - self.expiration_secs = settings.get('worker', 'AZURE_URL_EXPIRATION_SECS', fallback=None) - self.overwrite_files = settings.get('worker', 'AZURE_OVERWRITE_FILES', fallback=True) + self.location = settings.get('worker', 'AZURE_LOCATION', fallback='') + self.connection_string = settings.get('worker', 'AZURE_CONNECTION_STRING', fallback=None) + self.shared_container = settings.get('worker', 'AZURE_SHARED_CONTAINER', fallback=True) + self.azure_ssl = settings.get('worker', 'AZURE_SSL', fallback=True) + self.upload_max_conn = settings.get('worker', 'AZURE_UPLOAD_MAX_CONN', fallback=2) + self.timeout = settings.get('worker', 'AZURE_CONNECTION_TIMEOUT_SECS', fallback=20) + self.max_memory_size = settings.get('worker', 'AZURE_BLOB_MAX_MEMORY_SIZE', fallback=2 * 1024 * 1024) + self.expiration_secs = settings.get('worker', 'AZURE_URL_EXPIRATION_SECS', fallback=None) + self.overwrite_files = settings.get('worker', 'AZURE_OVERWRITE_FILES', fallback=True) self.default_content_type = settings.get('worker', 'AZURE_DEFAULT_CONTENT', fallback='application/octet-stream') - self.cache_control = settings.get('worker', 'AZURE_CACHE_CONTROL', fallback=None) - self.sas_token = settings.get('worker', 'AZURE_SAS_TOKEN', fallback=None) - self.custom_domain = settings.get('worker', 'AZURE_CUSTOM_DOMAIN', fallback=None) - self.token_credential = settings.get('worker', 'AZURE_TOKEN_CREDENTIAL', fallback=None) - self.azure_log_level = settings.get('worker', 'AWS_LOG_LEVEL', fallback=logging.ERROR) - self.azure_protocol = 'https' if self.azure_ssl else 
'http' + self.cache_control = settings.get('worker', 'AZURE_CACHE_CONTROL', fallback=None) + self.sas_token = settings.get('worker', 'AZURE_SAS_TOKEN', fallback=None) + self.custom_domain = settings.get('worker', 'AZURE_CUSTOM_DOMAIN', fallback=None) + self.token_credential = settings.get('worker', 'AZURE_TOKEN_CREDENTIAL', fallback=None) + self.azure_log_level = settings.get('worker', 'AWS_LOG_LEVEL', fallback=logging.ERROR) + self.azure_protocol = 'https' if self.azure_ssl else 'http' set_azure_log_level(self.azure_log_level) def _get_service_client(self): diff --git a/src/model_execution_worker/storage_manager.py b/src/model_execution_worker/storage_manager.py index dc1fb5fdb..f9ab8320e 100755 --- a/src/model_execution_worker/storage_manager.py +++ b/src/model_execution_worker/storage_manager.py @@ -13,13 +13,13 @@ LOG_FILE_SUFFIX = 'txt' ARCHIVE_FILE_SUFFIX = 'tar.gz' -#from .backends.aws_storage import AwsObjectStore -#from .backends.azure_storage import AzureObjectStore +# from .backends.aws_storage import AwsObjectStore +# from .backends.azure_storage import AzureObjectStore # # # # -#def StorageSelector(settings_conf): +# def StorageSelector(settings_conf): # """ Returns a `StorageConnector` class based on conf.ini # # Call this method from model_execution_worker.task @@ -53,6 +53,7 @@ class BaseStorageConnector(object): Implements storage for a local fileshare between `server` and `worker` containers """ + def __init__(self, setting, logger=None): self.media_root = setting.get('worker', 'MEDIA_ROOT') self.storage_connector = 'FS-SHARE' diff --git a/src/model_execution_worker/tasks.py b/src/model_execution_worker/tasks.py index 3fe26fbca..0087fcbbb 100755 --- a/src/model_execution_worker/tasks.py +++ b/src/model_execution_worker/tasks.py @@ -31,7 +31,7 @@ from ..conf.iniconf import settings from ..common.data import STORED_FILENAME, ORIGINAL_FILENAME -#from .storage_manager import StorageSelector +# from .storage_manager import StorageSelector from .storage_manager import BaseStorageConnector from .backends.aws_storage import AwsObjectStore from .backends.azure_storage import AzureObjectStore @@ -359,8 +359,8 @@ def start_analysis(analysis_settings, input_location, complex_data_files=None): tmpdir_persist = settings.getboolean('worker', 'KEEP_RUN_DIR', fallback=False) tmpdir_base = settings.get('worker', 'BASE_RUN_DIR', fallback=None) - # Setup Job cancellation handler + def analysis_cancel_handler(signum, frame): logging.info('TASK CANCELLATION') if proc is not None: @@ -413,7 +413,7 @@ def analysis_cancel_handler(signum, frame): args_list = run_args + [''] if (len(run_args) % 2) else run_args mdk_args = [x for t in list(zip(*[iter(args_list)] * 2)) if (None not in t) and ('--model-run-dir' not in t) for x in t] logging.info('run_directory: {}'.format(oasis_files_dir)) - #logging.info('args_list: {}'.format(str(run_args))) + # logging.info('args_list: {}'.format(str(run_args))) logging.info("\nExecuting: generate-losses") if debug_worker: logging.info("\nCLI command: \noasislmf model generate-losses {}".format( @@ -512,10 +512,10 @@ def generate_input_cancel_handler(signum, frame): with tmp_dir as oasis_files_dir, tmp_input_dir as input_data_dir: # Fetch input files - location_file = filestore.get(loc_file, oasis_files_dir, required=True) - accounts_file = filestore.get(acc_file, oasis_files_dir) - ri_info_file = filestore.get(info_file, oasis_files_dir) - ri_scope_file = filestore.get(scope_file, oasis_files_dir) + location_file = filestore.get(loc_file, oasis_files_dir, 
required=True) + accounts_file = filestore.get(acc_file, oasis_files_dir) + ri_info_file = filestore.get(info_file, oasis_files_dir) + ri_scope_file = filestore.get(scope_file, oasis_files_dir) lookup_settings_file = filestore.get(settings_file, oasis_files_dir) run_args = [ @@ -547,12 +547,11 @@ def generate_input_cancel_handler(signum, frame): if debug_worker: run_args += ['--verbose'] - # Log MDK generate command args_list = run_args + [''] if (len(run_args) % 2) else run_args mdk_args = [x for t in list(zip(*[iter(args_list)] * 2)) if None not in t for x in t] logging.info('run_directory: {}'.format(oasis_files_dir)) - #logging.info('args_list: {}'.format(str(run_args))) + # logging.info('args_list: {}'.format(str(run_args))) logging.info("\nExecuting: generate-oasis-files") if debug_worker: logging.info("\nCLI command: \noasislmf model generate-oasis-files {}".format( @@ -564,7 +563,7 @@ def generate_input_cancel_handler(signum, frame): proc = subprocess.Popen( ['oasislmf', 'model', 'generate-oasis-files'] + run_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=worker_env, - preexec_fn=os.setsid, # run in a new session, assigning a new process group to it and its children. + preexec_fn=os.setsid, # run in a new session, assigning a new process group to it and its children. ) stdout, stderr = proc.communicate() @@ -581,13 +580,13 @@ def generate_input_cancel_handler(signum, frame): summary_levels_fp = next(iter(glob.glob(os.path.join(oasis_files_dir, 'exposure_summary_levels.json'))), None) # Store result files - traceback_file = filestore.create_traceback(stdout.decode(), stderr.decode(), oasis_files_dir) - traceback = filestore.put(traceback_file) - lookup_error = filestore.put(lookup_error_fp) - lookup_success = filestore.put(lookup_success_fp) + traceback_file = filestore.create_traceback(stdout.decode(), stderr.decode(), oasis_files_dir) + traceback = filestore.put(traceback_file) + lookup_error = filestore.put(lookup_error_fp) + lookup_success = filestore.put(lookup_success_fp) lookup_validation = filestore.put(lookup_validation_fp) - summary_levels = filestore.put(summary_levels_fp) - output_tar_path = filestore.put(oasis_files_dir) + summary_levels = filestore.put(summary_levels_fp) + output_tar_path = filestore.put(oasis_files_dir) return output_tar_path, lookup_error, lookup_success, lookup_validation, summary_levels, traceback, proc.returncode diff --git a/src/server/oasisapi/analyses/models.py b/src/server/oasisapi/analyses/models.py index 021eb4faf..4a4ac17d7 100644 --- a/src/server/oasisapi/analyses/models.py +++ b/src/server/oasisapi/analyses/models.py @@ -43,26 +43,33 @@ class Analysis(TimeStampedModel): portfolio = models.ForeignKey(Portfolio, on_delete=models.CASCADE, related_name='analyses', help_text=_('The portfolio to link the analysis to')) model = models.ForeignKey(AnalysisModel, on_delete=models.CASCADE, related_name='analyses', help_text=_('The model to link the analysis to')) name = models.CharField(help_text='The name of the analysis', max_length=255) - status = models.CharField(max_length=max(len(c) for c in status_choices._db_values), choices=status_choices, default=status_choices.NEW, editable=False) + status = models.CharField(max_length=max(len(c) for c in status_choices._db_values), + choices=status_choices, default=status_choices.NEW, editable=False) task_started = models.DateTimeField(editable=False, null=True, default=None) task_finished = models.DateTimeField(editable=False, null=True, default=None) run_task_id = 
models.CharField(max_length=255, editable=False, default='', blank=True) generate_inputs_task_id = models.CharField(max_length=255, editable=False, default='', blank=True) complex_model_data_files = models.ManyToManyField(DataFile, blank=True, related_name='complex_model_files_analyses') - settings_file = models.ForeignKey(RelatedFile, on_delete=models.CASCADE, blank=True, null=True, default=None, related_name='settings_file_analyses') + settings_file = models.ForeignKey(RelatedFile, on_delete=models.CASCADE, blank=True, null=True, + default=None, related_name='settings_file_analyses') input_file = models.ForeignKey(RelatedFile, on_delete=models.CASCADE, blank=True, null=True, default=None, related_name='input_file_analyses') - input_generation_traceback_file = models.ForeignKey(RelatedFile, on_delete=models.SET_NULL, blank=True, null=True, default=None, related_name='input_generation_traceback_analyses') + input_generation_traceback_file = models.ForeignKey(RelatedFile, on_delete=models.SET_NULL, + blank=True, null=True, default=None, related_name='input_generation_traceback_analyses') output_file = models.ForeignKey(RelatedFile, on_delete=models.CASCADE, blank=True, null=True, default=None, related_name='output_file_analyses') - run_traceback_file = models.ForeignKey(RelatedFile, on_delete=models.SET_NULL, blank=True, null=True, default=None, related_name='run_traceback_file_analyses') - run_log_file = models.ForeignKey(RelatedFile, on_delete=models.SET_NULL, blank=True, null=True, default=None, related_name='run_log_file_analyses') - - lookup_errors_file = models.ForeignKey(RelatedFile, on_delete=models.CASCADE, blank=True, null=True, default=None, related_name='lookup_errors_file_analyses') - lookup_success_file = models.ForeignKey(RelatedFile, on_delete=models.CASCADE, blank=True, null=True, default=None, related_name='lookup_success_file_analyses') - lookup_validation_file = models.ForeignKey(RelatedFile, on_delete=models.CASCADE, blank=True, null=True, default=None, related_name='lookup_validation_file_analyses') - summary_levels_file = models.ForeignKey(RelatedFile, on_delete=models.CASCADE, blank=True, null=True, default=None, related_name='summary_levels_file_analyses') - - + run_traceback_file = models.ForeignKey(RelatedFile, on_delete=models.SET_NULL, blank=True, null=True, + default=None, related_name='run_traceback_file_analyses') + run_log_file = models.ForeignKey(RelatedFile, on_delete=models.SET_NULL, blank=True, + null=True, default=None, related_name='run_log_file_analyses') + + lookup_errors_file = models.ForeignKey(RelatedFile, on_delete=models.CASCADE, blank=True, null=True, + default=None, related_name='lookup_errors_file_analyses') + lookup_success_file = models.ForeignKey(RelatedFile, on_delete=models.CASCADE, blank=True, null=True, + default=None, related_name='lookup_success_file_analyses') + lookup_validation_file = models.ForeignKey(RelatedFile, on_delete=models.CASCADE, blank=True, null=True, + default=None, related_name='lookup_validation_file_analyses') + summary_levels_file = models.ForeignKey(RelatedFile, on_delete=models.CASCADE, blank=True, null=True, + default=None, related_name='summary_levels_file_analyses') class Meta: verbose_name_plural = 'analyses' @@ -123,11 +130,10 @@ def get_absolute_run_traceback_file_url(self, request=None): def get_absolute_run_log_file_url(self, request=None): return reverse('analysis-run-log-file', kwargs={'version': 'v1', 'pk': self.pk}, request=request) - + def get_absolute_storage_url(self, request=None): return 
reverse('analysis-storage-links', kwargs={'version': 'v1', 'pk': self.pk}, request=request) - def validate_run(self): valid_choices = [ self.status_choices.READY, @@ -235,14 +241,13 @@ def cancel_any(self): valid_choices = INPUTS_GENERATION_STATES + RUN_ANALYSIS_STATES if self.status not in valid_choices: raise ValidationError({'status': ['Analysis is not running or queued']}) - + if self.status in INPUTS_GENERATION_STATES: self.cancel_generate_inputs() - + if self.status in RUN_ANALYSIS_STATES: self.cancel_analysis() - def cancel_analysis(self): valid_choices = [ self.status_choices.RUN_QUEUED, @@ -306,7 +311,6 @@ def create_complex_model_data_file_dicts(self): ] return complex_data_files - def copy_file(self, obj): """ Duplicate a conneced DB object and store under a new ID @@ -341,22 +345,23 @@ def copy(self): new_instance.summary_levels_file = None return new_instance + @receiver(post_delete, sender=Analysis) def delete_connected_files(sender, instance, **kwargs): """ Post delete handler to clear out any dangaling analyses files """ - files_for_removal = [ - 'settings_file', - 'input_file', - 'input_generation_traceback_file', - 'output_file', - 'run_traceback_file', - 'run_log_file', - 'lookup_errors_file', - 'lookup_success_file', - 'lookup_validation_file', - 'summary_levels_file', - ] + files_for_removal = [ + 'settings_file', + 'input_file', + 'input_generation_traceback_file', + 'output_file', + 'run_traceback_file', + 'run_log_file', + 'lookup_errors_file', + 'lookup_success_file', + 'lookup_validation_file', + 'summary_levels_file', + ] for ref in files_for_removal: file_ref = getattr(instance, ref) if file_ref: diff --git a/src/server/oasisapi/analyses/serializers.py b/src/server/oasisapi/analyses/serializers.py index 48e39964a..7b01f6836 100644 --- a/src/server/oasisapi/analyses/serializers.py +++ b/src/server/oasisapi/analyses/serializers.py @@ -5,6 +5,7 @@ from .models import Analysis from ..files.models import file_storage_link + class AnalysisListSerializer(serializers.Serializer): """ Read Only Analyses Deserializer for efficiently returning a list of all Analyses from DB @@ -36,7 +37,6 @@ class AnalysisListSerializer(serializers.Serializer): run_log_file = serializers.SerializerMethodField(read_only=True) storage_links = serializers.SerializerMethodField(read_only=True) - @swagger_serializer_method(serializer_or_field=serializers.URLField) def get_input_file(self, instance): request = self.context.get('request') @@ -98,7 +98,6 @@ def get_storage_links(self, instance): return instance.get_absolute_storage_url(request=request) - class AnalysisSerializer(serializers.ModelSerializer): input_file = serializers.SerializerMethodField() settings_file = serializers.SerializerMethodField() @@ -200,7 +199,6 @@ def get_storage_links(self, instance): request = self.context.get('request') return instance.get_absolute_storage_url(request=request) - def validate(self, attrs): if not attrs.get('creator') and 'request' in self.context: attrs['creator'] = self.context.get('request').user @@ -228,7 +226,7 @@ class AnalysisStorageSerializer(serializers.ModelSerializer): lookup_errors_file = serializers.SerializerMethodField() lookup_success_file = serializers.SerializerMethodField() lookup_validation_file = serializers.SerializerMethodField() - summary_levels_file = serializers.SerializerMethodField() + summary_levels_file = serializers.SerializerMethodField() class Meta: model = Analysis diff --git a/src/server/oasisapi/analyses/tasks.py b/src/server/oasisapi/analyses/tasks.py index 
2759f1e5f..d65f619b5 100644 --- a/src/server/oasisapi/analyses/tasks.py +++ b/src/server/oasisapi/analyses/tasks.py @@ -5,7 +5,7 @@ # Remote debugging 'rdb.set_trace()' # https://docs.celeryproject.org/en/stable/userguide/debugging.html -from celery.contrib import rdb +# from celery.contrib import rdb from celery.utils.log import get_task_logger from celery import Task @@ -42,6 +42,7 @@ def is_valid_url(url): else: return False + def is_in_bucket(object_key): if not hasattr(default_storage, 'bucket'): return False @@ -55,6 +56,7 @@ def is_in_bucket(object_key): else: raise e + def is_in_container(object_key): if not hasattr(default_storage, 'azure_container'): return False @@ -271,6 +273,7 @@ def log_worker_monitor(sender, **k): logger.info('AWS_SHARED_BUCKET: {}'.format(settings.AWS_SHARED_BUCKET)) logger.info('AWS_IS_GZIPPED: {}'.format(settings.AWS_IS_GZIPPED)) + @celery_app.task(name='run_register_worker') def run_register_worker(m_supplier, m_name, m_id, m_settings, m_version): logger.info('model_supplier: {}, model_name: {}, model_id: {}'.format(m_supplier, m_name, m_id)) @@ -313,7 +316,7 @@ def run_register_worker(m_supplier, m_name, m_id, m_settings, m_version): # Update model version info if m_version: try: - model.ver_ktools = m_version['ktools'] + model.ver_ktools = m_version['ktools'] model.ver_oasislmf = m_version['oasislmf'] model.ver_platform = m_version['platform'] model.save() @@ -327,6 +330,7 @@ def run_register_worker(m_supplier, m_name, m_id, m_settings, m_version): logger.exception(str(e)) logger.exception(model) + @celery_app.task(name='set_task_status') def set_task_status(analysis_pk, task_status): try: @@ -407,19 +411,26 @@ def record_generate_input_result(result, analysis_pk, initiator_pk): analysis.status = Analysis.status_choices.INPUTS_GENERATION_ERROR # Add current Output - analysis.input_file = store_file(input_location, 'application/gzip', initiator, filename=f'analysis_{analysis_pk}_inputs.tar.gz') if input_location else None - analysis.lookup_success_file = store_file(lookup_success_fp, 'text/csv', initiator, filename=f'analysis_{analysis_pk}_gul_summary_map.csv') if lookup_success_fp else None - analysis.lookup_errors_file = store_file(lookup_error_fp, 'text/csv', initiator, required=False, filename=f'analysis_{analysis_pk}_keys-errors.csv') if lookup_error_fp else None - analysis.lookup_validation_file = store_file(lookup_validation_fp, 'application/json', initiator, required=False, filename=f'analysis_{analysis_pk}_exposure_summary_report.json') if lookup_validation_fp else None - analysis.summary_levels_file = store_file(summary_levels_fp, 'application/json', initiator, required=False, filename=f'analysis_{analysis_pk}_exposure_summary_levels.json') if summary_levels_fp else None + analysis.input_file = store_file(input_location, 'application/gzip', initiator, + filename=f'analysis_{analysis_pk}_inputs.tar.gz') if input_location else None + analysis.lookup_success_file = store_file(lookup_success_fp, 'text/csv', initiator, + filename=f'analysis_{analysis_pk}_gul_summary_map.csv') if lookup_success_fp else None + analysis.lookup_errors_file = store_file(lookup_error_fp, 'text/csv', initiator, required=False, + filename=f'analysis_{analysis_pk}_keys-errors.csv') if lookup_error_fp else None + analysis.lookup_validation_file = store_file(lookup_validation_fp, 'application/json', initiator, required=False, + filename=f'analysis_{analysis_pk}_exposure_summary_report.json') if lookup_validation_fp else None + analysis.summary_levels_file = 
store_file(summary_levels_fp, 'application/json', initiator, required=False, + filename=f'analysis_{analysis_pk}_exposure_summary_levels.json') if summary_levels_fp else None analysis.task_finished = timezone.now() # always store traceback if traceback_fp: - analysis.input_generation_traceback_file = store_file(traceback_fp, 'text/plain', initiator, filename=f'analysis_{analysis_pk}_generation_traceback.txt') + analysis.input_generation_traceback_file = store_file( + traceback_fp, 'text/plain', initiator, filename=f'analysis_{analysis_pk}_generation_traceback.txt') logger.info(analysis.input_generation_traceback_file) analysis.save() + @celery_app.task(name='record_run_analysis_failure') def record_run_analysis_failure(analysis_pk, initiator_pk, traceback): logger.warning('"run_analysis_success" is deprecated and should only be used to process tasks already on the queue.') @@ -482,6 +493,7 @@ def record_generate_input_failure(analysis_pk, initiator_pk, traceback): ## --- Deprecated tasks ---------------------------------------------------- ## + @celery_app.task(name='run_analysis_success') def run_analysis_success(output_location, analysis_pk, initiator_pk): logger.warning('"run_analysis_success" is deprecated and should only be used to process tasks already on the queue.') @@ -513,7 +525,6 @@ def run_analysis_success(output_location, analysis_pk, initiator_pk): logger.exception(str(e)) - @celery_app.task(name='generate_input_success') def generate_input_success(result, analysis_pk, initiator_pk): logger.warning('"generate_input_success" is deprecated and should only be used to process tasks already on the queue.') diff --git a/src/server/oasisapi/analyses/tests/test_analysis_api.py b/src/server/oasisapi/analyses/tests/test_analysis_api.py index c81eb7cc2..d84a3e1fa 100644 --- a/src/server/oasisapi/analyses/tests/test_analysis_api.py +++ b/src/server/oasisapi/analyses/tests/test_analysis_api.py @@ -31,7 +31,7 @@ def test_user_is_not_authenticated___response_is_forbidden(self): analysis = fake_analysis() response = self.app.get(analysis.get_absolute_url(), expect_errors=True) - self.assertIn(response.status_code, [401,403]) + self.assertIn(response.status_code, [401, 403]) def test_user_is_authenticated_object_does_not_exist___response_is_404(self): user = fake_user() @@ -244,7 +244,7 @@ def test_user_is_not_authenticated___response_is_forbidden(self): analysis = fake_analysis() response = self.app.post(analysis.get_absolute_run_url(), expect_errors=True) - self.assertIn(response.status_code, [401,403]) + self.assertIn(response.status_code, [401, 403]) def test_user_is_authenticated_object_does_not_exist___response_is_404(self): user = fake_user() @@ -280,7 +280,7 @@ def test_user_is_not_authenticated___response_is_forbidden(self): analysis = fake_analysis() response = self.app.post(analysis.get_absolute_cancel_analysis_url(), expect_errors=True) - self.assertIn(response.status_code, [401,403]) + self.assertIn(response.status_code, [401, 403]) def test_user_is_authenticated_object_does_not_exist___response_is_404(self): user = fake_user() @@ -316,7 +316,7 @@ def test_user_is_not_authenticated___response_is_forbidden(self): analysis = fake_analysis() response = self.app.post(analysis.get_absolute_generate_inputs_url(), expect_errors=True) - self.assertIn(response.status_code, [401,403]) + self.assertIn(response.status_code, [401, 403]) def test_user_is_authenticated_object_does_not_exist___response_is_404(self): user = fake_user() @@ -352,7 +352,7 @@ def 
test_user_is_not_authenticated___response_is_forbidden(self): analysis = fake_analysis() response = self.app.post(analysis.get_absolute_cancel_inputs_generation_url(), expect_errors=True) - self.assertIn(response.status_code, [401,403]) + self.assertIn(response.status_code, [401, 403]) def test_user_is_authenticated_object_does_not_exist___response_is_404(self): user = fake_user() @@ -388,7 +388,7 @@ def test_user_is_not_authenticated___response_is_forbidden(self): analysis = fake_analysis() response = self.app.post(analysis.get_absolute_copy_url(), expect_errors=True) - self.assertIn(response.status_code, [401,403]) + self.assertIn(response.status_code, [401, 403]) def test_user_is_authenticated_object_does_not_exist___response_is_404(self): user = fake_user() @@ -692,7 +692,7 @@ def test_user_is_not_authenticated___response_is_forbidden(self): analysis = fake_analysis() response = self.app.get(analysis.get_absolute_settings_url(), expect_errors=True) - self.assertIn(response.status_code, [401,403]) + self.assertIn(response.status_code, [401, 403]) def test_settings_json_is_not_present___get_response_is_404(self): user = fake_user() @@ -735,22 +735,22 @@ def test_settings_json_is_not_valid___response_is_400(self): "number_of_samples": -1, "gul_threshold": 0, "model_settings": { - "use_random_number_file": True, - "event_occurrence_file_id": "1" + "use_random_number_file": True, + "event_occurrence_file_id": "1" }, "gul_output": True, "gul_summaries": [ - { - "id": 1, - "summarycalc": True, - "eltcalc": True, - "aalcalc": "Not-A-Boolean", - "pltcalc": True, - "lec_output":False - } + { + "id": 1, + "summarycalc": True, + "eltcalc": True, + "aalcalc": "Not-A-Boolean", + "pltcalc": True, + "lec_output": False + } ], "il_output": False - } + } } response = self.app.post( @@ -763,14 +763,13 @@ def test_settings_json_is_not_valid___response_is_400(self): expect_errors=True, ) - validation_error = { + validation_error = { 'number_of_samples': ['-1 is less than the minimum of 0'], 'gul_summaries-0-aalcalc': ["'Not-A-Boolean' is not of type 'boolean'"] } self.assertEqual(400, response.status_code) self.assertEqual(json.loads(response.body), validation_error) - def test_settings_json_is_uploaded___can_be_retrieved(self): with TemporaryDirectory() as d: with override_settings(MEDIA_ROOT=d): @@ -785,24 +784,24 @@ def test_settings_json_is_uploaded___can_be_retrieved(self): "number_of_samples": 10, "gul_threshold": 0, "model_settings": { - "use_random_number_file": True, - "event_occurrence_file_id": "1" + "use_random_number_file": True, + "event_occurrence_file_id": "1" }, "gul_output": True, "gul_summaries": [ - { - "id": 1, - "summarycalc": True, - "eltcalc": True, - "aalcalc": True, - "pltcalc": True, - "lec_output":False - } + { + "id": 1, + "summarycalc": True, + "eltcalc": True, + "aalcalc": True, + "pltcalc": True, + "lec_output": False + } ], "il_output": False, 'model_version_id': '1', 'module_supplier_id': 'OasisIM' - } + } } self.app.post( @@ -829,7 +828,7 @@ def test_user_is_not_authenticated___response_is_forbidden(self): analysis = fake_analysis() response = self.app.get(analysis.get_absolute_settings_file_url(), expect_errors=True) - self.assertIn(response.status_code, [401,403]) + self.assertIn(response.status_code, [401, 403]) def test_settings_file_is_not_present___get_response_is_404(self): user = fake_user() @@ -911,7 +910,7 @@ def test_user_is_not_authenticated___response_is_forbidden(self): analysis = fake_analysis() response = 
self.app.get(analysis.get_absolute_input_file_url(), expect_errors=True) - self.assertIn(response.status_code, [401,403]) + self.assertIn(response.status_code, [401, 403]) def test_input_file_is_not_present___get_response_is_404(self): user = fake_user() @@ -950,7 +949,7 @@ def test_user_is_not_authenticated___response_is_forbidden(self): analysis = fake_analysis() response = self.app.get(analysis.get_absolute_lookup_errors_file_url(), expect_errors=True) - self.assertIn(response.status_code, [401,403]) + self.assertIn(response.status_code, [401, 403]) def test_lookup_errors_file_is_not_present___get_response_is_404(self): user = fake_user() @@ -1005,7 +1004,7 @@ def test_user_is_not_authenticated___response_is_forbidden(self): analysis = fake_analysis() response = self.app.get(analysis.get_absolute_lookup_success_file_url(), expect_errors=True) - self.assertIn(response.status_code, [401,403]) + self.assertIn(response.status_code, [401, 403]) def test_lookup_success_file_is_not_present___get_response_is_404(self): user = fake_user() @@ -1060,7 +1059,7 @@ def test_user_is_not_authenticated___response_is_forbidden(self): analysis = fake_analysis() response = self.app.get(analysis.get_absolute_lookup_validation_file_url(), expect_errors=True) - self.assertIn(response.status_code, [401,403]) + self.assertIn(response.status_code, [401, 403]) def test_lookup_validation_file_is_not_present___get_response_is_404(self): user = fake_user() @@ -1115,7 +1114,7 @@ def test_user_is_not_authenticated___response_is_forbidden(self): analysis = fake_analysis() response = self.app.get(analysis.get_absolute_input_generation_traceback_file_url(), expect_errors=True) - self.assertIn(response.status_code, [401,403]) + self.assertIn(response.status_code, [401, 403]) def test_input_generation_traceback_file_is_not_present___get_response_is_404(self): user = fake_user() @@ -1168,7 +1167,7 @@ def test_user_is_not_authenticated___response_is_forbidden(self): analysis = fake_analysis() response = self.app.get(analysis.get_absolute_output_file_url(), expect_errors=True) - self.assertIn(response.status_code, [401,403]) + self.assertIn(response.status_code, [401, 403]) def test_output_file_is_not_present___get_response_is_404(self): user = fake_user() @@ -1240,7 +1239,7 @@ def test_user_is_not_authenticated___response_is_forbidden(self): analysis = fake_analysis() response = self.app.get(analysis.get_absolute_run_traceback_file_url(), expect_errors=True) - self.assertIn(response.status_code, [401,403]) + self.assertIn(response.status_code, [401, 403]) def test_run_traceback_file_is_not_present___get_response_is_404(self): user = fake_user() diff --git a/src/server/oasisapi/analyses/tests/test_analysis_model.py b/src/server/oasisapi/analyses/tests/test_analysis_model.py index fecac0879..e8afc700b 100644 --- a/src/server/oasisapi/analyses/tests/test_analysis_model.py +++ b/src/server/oasisapi/analyses/tests/test_analysis_model.py @@ -106,7 +106,8 @@ def test_state_is_running_or_generating_inputs___validation_error_is_raised_revo with self.assertRaises(ValidationError) as ex: analysis.run(initiator) - self.assertEqual({'status': ['Analysis must be in one of the following states [READY, RUN_COMPLETED, RUN_ERROR, RUN_CANCELLED]']}, ex.exception.detail) + self.assertEqual( + {'status': ['Analysis must be in one of the following states [READY, RUN_COMPLETED, RUN_ERROR, RUN_CANCELLED]']}, ex.exception.detail) self.assertEqual(status, analysis.status) self.assertFalse(res_factory.revoke_called) diff --git 
a/src/server/oasisapi/analyses/tests/test_analysis_tasks.py b/src/server/oasisapi/analyses/tests/test_analysis_tasks.py index 6bbac58e0..472e92a65 100644 --- a/src/server/oasisapi/analyses/tests/test_analysis_tasks.py +++ b/src/server/oasisapi/analyses/tests/test_analysis_tasks.py @@ -60,7 +60,7 @@ def test_output_file_and_status_are_updated(self, output_location, log_location, self.assertEqual(analysis.output_file.content_type, 'application/gzip') self.assertEqual(analysis.output_file.creator, initiator) else: - self.assertEqual(analysis.output_file, None) + self.assertEqual(analysis.output_file, None) self.assertEqual(analysis.run_log_file.file.name, log_location) self.assertEqual(analysis.run_log_file.content_type, 'application/gzip') @@ -116,12 +116,12 @@ def test_input_file_lookup_files_and_status_are_updated(self, input_location, lo return_code = 0 record_generate_input_result(( - os.path.join(d, input_location), - os.path.join(d, lookup_error_fp), - os.path.join(d, lookup_success_fp), - os.path.join(d, lookup_validation_fp), - os.path.join(d, summary_levels_fp), - os.path.join(d, traceback_fp), + os.path.join(d, input_location), + os.path.join(d, lookup_error_fp), + os.path.join(d, lookup_success_fp), + os.path.join(d, lookup_validation_fp), + os.path.join(d, summary_levels_fp), + os.path.join(d, traceback_fp), return_code), analysis.pk, initiator.pk) analysis.refresh_from_db() diff --git a/src/server/oasisapi/analyses/viewsets.py b/src/server/oasisapi/analyses/viewsets.py index db3bb4e1c..d9fdbd599 100644 --- a/src/server/oasisapi/analyses/viewsets.py +++ b/src/server/oasisapi/analyses/viewsets.py @@ -7,7 +7,7 @@ from rest_framework import viewsets from rest_framework import permissions from rest_framework.decorators import action -from rest_framework.parsers import MultiPartParser, FormParser +from rest_framework.parsers import MultiPartParser from rest_framework.response import Response from rest_framework.settings import api_settings from rest_framework.serializers import Serializer @@ -29,25 +29,26 @@ from ..schemas.serializers import AnalysisSettingsSerializer - class LogAcessDenied(APIException): status_code = 403 default_detail = 'Only accounts with staff access are alowed to view system logs.' default_code = 'system logs disabled by admin' + class check_log_permission(permissions.BasePermission): RESTRICTED_ACTIONS = [ 'input_generation_traceback_file', 'run_traceback_file', 'run_log_file' ] + def has_permission(self, request, view): - if not settings.RESTRICT_SYSTEM_LOGS: # are analyses log restricted? + if not settings.RESTRICT_SYSTEM_LOGS: # are analyses log restricted? + return True + if request.user.is_staff: # user is admin? return True - if request.user.is_staff: # user is admin? - return True - # was it a system log message? - if view.action not in self.RESTRICTED_ACTIONS: # request for a log file? + # was it a system log message? + if view.action not in self.RESTRICTED_ACTIONS: # request for a log file? 
return True else: raise LogAcessDenied @@ -217,7 +218,6 @@ def run(self, request, pk=None, version=None): obj.run(request.user) return Response(AnalysisSerializer(instance=obj, context=self.get_serializer_context()).data) - @swagger_auto_schema(responses={200: AnalysisSerializer}) @action(methods=['post'], detail=True) def cancel(self, request, pk=None, version=None): @@ -229,7 +229,6 @@ def cancel(self, request, pk=None, version=None): obj.cancel_any() return Response(AnalysisSerializer(instance=obj, context=self.get_serializer_context()).data) - @swagger_auto_schema(responses={200: AnalysisSerializer}) @action(methods=['post'], detail=True) def cancel_analysis_run(self, request, pk=None, version=None): @@ -428,8 +427,6 @@ def data_files(self, request, pk=None, version=None): df_serializer = DataFileSerializer(df, many=True, context=context) return Response(df_serializer.data) - - @action(methods=['get'], detail=True) def storage_links(self, request, pk=None, version=None): """ diff --git a/src/server/oasisapi/analysis_models/admin.py b/src/server/oasisapi/analysis_models/admin.py index 3b930fd35..8a492324d 100644 --- a/src/server/oasisapi/analysis_models/admin.py +++ b/src/server/oasisapi/analysis_models/admin.py @@ -5,14 +5,19 @@ """ Cascading delete of Model and anything linked to it via foreign key """ + + def delete_hard(modeladmin, request, queryset): if not modeladmin.has_delete_permission(request): raise PermissionDenied for obj in queryset: obj.hard_delete() + """ Re-enables a soft-deleted model by toggling database flag """ + + def activate_model(modeladmin, request, queryset): if not modeladmin.has_add_permission(request): raise PermissionDenied @@ -37,5 +42,3 @@ def get_queryset(self, request): @admin.register(SettingsTemplate) class SettingsTemplateAdmin(admin.ModelAdmin): list_display = ['file', 'name', 'creator'] - - diff --git a/src/server/oasisapi/analysis_models/models.py b/src/server/oasisapi/analysis_models/models.py index b9b148999..81c1db8d4 100644 --- a/src/server/oasisapi/analysis_models/models.py +++ b/src/server/oasisapi/analysis_models/models.py @@ -84,7 +84,6 @@ def get_absolute_settings_template_url(self, model_pk, request=None): return reverse('models-setting_templates-content', kwargs={'version': 'v1', 'pk': self.pk, 'models_pk': model_pk}, request=request) - class AnalysisModel(TimeStampedModel): supplier_id = models.CharField(max_length=255, help_text=_('The supplier ID for the model.')) model_id = models.CharField(max_length=255, help_text=_('The model ID for the model.')) @@ -98,7 +97,7 @@ class AnalysisModel(TimeStampedModel): ver_platform = models.CharField(max_length=255, null=True, default=None, help_text=_('The worker platform version.')) deleted = models.BooleanField(default=False, editable=False) - ## Logical Delete + # Logical Delete objects = SoftDeleteManager() all_objects = SoftDeleteManager(alive_only=False) @@ -138,17 +137,20 @@ def activate(self, request=None): def get_absolute_resources_file_url(self, request=None): return reverse('analysis-model-resource-file', kwargs={'version': 'v1', 'pk': self.pk}, request=request) + def get_absolute_versions_url(self, request=None): return reverse('analysis-model-versions', kwargs={'version': 'v1', 'pk': self.pk}, request=request) + def get_absolute_settings_url(self, request=None): return reverse('model-settings', kwargs={'version': 'v1', 'pk': self.pk}, request=request) + @receiver(post_delete, sender=AnalysisModel) def delete_connected_files(sender, instance, **kwargs): """ Post delete handler to 
clear out any dangaling analyses files """ files_for_removal = [ - 'resource_file', + 'resource_file', ] for ref in files_for_removal: try: diff --git a/src/server/oasisapi/analysis_models/serializers.py b/src/server/oasisapi/analysis_models/serializers.py index 969295801..5fe285fc3 100644 --- a/src/server/oasisapi/analysis_models/serializers.py +++ b/src/server/oasisapi/analysis_models/serializers.py @@ -1,5 +1,4 @@ from drf_yasg.utils import swagger_serializer_method -from django.core.files import File from django.core.exceptions import ObjectDoesNotExist from rest_framework import serializers from rest_framework.exceptions import ValidationError @@ -54,7 +53,7 @@ class TemplateSerializer(serializers.ModelSerializer): """ Catch-all Analysis settings Template Serializer, intended to be called from a nested ViewSet """ - file_url = serializers.SerializerMethodField() + file_url = serializers.SerializerMethodField() class Meta: model = SettingsTemplate @@ -93,13 +92,13 @@ class Meta: def validate(self, attrs): analysis_id = attrs.pop('analysis_id', None) - if analysis_id: + if analysis_id: try: analysis = Analysis.objects.get(id=analysis_id) except ObjectDoesNotExist: - raise ValidationError({"Detail": f"analysis_id = {analysis_id} not found"}) + raise ValidationError({"Detail": f"analysis_id = {analysis_id} not found"}) if not analysis.settings_file: - raise ValidationError({"Detail": f"analysis_id = {analysis_id} has no attached settings file"}) + raise ValidationError({"Detail": f"analysis_id = {analysis_id} has no attached settings file"}) new_settings = analysis.copy_file(analysis.settings_file) new_settings.name = attrs.get('name') diff --git a/src/server/oasisapi/analysis_models/tests/test_analysis_model.py b/src/server/oasisapi/analysis_models/tests/test_analysis_model.py index a06c149bd..db1088e61 100644 --- a/src/server/oasisapi/analysis_models/tests/test_analysis_model.py +++ b/src/server/oasisapi/analysis_models/tests/test_analysis_model.py @@ -96,17 +96,16 @@ def test_data_is_valid___object_is_created(self, supplier_id, model_id, version_ self.assertEqual(model.model_id, model_id) - class ModelSettingsJson(WebTestMixin, TestCase): def test_user_is_not_authenticated___response_is_forbidden(self): models = fake_analysis_model() response = self.app.get(models.get_absolute_settings_url(), expect_errors=True) - self.assertIn(response.status_code, [401,403]) - + self.assertIn(response.status_code, [401, 403]) """ Add these check back in once models auto-update their settings fields """ + def test_settings_json_is_not_present___get_response_is_404(self): user = fake_user() models = fake_analysis_model() @@ -141,38 +140,38 @@ def test_settings_json_is_not_valid___response_is_400(self): user = fake_user() models = fake_analysis_model() json_data = { - "model_settings":{ - "event_set":{ + "model_settings": { + "event_set": { "name": "Event Set", "default": "P", - "options":[ - {"id":"P", "desc": "Proabilistic"}, - {"id":"H", "desc": "Historic"} + "options": [ + {"id": "P", "desc": "Proabilistic"}, + {"id": "H", "desc": "Historic"} ] - }, - "event_occurrence_id":{ + }, + "event_occurrence_id": { "name": "Occurrence Set", "desc": "PiWind Occurrence selection", "default": 1, - "options":[ - {"id":"1", "desc": "Long Term"} + "options": [ + {"id": "1", "desc": "Long Term"} ] }, "boolean_parameters": [ - {"name": "peril_wind", "desc":"Boolean option", "default": 1.1}, - {"name": "peril_surge", "desc":"Boolean option", "default": True} + {"name": "peril_wind", "desc": "Boolean option", 
"default": 1.1}, + {"name": "peril_surge", "desc": "Boolean option", "default": True} ], "float_parameter": [ - {"name": "float_1", "desc":"Some float value", "default": False, "max":1.0, "min":0.0}, - {"name": "float_2", "desc":"Some float value", "default": 0.3, "max":1.0, "min":0.0} + {"name": "float_1", "desc": "Some float value", "default": False, "max": 1.0, "min": 0.0}, + {"name": "float_2", "desc": "Some float value", "default": 0.3, "max": 1.0, "min": 0.0} ] - }, - "lookup_settings":{ - "supported_perils":[ - {"i": "WSS", "desc": "Single Peril: Storm Surge"}, - {"id": "WTC", "des": "Single Peril: Tropical Cyclone"}, - {"id": "WW11", "desc": "Group Peril: Windstorm with storm surge"}, - {"id": "WW2", "desc": "Group Peril: Windstorm w/o storm surge"} + }, + "lookup_settings": { + "supported_perils": [ + {"i": "WSS", "desc": "Single Peril: Storm Surge"}, + {"id": "WTC", "des": "Single Peril: Tropical Cyclone"}, + {"id": "WW11", "desc": "Group Peril: Windstorm with storm surge"}, + {"id": "WW2", "desc": "Group Peril: Windstorm w/o storm surge"} ] } } @@ -187,8 +186,8 @@ def test_settings_json_is_not_valid___response_is_400(self): expect_errors=True, ) - validation_error = { - 'model_settings': ["Additional properties are not allowed ('float_parameter' was unexpected)"], + validation_error = { + 'model_settings': ["Additional properties are not allowed ('float_parameter' was unexpected)"], 'model_settings-event_set': ["'desc' is a required property"], 'model_settings-event_occurrence_id-default': ["1 is not of type 'string'"], 'model_settings-boolean_parameters-0-default': ["1.1 is not of type 'boolean'"], @@ -201,46 +200,45 @@ def test_settings_json_is_not_valid___response_is_400(self): self.assertDictEqual.__self__.maxDiff = None self.assertDictEqual(json.loads(response.body), validation_error) - def test_settings_json_is_uploaded___can_be_retrieved(self): with TemporaryDirectory() as d: with override_settings(MEDIA_ROOT=d): user = fake_user() models = fake_analysis_model() json_data = { - "model_settings":{ - "event_set":{ + "model_settings": { + "event_set": { "name": "Event Set", "desc": "Either Probablistic or Historic", "default": "P", - "options":[ - {"id":"P", "desc": "Proabilistic"}, - {"id":"H", "desc": "Historic"} + "options": [ + {"id": "P", "desc": "Proabilistic"}, + {"id": "H", "desc": "Historic"} ] - }, - "event_occurrence_id":{ + }, + "event_occurrence_id": { "name": "Occurrence Set", "desc": "PiWind Occurrence selection", "default": "1", - "options":[ - {"id":"1", "desc": "Long Term"} + "options": [ + {"id": "1", "desc": "Long Term"} ] }, "boolean_parameters": [ - {"name": "peril_wind", "desc":"Boolean option", "default": False}, - {"name": "peril_surge", "desc":"Boolean option", "default": True} + {"name": "peril_wind", "desc": "Boolean option", "default": False}, + {"name": "peril_surge", "desc": "Boolean option", "default": True} ], "float_parameters": [ - {"name": "float_1", "desc":"Some float value", "default": 0.5, "max":1.0, "min":0.0}, - {"name": "float_2", "desc":"Some float value", "default": 0.3, "max":1.0, "min":0.0} + {"name": "float_1", "desc": "Some float value", "default": 0.5, "max": 1.0, "min": 0.0}, + {"name": "float_2", "desc": "Some float value", "default": 0.3, "max": 1.0, "min": 0.0} ] - }, - "lookup_settings":{ - "supported_perils":[ - {"id": "WSS", "desc": "Single Peril: Storm Surge"}, - {"id": "WTC", "desc": "Single Peril: Tropical Cyclone"}, - {"id": "WW1", "desc": "Group Peril: Windstorm with storm surge"}, - {"id": "WW2", "desc": "Group 
Peril: Windstorm w/o storm surge"} + }, + "lookup_settings": { + "supported_perils": [ + {"id": "WSS", "desc": "Single Peril: Storm Surge"}, + {"id": "WTC", "desc": "Single Peril: Tropical Cyclone"}, + {"id": "WW1", "desc": "Group Peril: Windstorm with storm surge"}, + {"id": "WW2", "desc": "Group Peril: Windstorm w/o storm surge"} ] } } diff --git a/src/server/oasisapi/analysis_models/viewsets.py b/src/server/oasisapi/analysis_models/viewsets.py index 65a8dbcf8..01373c2d9 100644 --- a/src/server/oasisapi/analysis_models/viewsets.py +++ b/src/server/oasisapi/analysis_models/viewsets.py @@ -1,13 +1,9 @@ from __future__ import absolute_import -import io -import json -import os -from django.conf import settings from django.utils.translation import gettext_lazy as _ from django.utils.decorators import method_decorator from django_filters import rest_framework as filters -from django.http import JsonResponse, Http404 +from django.http import Http404 from drf_yasg.utils import swagger_auto_schema from rest_framework import viewsets from rest_framework.decorators import action @@ -113,7 +109,7 @@ def get_serializer_class(self): else: return super(SettingsTemplateViewSet, self).get_serializer_class() - def list(self, request, models_pk=None, **kwargs): + def list(self, request, models_pk=None, **kwargs): context = {'request': request} template_list = self.get_queryset() serializer = TemplateSerializer(template_list, many=True, context=context) @@ -131,11 +127,10 @@ def create(self, request, models_pk=None, **kwargs): model.template_files.add(new_template) return Response(TemplateSerializer(new_template, context=context).data) - @swagger_auto_schema(methods=['get'], responses={200: AnalysisSettingsSerializer}) @swagger_auto_schema(methods=['post'], request_body=AnalysisSettingsSerializer, responses={201: RelatedFileSerializer}) @action(methods=['get', 'post', 'delete'], detail=True) - def content(self, request, pk=None, models_pk=None, version=None): + def content(self, request, pk=None, models_pk=None, version=None): """ get: Gets the analyses template `settings` contents @@ -149,7 +144,6 @@ def content(self, request, pk=None, models_pk=None, version=None): return handle_json_data(self.get_object(), 'file', request, AnalysisSettingsSerializer) - class AnalysisModelViewSet(viewsets.ModelViewSet): """ list: @@ -189,7 +183,7 @@ class AnalysisModelViewSet(viewsets.ModelViewSet): queryset = AnalysisModel.objects.all() serializer_class = AnalysisModelSerializer filterset_class = AnalysisModelFilter - #lookup_field = 'id' + # lookup_field = 'id' def get_serializer_class(self): if self.action in ['resource_file', 'set_resource_file']: diff --git a/src/server/oasisapi/asgi.py b/src/server/oasisapi/asgi.py index a3bfea58b..379b0f00e 100644 --- a/src/server/oasisapi/asgi.py +++ b/src/server/oasisapi/asgi.py @@ -5,7 +5,7 @@ import os import django -#from channels.routing import get_default_application +# from channels.routing import get_default_application from django.core.asgi import get_asgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "src.server.oasisapi.settings") diff --git a/src/server/oasisapi/auth/serializers.py b/src/server/oasisapi/auth/serializers.py index 31f286553..8e0b6439e 100644 --- a/src/server/oasisapi/auth/serializers.py +++ b/src/server/oasisapi/auth/serializers.py @@ -1,10 +1,8 @@ from django.utils.translation import gettext_lazy as _ -from rest_framework import serializers from rest_framework.exceptions import ValidationError from rest_framework_simplejwt import settings 
as jwt_settings from rest_framework_simplejwt.serializers import TokenObtainPairSerializer as BaseTokenObtainPairSerializer from rest_framework_simplejwt.serializers import TokenRefreshSerializer as BaseTokenRefreshSerializer -from rest_framework_simplejwt.tokens import RefreshToken class TokenObtainPairSerializer(BaseTokenObtainPairSerializer): @@ -21,8 +19,7 @@ def validate(self, attrs): data['access_token'] = data['access'] data['token_type'] = 'Bearer' data['expires_in'] = jwt_settings.api_settings.ACCESS_TOKEN_LIFETIME.total_seconds() - #data['expires_in'] = jwt_settings.api_settings.REFRESH_TOKEN_LIFETIME.total_seconds() - + # data['expires_in'] = jwt_settings.api_settings.REFRESH_TOKEN_LIFETIME.total_seconds() del data['refresh'] del data['access'] diff --git a/src/server/oasisapi/data_files/models.py b/src/server/oasisapi/data_files/models.py index 079cc9930..fed2ece72 100644 --- a/src/server/oasisapi/data_files/models.py +++ b/src/server/oasisapi/data_files/models.py @@ -12,7 +12,7 @@ class DataFile(TimeStampedModel): max_length=255, help_text=_('Type of data contained within the file.') ) - file_category= models.CharField( + file_category = models.CharField( max_length=255, blank=True, null=True, diff --git a/src/server/oasisapi/data_files/tests/test_data_files.py b/src/server/oasisapi/data_files/tests/test_data_files.py index 06b98e739..9162a2fc3 100644 --- a/src/server/oasisapi/data_files/tests/test_data_files.py +++ b/src/server/oasisapi/data_files/tests/test_data_files.py @@ -51,7 +51,7 @@ def test_user_is_not_authenticated___response_is_forbidden(self): cmf = fake_data_file() response = self.app.get(cmf.get_absolute_data_file_url(), expect_errors=True) - self.assertIn(response.status_code, [401,403]) + self.assertIn(response.status_code, [401, 403]) def test_data_file_is_not_present___get_response_is_404(self): user = fake_user() diff --git a/src/server/oasisapi/data_files/viewsets.py b/src/server/oasisapi/data_files/viewsets.py index 82a90cd82..7386f881a 100644 --- a/src/server/oasisapi/data_files/viewsets.py +++ b/src/server/oasisapi/data_files/viewsets.py @@ -82,7 +82,7 @@ class DataFileViewset(viewsets.ModelViewSet): def get_serializer_class(self): if self.action in ['content', 'set_content']: return RelatedFileSerializer - elif self.action in ['list']: + elif self.action in ['list']: return DataFileListSerializer else: return super(DataFileViewset, self).get_serializer_class() diff --git a/src/server/oasisapi/files/models.py b/src/server/oasisapi/files/models.py index 9a2f69acc..4f536cfec 100644 --- a/src/server/oasisapi/files/models.py +++ b/src/server/oasisapi/files/models.py @@ -10,7 +10,6 @@ from model_utils.models import TimeStampedModel - def related_file_to_df(RelatedFile): if not RelatedFile: return None @@ -35,38 +34,38 @@ def random_file_name(instance, filename): def file_storage_link(storage_obj, fullpath=False): - """ - Return link to file storage based on 'STORAGE_TYPE' value in settings.py - - storage_obj should point to a `RelatedFile` Obj - - STORAGE_TYPE; - 'Default': local filesystem -> return filename - 'AWS-S3': Remote Object Store -> Return URL with expire time - - fullpath: return the S3 storage path with aws_location - """ - # GUARD check for file, return None it missing - if not hasattr(storage_obj, 'file'): - return None - if not storage_obj.file: - return None - - # Remote storage links (Azure or AWS-S3) - if settings.STORAGE_TYPE in ['aws-s3', 's3', 'aws', 'azure']: - if settings.AWS_SHARED_BUCKET or settings.AZURE_SHARED_CONTAINER or 
fullpath: - # Return object key for shared S3 bucket - return os.path.join( - storage_obj.file.storage.location, - storage_obj.file.name, - ) - else: - # Return Download URL to S3 Object - return storage_obj.file.storage.url(storage_obj.file.name) - - # Shared FS filename - else: - return storage_obj.file.name + """ + Return link to file storage based on 'STORAGE_TYPE' value in settings.py + + storage_obj should point to a `RelatedFile` Obj + + STORAGE_TYPE; + 'Default': local filesystem -> return filename + 'AWS-S3': Remote Object Store -> Return URL with expire time + + fullpath: return the S3 storage path with aws_location + """ + # GUARD check for file, return None it missing + if not hasattr(storage_obj, 'file'): + return None + if not storage_obj.file: + return None + + # Remote storage links (Azure or AWS-S3) + if settings.STORAGE_TYPE in ['aws-s3', 's3', 'aws', 'azure']: + if settings.AWS_SHARED_BUCKET or settings.AZURE_SHARED_CONTAINER or fullpath: + # Return object key for shared S3 bucket + return os.path.join( + storage_obj.file.storage.location, + storage_obj.file.name, + ) + else: + # Return Download URL to S3 Object + return storage_obj.file.storage.url(storage_obj.file.name) + + # Shared FS filename + else: + return storage_obj.file.name class RelatedFile(TimeStampedModel): diff --git a/src/server/oasisapi/files/serializers.py b/src/server/oasisapi/files/serializers.py index 045fdba8a..03f04db66 100644 --- a/src/server/oasisapi/files/serializers.py +++ b/src/server/oasisapi/files/serializers.py @@ -1,7 +1,6 @@ import logging import hashlib import io -import pandas as pd from ods_tools.oed.exposure import OedExposure from rest_framework import serializers @@ -95,7 +94,6 @@ def validate(self, attrs): attrs['oed_validated'] = self.oed_validate return super(RelatedFileSerializer, self).validate(attrs) - def validate_file(self, value): mapped_content_type = CONTENT_TYPE_MAPPING.get(value.content_type, value.content_type) if self.content_types and mapped_content_type not in self.content_types: diff --git a/src/server/oasisapi/files/upload.py b/src/server/oasisapi/files/upload.py index 4bccb0da9..40e1e1604 100644 --- a/src/server/oasisapi/files/upload.py +++ b/src/server/oasisapi/files/upload.py @@ -22,4 +22,4 @@ def wait_for_blob_copy(blob): raise TimeoutError('Timed out waiting for async copy to complete.') time.sleep(5) props = blob.get_blob_properties() - return props + return props diff --git a/src/server/oasisapi/files/views.py b/src/server/oasisapi/files/views.py index dc2c078cd..95b8dc8bc 100644 --- a/src/server/oasisapi/files/views.py +++ b/src/server/oasisapi/files/views.py @@ -5,7 +5,6 @@ from ods_tools.oed.exposure import OedExposure from django.core.files.uploadedfile import UploadedFile from django.http import StreamingHttpResponse, Http404, QueryDict -from django.conf import settings as django_settings from rest_framework.response import Response from .serializers import RelatedFileSerializer @@ -29,6 +28,7 @@ def _delete_related_file(parent, field): parent.save(update_fields=[field]) current.delete() + def _get_chunked_content(f, chunk_size=1024): content = f.read(chunk_size) while content: @@ -66,7 +66,7 @@ def _handle_get_related_file(parent, field, file_format): if file_format == 'csv' and f.content_type == 'application/octet-stream': output_buffer = io.BytesIO() - exposure = OedExposure(**{ + exposure = OedExposure(**{ EXPOSURE_ARGS[field]: pd.read_parquet(io.BytesIO(f.file.read())), 'check_oed': False, }) @@ -86,7 +86,8 @@ def _handle_get_related_file(parent, 
field, file_format): def _handle_post_related_file(parent, field, request, content_types, parquet_storage, oed_validate): - serializer = RelatedFileSerializer(data=request.data, content_types=content_types, context={'request': request}, parquet_storage=parquet_storage, field=field, oed_validate=oed_validate) + serializer = RelatedFileSerializer(data=request.data, content_types=content_types, context={ + 'request': request}, parquet_storage=parquet_storage, field=field, oed_validate=oed_validate) serializer.is_valid(raise_exception=True) instance = serializer.create(serializer.validated_data) @@ -150,6 +151,7 @@ def _json_write_to_file(parent, field, request, serializer): response.data['file'] = instance.file.name return response + def _json_read_from_file(parent, field): f = getattr(parent, field) if not f: @@ -157,6 +159,7 @@ def _json_read_from_file(parent, field): else: return Response(json.load(f)) + def handle_related_file(parent, field, request, content_types, parquet_storage=False, oed_validate=None): method = request.method.lower() diff --git a/src/server/oasisapi/info/tests/test_info.py b/src/server/oasisapi/info/tests/test_info.py index c8461d664..1b536918b 100644 --- a/src/server/oasisapi/info/tests/test_info.py +++ b/src/server/oasisapi/info/tests/test_info.py @@ -39,7 +39,7 @@ def test_user_is_not_authenticated___response_is_Forbidden(self): def test_user_is_authenticated___response_is_ok(self): user = fake_user() response = self.app.get( - reverse('serverinfo'), + reverse('serverinfo'), user=user, headers={ 'Authorization': 'Bearer {}'.format(AccessToken.for_user(user)) diff --git a/src/server/oasisapi/info/views.py b/src/server/oasisapi/info/views.py index 03b961d1a..77d9a9e62 100644 --- a/src/server/oasisapi/info/views.py +++ b/src/server/oasisapi/info/views.py @@ -49,11 +49,11 @@ def get(self, request): server_config['LANGUAGE_CODE'] = settings.LANGUAGE_CODE server_config['TIME_ZONE'] = settings.TIME_ZONE - # Backends + # Backends server_config['DEFAULT_FILE_STORAGE'] = settings.DEFAULT_FILE_STORAGE server_config['DB_ENGINE'] = settings.DB_ENGINE - # Storage + # Storage server_config['STORAGE_TYPE'] = settings.STORAGE_TYPE server_config['MEDIA_ROOT'] = settings.MEDIA_ROOT server_config['AWS_STORAGE_BUCKET_NAME'] = settings.AWS_STORAGE_BUCKET_NAME @@ -71,5 +71,3 @@ def get(self, request): 'version': server_version, 'config': server_config }) - - diff --git a/src/server/oasisapi/portfolios/models.py b/src/server/oasisapi/portfolios/models.py index 78947f38c..df701ac92 100644 --- a/src/server/oasisapi/portfolios/models.py +++ b/src/server/oasisapi/portfolios/models.py @@ -19,10 +19,14 @@ class Portfolio(TimeStampedModel): name = models.CharField(max_length=255, help_text=_('The name of the portfolio')) creator = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='portfolios') - accounts_file = models.ForeignKey(RelatedFile, on_delete=models.CASCADE, blank=True, null=True, default=None, related_name='accounts_file_portfolios') - location_file = models.ForeignKey(RelatedFile, on_delete=models.CASCADE, blank=True, null=True, default=None, related_name='location_file_portfolios') - reinsurance_info_file = models.ForeignKey(RelatedFile, on_delete=models.CASCADE, blank=True, null=True, default=None, related_name='reinsurance_info_file_portfolios') - reinsurance_scope_file = models.ForeignKey(RelatedFile, on_delete=models.CASCADE, blank=True, null=True, default=None, related_name='reinsurance_scope_file_portfolios') + accounts_file = 
models.ForeignKey(RelatedFile, on_delete=models.CASCADE, blank=True, null=True, + default=None, related_name='accounts_file_portfolios') + location_file = models.ForeignKey(RelatedFile, on_delete=models.CASCADE, blank=True, null=True, + default=None, related_name='location_file_portfolios') + reinsurance_info_file = models.ForeignKey(RelatedFile, on_delete=models.CASCADE, blank=True, null=True, + default=None, related_name='reinsurance_info_file_portfolios') + reinsurance_scope_file = models.ForeignKey(RelatedFile, on_delete=models.CASCADE, blank=True, null=True, + default=None, related_name='reinsurance_scope_file_portfolios') def __str__(self): return self.name @@ -50,10 +54,10 @@ def get_absolute_storage_url(self, request=None): def set_portolio_valid(self): oed_files = [ - 'accounts_file', - 'location_file', - 'reinsurance_info_file', - 'reinsurance_scope_file', + 'accounts_file', + 'location_file', + 'reinsurance_info_file', + 'reinsurance_scope_file', ] for ref in oed_files: file_ref = getattr(self, ref) @@ -82,15 +86,16 @@ class PortfolioStatus(TimeStampedModel): def __str__(self): pass + @receiver(post_delete, sender=Portfolio) def delete_connected_files(sender, instance, **kwargs): """ Post delete handler to clear out any dangling analyses files """ files_for_removal = [ - 'accounts_file', - 'location_file', - 'reinsurance_info_file', - 'reinsurance_scope_file', + 'accounts_file', + 'location_file', + 'reinsurance_info_file', + 'reinsurance_scope_file', ] for ref in files_for_removal: file_ref = getattr(instance, ref) diff --git a/src/server/oasisapi/portfolios/serializers.py b/src/server/oasisapi/portfolios/serializers.py index 4392ad271..9e2ba5718 100644 --- a/src/server/oasisapi/portfolios/serializers.py +++ b/src/server/oasisapi/portfolios/serializers.py @@ -224,7 +224,6 @@ def is_in_storage(self, value): else: return default_storage.exists(value) - def validate(self, attrs): file_keys = [k for k in self.fields.keys()] @@ -258,10 +257,10 @@ def validate(self, attrs): return super(PortfolioStorageSerializer, self).validate(attrs) def get_content_type(self, stored_filename): - try: # fetch content_type stored in Django's DB + try: # fetch content_type stored in Django's DB return RelatedFile.objects.get(file=path.basename(stored_filename)).content_type except ObjectDoesNotExist: - try: # Find content_type from S3 Object header + try: # Find content_type from S3 Object header object_header = default_storage.connection.meta.client.head_object( Bucket=default_storage.bucket_name, Key=stored_filename) @@ -372,14 +371,12 @@ def create(self, validated_data): return super(CreateAnalysisSerializer, self).create(data) - class PortfolioValidationSerializer(serializers.ModelSerializer): accounts_validated = serializers.SerializerMethodField() location_validated = serializers.SerializerMethodField() reinsurance_info_validated = serializers.SerializerMethodField() reinsurance_scope_validated = serializers.SerializerMethodField() - class Meta: model = Portfolio fields = ( @@ -389,7 +386,7 @@ class Meta: 'reinsurance_scope_validated', ) - @swagger_serializer_method(serializer_or_field=serializers.CharField) # should it be BooleanField ? + @swagger_serializer_method(serializer_or_field=serializers.CharField)  # should it be BooleanField ? 
def get_location_validated(self, instance): if instance.location_file: return instance.location_file.oed_validated diff --git a/src/server/oasisapi/portfolios/tests/test_portfolio.py b/src/server/oasisapi/portfolios/tests/test_portfolio.py index e7a3de411..b60b9ad39 100644 --- a/src/server/oasisapi/portfolios/tests/test_portfolio.py +++ b/src/server/oasisapi/portfolios/tests/test_portfolio.py @@ -10,7 +10,7 @@ from django_webtest import WebTestMixin from hypothesis import given, settings from hypothesis.extra.django import TestCase -from hypothesis.strategies import text, binary, sampled_from, fixed_dictionaries, integers +from hypothesis.strategies import text, binary, sampled_from from mock import patch from rest_framework_simplejwt.tokens import AccessToken @@ -26,13 +26,12 @@ settings.load_profile("ci") - class PortfolioApi(WebTestMixin, TestCase): def test_user_is_not_authenticated___response_is_forbidden(self): portfolio = fake_portfolio() response = self.app.get(portfolio.get_absolute_url(), expect_errors=True) - self.assertIn(response.status_code, [401,403]) + self.assertIn(response.status_code, [401, 403]) def test_user_is_authenticated_object_does_not_exist___response_is_404(self): user = fake_user() @@ -145,7 +144,7 @@ def test_user_is_not_authenticated___response_is_forbidden(self): portfolio = fake_portfolio() response = self.app.get(portfolio.get_absolute_create_analysis_url(), expect_errors=True) - self.assertIn(response.status_code, [401,403]) + self.assertIn(response.status_code, [401, 403]) def test_user_is_authenticated_object_does_not_exist___response_is_404(self): user = fake_user() @@ -295,12 +294,17 @@ def test_cleaned_name_and_model_are_present___object_is_created_inputs_are_gener self.assertEqual(response.json['model'], model.pk) self.assertEqual(response.json['settings_file'], response.request.application_url + analysis.get_absolute_settings_file_url()) self.assertEqual(response.json['input_file'], response.request.application_url + analysis.get_absolute_input_file_url()) - self.assertEqual(response.json['lookup_errors_file'], response.request.application_url + analysis.get_absolute_lookup_errors_file_url()) - self.assertEqual(response.json['lookup_success_file'], response.request.application_url + analysis.get_absolute_lookup_success_file_url()) - self.assertEqual(response.json['lookup_validation_file'], response.request.application_url + analysis.get_absolute_lookup_validation_file_url()) - self.assertEqual(response.json['input_generation_traceback_file'], response.request.application_url + analysis.get_absolute_input_generation_traceback_file_url()) + self.assertEqual(response.json['lookup_errors_file'], response.request.application_url + + analysis.get_absolute_lookup_errors_file_url()) + self.assertEqual(response.json['lookup_success_file'], response.request.application_url + + analysis.get_absolute_lookup_success_file_url()) + self.assertEqual(response.json['lookup_validation_file'], response.request.application_url + + analysis.get_absolute_lookup_validation_file_url()) + self.assertEqual(response.json['input_generation_traceback_file'], response.request.application_url + + analysis.get_absolute_input_generation_traceback_file_url()) self.assertEqual(response.json['output_file'], response.request.application_url + analysis.get_absolute_output_file_url()) - self.assertEqual(response.json['run_traceback_file'], response.request.application_url + analysis.get_absolute_run_traceback_file_url()) + self.assertEqual(response.json['run_traceback_file'], 
response.request.application_url + + analysis.get_absolute_run_traceback_file_url()) generate_mock.assert_called_once_with(analysis, user) @@ -309,7 +313,7 @@ def test_user_is_not_authenticated___response_is_forbidden(self): portfolio = fake_portfolio() response = self.app.get(portfolio.get_absolute_accounts_file_url(), expect_errors=True) - self.assertIn(response.status_code, [401,403]) + self.assertIn(response.status_code, [401, 403]) def test_accounts_file_is_not_present___get_response_is_404(self): user = fake_user() @@ -385,10 +389,9 @@ def test_accounts_file_is_uploaded___file_can_be_retrieved(self, file_content, c self.assertEqual(response.body, file_content) self.assertEqual(response.content_type, content_type) - def test_accounts_file_invalid_uploaded___parquet_exception_raised(self): - content_type='text/csv' - file_content=b'\xf2hb\xca\xd2\xe6\xf3\xb0\xc1\xc7' + content_type = 'text/csv' + file_content = b'\xf2hb\xca\xd2\xe6\xf3\xb0\xc1\xc7' with TemporaryDirectory() as d: with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=True): @@ -408,9 +411,9 @@ def test_accounts_file_invalid_uploaded___parquet_exception_raised(self): self.assertEqual(400, response.status_code) def test_accounts_file_is_uploaded_as_parquet___file_can_be_retrieved(self): - content_type='text/csv' - test_data = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": [4, 5 ,6]}) - file_content= test_data.to_csv(index=False).encode('utf-8') + content_type = 'text/csv' + test_data = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": [4, 5, 6]}) + file_content = test_data.to_csv(index=False).encode('utf-8') with TemporaryDirectory() as d: with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=True): @@ -447,12 +450,13 @@ def test_accounts_file_is_uploaded_as_parquet___file_can_be_retrieved(self): prq_return_data = pd.read_parquet(io.BytesIO(parquet_response.content)) pd.testing.assert_frame_equal(prq_return_data, test_data) + class PortfolioLocationFile(WebTestMixin, TestCase): def test_user_is_not_authenticated___response_is_forbidden(self): portfolio = fake_portfolio() response = self.app.get(portfolio.get_absolute_location_file_url(), expect_errors=True) - self.assertIn(response.status_code, [401,403]) + self.assertIn(response.status_code, [401, 403]) def test_location_file_is_not_present___get_response_is_404(self): user = fake_user() @@ -529,8 +533,8 @@ def test_location_file_is_uploaded___file_can_be_retrieved(self, file_content, c self.assertEqual(response.content_type, content_type) def test_location_file_invalid_uploaded___parquet_exception_raised(self): - content_type='text/csv' - file_content=b'\xf2hb\xca\xd2\xe6\xf3\xb0\xc1\xc7' + content_type = 'text/csv' + file_content = b'\xf2hb\xca\xd2\xe6\xf3\xb0\xc1\xc7' with TemporaryDirectory() as d: with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=True): @@ -550,9 +554,9 @@ def test_location_file_invalid_uploaded___parquet_exception_raised(self): self.assertEqual(400, response.status_code) def test_location_file_is_uploaded_as_parquet___file_can_be_retrieved(self): - content_type='text/csv' - test_data = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": [4, 5 ,6]}) - file_content= test_data.to_csv(index=False).encode('utf-8') + content_type = 'text/csv' + test_data = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": [4, 5, 6]}) + file_content = test_data.to_csv(index=False).encode('utf-8') with TemporaryDirectory() as d: with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=True): @@ -588,12 +592,13 @@ def 
test_location_file_is_uploaded_as_parquet___file_can_be_retrieved(self): prq_return_data = pd.read_parquet(io.BytesIO(parquet_response.content)) pd.testing.assert_frame_equal(prq_return_data, test_data) + class PortfolioReinsuranceSourceFile(WebTestMixin, TestCase): def test_user_is_not_authenticated___response_is_forbidden(self): portfolio = fake_portfolio() response = self.app.get(portfolio.get_absolute_reinsurance_scope_file_url(), expect_errors=True) - self.assertIn(response.status_code, [401,403]) + self.assertIn(response.status_code, [401, 403]) def test_reinsurance_scope_file_is_not_present___get_response_is_404(self): user = fake_user() @@ -670,8 +675,8 @@ def test_reinsurance_scope_file_is_uploaded___file_can_be_retrieved(self, file_c self.assertEqual(response.content_type, content_type) def test_reinsurance_scope_file_invalid_uploaded___parquet_exception_raised(self): - content_type='text/csv' - file_content=b'\xf2hb\xca\xd2\xe6\xf3\xb0\xc1\xc7' + content_type = 'text/csv' + file_content = b'\xf2hb\xca\xd2\xe6\xf3\xb0\xc1\xc7' with TemporaryDirectory() as d: with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=True): @@ -691,9 +696,9 @@ def test_reinsurance_scope_file_invalid_uploaded___parquet_exception_raised(self self.assertEqual(400, response.status_code) def test_reinsurance_scope_file_is_uploaded_as_parquet___file_can_be_retrieved(self): - content_type='text/csv' - test_data = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": [4, 5 ,6]}) - file_content= test_data.to_csv(index=False).encode('utf-8') + content_type = 'text/csv' + test_data = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": [4, 5, 6]}) + file_content = test_data.to_csv(index=False).encode('utf-8') with TemporaryDirectory() as d: with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=True): @@ -736,7 +741,7 @@ def test_user_is_not_authenticated___response_is_forbidden(self): portfolio = fake_portfolio() response = self.app.get(portfolio.get_absolute_reinsurance_info_file_url(), expect_errors=True) - self.assertIn(response.status_code, [401,403]) + self.assertIn(response.status_code, [401, 403]) def test_reinsurance_info_file_is_not_present___get_response_is_404(self): user = fake_user() @@ -813,8 +818,8 @@ def test_reinsurance_info_file_is_uploaded___file_can_be_retrieved(self, file_co self.assertEqual(response.content_type, content_type) def test_reinsurance_info_file_invalid_uploaded___parquet_exception_raised(self): - content_type='text/csv' - file_content=b'\xf2hb\xca\xd2\xe6\xf3\xb0\xc1\xc7' + content_type = 'text/csv' + file_content = b'\xf2hb\xca\xd2\xe6\xf3\xb0\xc1\xc7' with TemporaryDirectory() as d: with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=True): @@ -834,9 +839,9 @@ def test_reinsurance_info_file_invalid_uploaded___parquet_exception_raised(self) self.assertEqual(400, response.status_code) def test_reinsurance_info_file_is_uploaded_as_parquet___file_can_be_retrieved(self): - content_type='text/csv' - test_data = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": [4, 5 ,6]}) - file_content= test_data.to_csv(index=False).encode('utf-8') + content_type = 'text/csv' + test_data = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": [4, 5, 6]}) + file_content = test_data.to_csv(index=False).encode('utf-8') with TemporaryDirectory() as d: with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=True): @@ -874,9 +879,6 @@ def test_reinsurance_info_file_is_uploaded_as_parquet___file_can_be_retrieved(se pd.testing.assert_frame_equal(prq_return_data, test_data) - - - LOCATION_DATA_VALID = 
"""PortNumber,AccNumber,LocNumber,IsTenant,BuildingID,CountryCode,Latitude,Longitude,StreetAddress,PostalCode,OccupancyCode,ConstructionCode,LocPerilsCovered,BuildingTIV,OtherTIV,ContentsTIV,BITIV,LocCurrency,OEDVersion 1,A11111,10002082046,1,1,GB,52.76698052,-0.895469856,1 ABINGDON ROAD,LE13 0HL,1050,5000,WW1,220000,0,0,0,GBP,2.0.0 1,A11111,10002082047,1,1,GB,52.76697956,-0.89536613,2 ABINGDON ROAD,LE13 0HL,1050,5000,WW1,790000,0,0,0,GBP,2.0.0 @@ -905,19 +907,20 @@ def test_reinsurance_info_file_is_uploaded_as_parquet___file_can_be_retrieved(se 1,A11111,10002082049,1,1,GB,52.76696096,-0.895473908,4 ABINGDON ROAD,LE13 0HL,1050,-1,WW1,30000,0,0,0,GBP,2.0.0 """ + class PortfolioValidation(WebTestMixin, TestCase): def test_all_exposure__are_valid(self): - content_type='text/csv' + content_type = 'text/csv' loc_data = pd.read_csv(io.StringIO(LOCATION_DATA_VALID)) acc_data = pd.read_csv(io.StringIO(ACCOUNT_DATA_VALID)) inf_data = pd.read_csv(io.StringIO(INFO_DATA_VALID)) scp_data = pd.read_csv(io.StringIO(SCOPE_DATA_VALID)) - loc_file_content= loc_data.to_csv(index=False).encode('utf-8') - acc_file_content= acc_data.to_csv(index=False).encode('utf-8') - inf_file_content= inf_data.to_csv(index=False).encode('utf-8') - scp_file_content= scp_data.to_csv(index=False).encode('utf-8') + loc_file_content = loc_data.to_csv(index=False).encode('utf-8') + acc_file_content = acc_data.to_csv(index=False).encode('utf-8') + inf_file_content = inf_data.to_csv(index=False).encode('utf-8') + scp_file_content = scp_data.to_csv(index=False).encode('utf-8') with TemporaryDirectory() as d: with override_settings(MEDIA_ROOT=d): @@ -976,7 +979,6 @@ def test_all_exposure__are_valid(self): 'reinsurance_info_validated': False, 'reinsurance_scope_validated': False}) - # Run validate - check is valid validate_response = self.app.post( portfolio.get_absolute_url() + 'validate/', @@ -991,11 +993,10 @@ def test_all_exposure__are_valid(self): 'reinsurance_info_validated': True, 'reinsurance_scope_validated': True}) - def test_location_file__is_valid(self): - content_type='text/csv' + content_type = 'text/csv' test_data = pd.read_csv(io.StringIO(LOCATION_DATA_VALID)) - file_content= test_data.to_csv(index=False).encode('utf-8') + file_content = test_data.to_csv(index=False).encode('utf-8') with TemporaryDirectory() as d: with override_settings(MEDIA_ROOT=d): @@ -1027,7 +1028,6 @@ def test_location_file__is_valid(self): 'reinsurance_info_validated': None, 'reinsurance_scope_validated': None}) - # Run validate - check is valid validate_response = self.app.post( portfolio.get_absolute_url() + 'validate/', @@ -1042,11 +1042,10 @@ def test_location_file__is_valid(self): 'reinsurance_info_validated': None, 'reinsurance_scope_validated': None}) - def test_location_file__is_invalid__response_is_400(self): - content_type='text/csv' + content_type = 'text/csv' test_data = pd.read_csv(io.StringIO(LOCATION_DATA_INVALID)) - file_content= test_data.to_csv(index=False).encode('utf-8') + file_content = test_data.to_csv(index=False).encode('utf-8') with TemporaryDirectory() as d: with override_settings(MEDIA_ROOT=d): @@ -1096,9 +1095,9 @@ def test_location_file__is_invalid__response_is_400(self): ]) def test_account_file__is_invalid__response_is_400(self): - content_type='text/csv' + content_type = 'text/csv' test_data = pd.read_csv(io.StringIO(LOCATION_DATA_VALID)) - file_content= test_data.to_csv(index=False).encode('utf-8') + file_content = test_data.to_csv(index=False).encode('utf-8') with TemporaryDirectory() as d: with 
override_settings(MEDIA_ROOT=d): @@ -1130,7 +1129,6 @@ def test_account_file__is_invalid__response_is_400(self): 'reinsurance_info_validated': None, 'reinsurance_scope_validated': None}) - # Run validate - check is valid validate_response = self.app.post( portfolio.get_absolute_url() + 'validate/', @@ -1141,32 +1139,31 @@ def test_account_file__is_invalid__response_is_400(self): ) self.assertEqual(400, validate_response.status_code) self.assertEqual(validate_response.json, [ - ['account', 'missing required column AccCurrency'], - ['account', 'missing required column PolNumber'], - ['account', 'missing required column PolPerilsCovered'], - ['account', "column 'LocNumber' is not a valid oed field"], - ['account', "column 'IsTenant' is not a valid oed field"], - ['account', "column 'BuildingID' is not a valid oed field"], - ['account', "column 'CountryCode' is not a valid oed field"], - ['account', "column 'Latitude' is not a valid oed field"], - ['account', "column 'Longitude' is not a valid oed field"], - ['account', "column 'StreetAddress' is not a valid oed field"], - ['account', "column 'PostalCode' is not a valid oed field"], - ['account', "column 'OccupancyCode' is not a valid oed field"], - ['account', "column 'ConstructionCode' is not a valid oed field"], - ['account', "column 'LocPerilsCovered' is not a valid oed field"], - ['account', "column 'BuildingTIV' is not a valid oed field"], - ['account', "column 'OtherTIV' is not a valid oed field"], - ['account', "column 'ContentsTIV' is not a valid oed field"], - ['account', "column 'BITIV' is not a valid oed field"], + ['account', 'missing required column AccCurrency'], + ['account', 'missing required column PolNumber'], + ['account', 'missing required column PolPerilsCovered'], + ['account', "column 'LocNumber' is not a valid oed field"], + ['account', "column 'IsTenant' is not a valid oed field"], + ['account', "column 'BuildingID' is not a valid oed field"], + ['account', "column 'CountryCode' is not a valid oed field"], + ['account', "column 'Latitude' is not a valid oed field"], + ['account', "column 'Longitude' is not a valid oed field"], + ['account', "column 'StreetAddress' is not a valid oed field"], + ['account', "column 'PostalCode' is not a valid oed field"], + ['account', "column 'OccupancyCode' is not a valid oed field"], + ['account', "column 'ConstructionCode' is not a valid oed field"], + ['account', "column 'LocPerilsCovered' is not a valid oed field"], + ['account', "column 'BuildingTIV' is not a valid oed field"], + ['account', "column 'OtherTIV' is not a valid oed field"], + ['account', "column 'ContentsTIV' is not a valid oed field"], + ['account', "column 'BITIV' is not a valid oed field"], ['account', "column 'LocCurrency' is not a valid oed field"] ]) - def test_reinsurance_info_file__is_invalid__response_is_400(self): - content_type='text/csv' + content_type = 'text/csv' test_data = pd.read_csv(io.StringIO(LOCATION_DATA_VALID)) - file_content= test_data.to_csv(index=False).encode('utf-8') + file_content = test_data.to_csv(index=False).encode('utf-8') with TemporaryDirectory() as d: with override_settings(MEDIA_ROOT=d): @@ -1198,7 +1195,6 @@ def test_reinsurance_info_file__is_invalid__response_is_400(self): 'reinsurance_info_validated': False, 'reinsurance_scope_validated': None}) - # Run validate - check is valid validate_response = self.app.post( portfolio.get_absolute_url() + 'validate/', @@ -1209,37 +1205,37 @@ def test_reinsurance_info_file__is_invalid__response_is_400(self): ) self.assertEqual(400, 
validate_response.status_code) self.assertEqual(validate_response.json, [ - ['ri_info', 'missing required column ReinsNumber'], - ['ri_info', 'missing required column ReinsPeril'], - ['ri_info', 'missing required column PlacedPercent'], - ['ri_info', 'missing required column ReinsCurrency'], - ['ri_info', 'missing required column InuringPriority'], - ['ri_info', 'missing required column ReinsType'], - ['ri_info', 'missing required column RiskLevel'], - ['ri_info', "column 'PortNumber' is not a valid oed field"], - ['ri_info', "column 'AccNumber' is not a valid oed field"], - ['ri_info', "column 'LocNumber' is not a valid oed field"], - ['ri_info', "column 'IsTenant' is not a valid oed field"], - ['ri_info', "column 'BuildingID' is not a valid oed field"], - ['ri_info', "column 'CountryCode' is not a valid oed field"], - ['ri_info', "column 'Latitude' is not a valid oed field"], - ['ri_info', "column 'Longitude' is not a valid oed field"], - ['ri_info', "column 'StreetAddress' is not a valid oed field"], - ['ri_info', "column 'PostalCode' is not a valid oed field"], - ['ri_info', "column 'OccupancyCode' is not a valid oed field"], - ['ri_info', "column 'ConstructionCode' is not a valid oed field"], - ['ri_info', "column 'LocPerilsCovered' is not a valid oed field"], - ['ri_info', "column 'BuildingTIV' is not a valid oed field"], - ['ri_info', "column 'OtherTIV' is not a valid oed field"], - ['ri_info', "column 'ContentsTIV' is not a valid oed field"], - ['ri_info', "column 'BITIV' is not a valid oed field"], + ['ri_info', 'missing required column ReinsNumber'], + ['ri_info', 'missing required column ReinsPeril'], + ['ri_info', 'missing required column PlacedPercent'], + ['ri_info', 'missing required column ReinsCurrency'], + ['ri_info', 'missing required column InuringPriority'], + ['ri_info', 'missing required column ReinsType'], + ['ri_info', 'missing required column RiskLevel'], + ['ri_info', "column 'PortNumber' is not a valid oed field"], + ['ri_info', "column 'AccNumber' is not a valid oed field"], + ['ri_info', "column 'LocNumber' is not a valid oed field"], + ['ri_info', "column 'IsTenant' is not a valid oed field"], + ['ri_info', "column 'BuildingID' is not a valid oed field"], + ['ri_info', "column 'CountryCode' is not a valid oed field"], + ['ri_info', "column 'Latitude' is not a valid oed field"], + ['ri_info', "column 'Longitude' is not a valid oed field"], + ['ri_info', "column 'StreetAddress' is not a valid oed field"], + ['ri_info', "column 'PostalCode' is not a valid oed field"], + ['ri_info', "column 'OccupancyCode' is not a valid oed field"], + ['ri_info', "column 'ConstructionCode' is not a valid oed field"], + ['ri_info', "column 'LocPerilsCovered' is not a valid oed field"], + ['ri_info', "column 'BuildingTIV' is not a valid oed field"], + ['ri_info', "column 'OtherTIV' is not a valid oed field"], + ['ri_info', "column 'ContentsTIV' is not a valid oed field"], + ['ri_info', "column 'BITIV' is not a valid oed field"], ['ri_info', "column 'LocCurrency' is not a valid oed field"] ]) def test_reinsurance_scope_file__is_invalid__response_is_400(self): - content_type='text/csv' + content_type = 'text/csv' test_data = pd.read_csv(io.StringIO(LOCATION_DATA_VALID)) - file_content= test_data.to_csv(index=False).encode('utf-8') + file_content = test_data.to_csv(index=False).encode('utf-8') with TemporaryDirectory() as d: with override_settings(MEDIA_ROOT=d): @@ -1271,7 +1267,6 @@ def test_reinsurance_scope_file__is_invalid__response_is_400(self): 'reinsurance_info_validated': 
None, 'reinsurance_scope_validated': False}) - # Run validate - check is valid validate_response = self.app.post( portfolio.get_absolute_url() + 'validate/', @@ -1281,20 +1276,20 @@ def test_reinsurance_scope_file__is_invalid__response_is_400(self): expect_errors=True, ) self.assertEqual(400, validate_response.status_code) - self.assertEqual(validate_response.json,[ - ['ri_scope', 'missing required column ReinsNumber'], - ['ri_scope', "column 'IsTenant' is not a valid oed field"], - ['ri_scope', "column 'BuildingID' is not a valid oed field"], - ['ri_scope', "column 'Latitude' is not a valid oed field"], - ['ri_scope', "column 'Longitude' is not a valid oed field"], - ['ri_scope', "column 'StreetAddress' is not a valid oed field"], - ['ri_scope', "column 'PostalCode' is not a valid oed field"], - ['ri_scope', "column 'OccupancyCode' is not a valid oed field"], - ['ri_scope', "column 'ConstructionCode' is not a valid oed field"], - ['ri_scope', "column 'LocPerilsCovered' is not a valid oed field"], - ['ri_scope', "column 'BuildingTIV' is not a valid oed field"], - ['ri_scope', "column 'OtherTIV' is not a valid oed field"], - ['ri_scope', "column 'ContentsTIV' is not a valid oed field"], - ['ri_scope', "column 'BITIV' is not a valid oed field"], + self.assertEqual(validate_response.json, [ + ['ri_scope', 'missing required column ReinsNumber'], + ['ri_scope', "column 'IsTenant' is not a valid oed field"], + ['ri_scope', "column 'BuildingID' is not a valid oed field"], + ['ri_scope', "column 'Latitude' is not a valid oed field"], + ['ri_scope', "column 'Longitude' is not a valid oed field"], + ['ri_scope', "column 'StreetAddress' is not a valid oed field"], + ['ri_scope', "column 'PostalCode' is not a valid oed field"], + ['ri_scope', "column 'OccupancyCode' is not a valid oed field"], + ['ri_scope', "column 'ConstructionCode' is not a valid oed field"], + ['ri_scope', "column 'LocPerilsCovered' is not a valid oed field"], + ['ri_scope', "column 'BuildingTIV' is not a valid oed field"], + ['ri_scope', "column 'OtherTIV' is not a valid oed field"], + ['ri_scope', "column 'ContentsTIV' is not a valid oed field"], + ['ri_scope', "column 'BITIV' is not a valid oed field"], ['ri_scope', "column 'LocCurrency' is not a valid oed field"] ]) diff --git a/src/server/oasisapi/portfolios/viewsets.py b/src/server/oasisapi/portfolios/viewsets.py index f37e53b61..22502d07e 100644 --- a/src/server/oasisapi/portfolios/viewsets.py +++ b/src/server/oasisapi/portfolios/viewsets.py @@ -30,7 +30,8 @@ class PortfolioFilter(TimeStampedFilter): name = filters.CharFilter(help_text=_('Filter results by case insensitive names equal to the given string'), lookup_expr='iexact') - name__contains = filters.CharFilter(help_text=_('Filter results by case insensitive name containing the given string'), lookup_expr='icontains', field_name='name') + name__contains = filters.CharFilter(help_text=_( + 'Filter results by case insensitive name containing the given string'), lookup_expr='icontains', field_name='name') user = filters.CharFilter( help_text=_('Filter results by case insensitive `user` equal to the given string'), lookup_expr='iexact', @@ -98,7 +99,6 @@ class PortfolioViewSet(viewsets.ModelViewSet): 'application/octet-stream', ] - def get_serializer_class(self): if self.action == 'create_analysis': return CreateAnalysisSerializer @@ -115,13 +115,12 @@ def get_serializer_class(self): else: return super(PortfolioViewSet, self).get_serializer_class() - @property def parser_classes(self): method = self.request.method.lower() 
upload_views = ['accounts_file', 'location_file', 'reinsurance_info_file', 'reinsurance_scope_file'] - if method == 'post' and getattr(self, 'action', None) in upload_views: + if method == 'post' and getattr(self, 'action', None) in upload_views: return [MultiPartParser] else: return api_settings.DEFAULT_PARSER_CLASSES @@ -159,7 +158,6 @@ def storage_links(self, request, pk=None, version=None): serializer.save() return Response(serializer.data) - @swagger_auto_schema(methods=['get'], responses={200: FILE_RESPONSE}, manual_parameters=[FILE_FORMAT_PARAM]) @swagger_auto_schema(methods=['post'], manual_parameters=[FILE_VALIDATION_PARAM]) @action(methods=['get', 'post', 'delete'], detail=True) @@ -229,7 +227,6 @@ def reinsurance_info_file(self, request, pk=None, version=None): oed_validate = None return handle_related_file(self.get_object(), 'reinsurance_info_file', request, self.supported_mime_types, store_as_parquet, oed_validate) - @swagger_auto_schema(methods=['get'], responses={200: FILE_RESPONSE}, manual_parameters=[FILE_FORMAT_PARAM]) @swagger_auto_schema(methods=['post'], manual_parameters=[FILE_VALIDATION_PARAM]) @action(methods=['get', 'post', 'delete'], detail=True) @@ -253,7 +250,6 @@ def reinsurance_scope_file(self, request, pk=None, version=None): oed_validate = None return handle_related_file(self.get_object(), 'reinsurance_scope_file', request, self.supported_mime_types, store_as_parquet, oed_validate) - @action(methods=['get', 'post'], detail=True) def validate(self, request, pk=None, version=None): """ diff --git a/src/server/oasisapi/schemas/custom_swagger.py b/src/server/oasisapi/schemas/custom_swagger.py index a161adbc2..ac6a1ef03 100644 --- a/src/server/oasisapi/schemas/custom_swagger.py +++ b/src/server/oasisapi/schemas/custom_swagger.py @@ -10,7 +10,6 @@ from drf_yasg.openapi import Schema - FILE_RESPONSE = openapi.Response( 'File Download', schema=Schema(type=openapi.TYPE_FILE), diff --git a/src/server/oasisapi/schemas/serializers.py b/src/server/oasisapi/schemas/serializers.py index 5a882b2c3..3a6f1827f 100644 --- a/src/server/oasisapi/schemas/serializers.py +++ b/src/server/oasisapi/schemas/serializers.py @@ -18,6 +18,7 @@ from jsonschema.exceptions import ValidationError as JSONSchemaValidationError from jsonschema.exceptions import SchemaError as JSONSchemaError + class TokenObtainPairResponseSerializer(serializers.Serializer): refresh_token = serializers.CharField(read_only=True) access_token = serializers.CharField(read_only=True) @@ -55,6 +56,7 @@ def create(self, validated_data): def update(self, instance, validated_data): raise NotImplementedError() + class LocFileSerializer(serializers.Serializer): url = serializers.URLField() name = serializers.CharField() @@ -102,6 +104,7 @@ def create(self, validated_data): def update(self, instance, validated_data): raise NotImplementedError() + def update_links(link_prefix, d): """ Linking in pre-defined scheams with path links will be nested @@ -111,7 +114,7 @@ def update_links(link_prefix, d): '#definitions/option' -> #definitions/SWAGGER_OBJECT/definitions/option """ - for k,v in d.items(): + for k, v in d.items(): if isinstance(v, dict): update_links(link_prefix, v) elif isinstance(v, list): @@ -123,6 +126,7 @@ def update_links(link_prefix, d): link = v.split('#')[-1] d[k] = "{}{}".format(link_prefix, link) + def load_json_schema(json_schema_file, link_prefix=None): """ Load json schema stored in the .schema dir @@ -204,10 +208,10 @@ def validate(self, data): # These are added into existing files as a 'fix' so older 
workers can run without patching the worker schema # This *SHOULD* be removed at a later date once older models are no longer used compatibility_field_map = { - "module_supplier_id":{ + "module_supplier_id": { "updated_to": "model_supplier_id" }, - "model_version_id":{ + "model_version_id": { "updated_to": "model_name_id" }, } diff --git a/src/server/oasisapi/settings.py b/src/server/oasisapi/settings.py index 28bbd5acc..d2258f341 100644 --- a/src/server/oasisapi/settings.py +++ b/src/server/oasisapi/settings.py @@ -21,7 +21,6 @@ from ...common.shared import set_aws_log_level, set_azure_log_level - IN_TEST = 'test' in sys.argv # Build paths inside the project like this: os.path.join(BASE_DIR, ...) @@ -218,27 +217,26 @@ AZURE_LOCATION = '' """ AZURE_ACCOUNT_NAME = iniconf.settings.get('server', 'AZURE_ACCOUNT_NAME', fallback=None) -AZURE_ACCOUNT_KEY = iniconf.settings.get('server', 'AZURE_ACCOUNT_KEY', fallback=None) +AZURE_ACCOUNT_KEY = iniconf.settings.get('server', 'AZURE_ACCOUNT_KEY', fallback=None) AZURE_CUSTOM_DOMAIN = f'{AZURE_ACCOUNT_NAME}.blob.core.windows.net' AZURE_CONTAINER = iniconf.settings.get('server', 'AZURE_CONTAINER', fallback=None) AZURE_LOCATION = iniconf.settings.get('server', 'AZURE_LOCATION', fallback='') AZURE_SHARED_CONTAINER = iniconf.settings.get('server', 'AZURE_SHARED_CONTAINER', fallback=True) AZURE_OVERWRITE_FILES = iniconf.settings.get('server', 'AZURE_OVERWRITE_FILES', fallback=True) -## Optional Blob storage settings +# Optional Blob storage settings AZURE_LOG_LEVEL = iniconf.settings.get('server', 'AZURE_LOG_LEVEL', fallback="") AZURE_SSL = iniconf.settings.get('server', 'AZURE_SSL', fallback=True) # WARNING, adding default settings with 'None' causes storage adapter to break -#AZURE_UPLOAD_MAX_CONN = iniconf.settings.get('server', 'AZURE_UPLOAD_MAX_CONN', fallback=2) -#AZURE_CONNECTION_TIMEOUT_SECS = iniconf.settings.get('server', 'AZURE_CONNECTION_TIMEOUT_SECS', fallback=20) -#AZURE_BLOB_MAX_MEMORY_SIZE = iniconf.settings.get('server', 'AZURE_BLOB_MAX_MEMORY_SIZE', fallback='2MB') -#AZURE_URL_EXPIRATION_SECS = iniconf.settings.get('server', 'AZURE_URL_EXPIRATION_SECS', fallback=None) -#AZURE_CONNECTION_STRING = iniconf.settings.get('server', 'AZURE_CONNECTION_STRING', fallback=None) -#AZURE_TOKEN_CREDENTIAL = iniconf.settings.get('server', 'AZURE_TOKEN_CREDENTIAL', fallback=None) -#AZURE_CACHE_CONTROL = iniconf.settings.get('server', 'AZURE_CACHE_CONTROL', fallback=None) -#AZURE_OBJECT_PARAMETERS = iniconf.settings.get('server', 'AZURE_OBJECT_PARAMETERS', fallback=None) - +# AZURE_UPLOAD_MAX_CONN = iniconf.settings.get('server', 'AZURE_UPLOAD_MAX_CONN', fallback=2) +# AZURE_CONNECTION_TIMEOUT_SECS = iniconf.settings.get('server', 'AZURE_CONNECTION_TIMEOUT_SECS', fallback=20) +# AZURE_BLOB_MAX_MEMORY_SIZE = iniconf.settings.get('server', 'AZURE_BLOB_MAX_MEMORY_SIZE', fallback='2MB') +# AZURE_URL_EXPIRATION_SECS = iniconf.settings.get('server', 'AZURE_URL_EXPIRATION_SECS', fallback=None) +# AZURE_CONNECTION_STRING = iniconf.settings.get('server', 'AZURE_CONNECTION_STRING', fallback=None) +# AZURE_TOKEN_CREDENTIAL = iniconf.settings.get('server', 'AZURE_TOKEN_CREDENTIAL', fallback=None) +# AZURE_CACHE_CONTROL = iniconf.settings.get('server', 'AZURE_CACHE_CONTROL', fallback=None) +# AZURE_OBJECT_PARAMETERS = iniconf.settings.get('server', 'AZURE_OBJECT_PARAMETERS', fallback=None) # Select Data Storage @@ -282,7 +280,7 @@ # https://github.com/davesque/django-rest-framework-simplejwt SIMPLE_JWT = { - 'ACCESS_TOKEN_LIFETIME': 
iniconf.settings.get_timedelta('server', 'TOKEN_ACCESS_LIFETIME', fallback='hours=1'), + 'ACCESS_TOKEN_LIFETIME': iniconf.settings.get_timedelta('server', 'TOKEN_ACCESS_LIFETIME', fallback='hours=1'), 'REFRESH_TOKEN_LIFETIME': iniconf.settings.get_timedelta('server', 'TOKEN_REFRESH_LIFETIME', fallback='days=2'), 'ROTATE_REFRESH_TOKENS': iniconf.settings.getboolean('server', 'TOKEN_REFRESH_ROTATE', fallback=True), 'BLACKLIST_AFTER_ROTATION': iniconf.settings.getboolean('server', 'TOKEN_REFRESH_ROTATE', fallback=True), @@ -317,14 +315,14 @@ }, 'loggers': { 'drf_yasg': { - 'handlers': ['console'], - 'level': 'WARNING', - 'propagate': False, + 'handlers': ['console'], + 'level': 'WARNING', + 'propagate': False, }, 'numexpr': { - 'handlers': ['console'], - 'level': 'WARNING', - 'propagate': False, + 'handlers': ['console'], + 'level': 'WARNING', + 'propagate': False, }, }, 'formatters': { diff --git a/tests/integration/api_integration.py b/tests/integration/api_integration.py index be09c5fc2..12a84afd7 100644 --- a/tests/integration/api_integration.py +++ b/tests/integration/api_integration.py @@ -18,17 +18,19 @@ cli_test_output = True if os.environ.get('PY_TEST_OUTPUT') else False cli_test_case = os.environ.get('PY_TEST_CASE').split(' ') if os.environ.get('PY_TEST_CASE') else None cli_test_model = os.environ.get('PY_TEST_MODEL') if os.environ.get('PY_TEST_MODEL') else None -cli_test_retry = int(os.environ.get('PY_TEST_RETRY')) if os.environ.get('PY_TEST_MODEL') else 1 +cli_test_retry = int(os.environ.get('PY_TEST_RETRY')) if os.environ.get('PY_TEST_MODEL') else 1 config = configparser.ConfigParser() config.read(os.path.abspath(cli_test_conf)) + def get_path(section, var, config=config): try: return os.path.abspath(config.get(section, var)) except configparser.NoOptionError: return None + def get_different_rows(source_df, new_df): """Returns just the rows from the new dataframe that differ from the source dataframe""" merged_df = source_df.merge(new_df, indicator=True, how='outer') @@ -62,16 +64,16 @@ def check_expected(result_path, expected_path): test_failed = True test_results[filename] = 'FAILED' - #if not df_expect.equals(df_found): + # if not df_expect.equals(df_found): # test_failed = True # test_results[filename] = 'FAILED' # print(get_different_rows(df_expect, df_found)) - #else: + # else: # test_results[filename] = 'PASSED' print('\n -- Results --') print(json.dumps(test_results, indent=2)) - assert(test_failed == False) + assert (test_failed == False) def check_non_empty(result_path): @@ -88,13 +90,11 @@ def check_non_empty(result_path): file_path = os.path.join(result_path, csv) file_size = os.path.getsize(file_path) print(f'{file_size} Bytes: -> {csv}') - assert(file_size > 0) - + assert (file_size > 0) # --- Test Paramatization --------------------------------------------------- # - if cli_test_model: test_model = cli_test_model else: diff --git a/tests/test_tasks.py b/tests/test_tasks.py index 191d55b10..42be742f9 100644 --- a/tests/test_tasks.py +++ b/tests/test_tasks.py @@ -18,7 +18,7 @@ start_analysis_task, get_oasislmf_config_path -#from oasislmf.utils.status import OASIS_TASK_STATUS +# from oasislmf.utils.status import OASIS_TASK_STATUS OASIS_TASK_STATUS = { 'pending': {'id': 'PENDING', 'desc': 'Pending'}, 'running': {'id': 'RUNNING', 'desc': 'Running'}, @@ -71,9 +71,9 @@ def test_input_location_is_not_a_tar___exception_is_raised(self): Path(media_root, 'not-tar-file.tar').touch() Path(media_root, 'analysis_settings.json').touch() self.assertRaises(InvalidInputsException, 
start_analysis, - os.path.join(media_root, 'analysis_settings.json'), - os.path.join(media_root, 'not-tar-file.tar') - ) + os.path.join(media_root, 'analysis_settings.json'), + os.path.join(media_root, 'not-tar-file.tar') + ) def test_custom_model_runner_does_not_exist___generate_losses_is_called_output_files_are_tared_up(self): with TemporaryDirectory() as media_root, \ @@ -93,8 +93,8 @@ def test_custom_model_runner_does_not_exist___generate_losses_is_called_output_f Path(model_data_dir, 'supplier', 'model', 'version').mkdir(parents=True) cmd_instance = Mock() - #cmd_instance.stdout = b'output' - #cmd_instance.stderr = b'errors' + # cmd_instance.stdout = b'output' + # cmd_instance.stderr = b'errors' cmd_instance.returncode = 0 cmd_instance.communicate = Mock(return_value=(b'mock subprocess stdout', b'mock subprocess stderr')) @@ -113,13 +113,13 @@ def fake_run_dir(*args, **kwargs): ) test_env = os.environ.copy() cmd_mock.assert_called_once_with(['oasislmf', 'model', 'generate-losses', - '--oasis-files-dir', os.path.join(run_dir, 'input'), - '--config', get_oasislmf_config_path(settings.get('worker', 'model_id')), - '--model-run-dir', run_dir, - '--analysis-settings-json', os.path.join(media_root, 'analysis_settings.json'), - '--ktools-fifo-relative', - '--verbose', - ], stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=test_env, preexec_fn=os.setsid) + '--oasis-files-dir', os.path.join(run_dir, 'input'), + '--config', get_oasislmf_config_path(settings.get('worker', 'model_id')), + '--model-run-dir', run_dir, + '--analysis-settings-json', os.path.join(media_root, 'analysis_settings.json'), + '--ktools-fifo-relative', + '--verbose', + ], stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=test_env, preexec_fn=os.setsid) tarfile.assert_called_once_with(output_location, os.path.join(run_dir, 'output'), 'output') @@ -127,8 +127,8 @@ class StartAnalysisTask(TestCase): @given(pk=integers(), location=text(), analysis_settings_path=text()) def test_lock_is_not_acquireable___retry_esception_is_raised(self, pk, location, analysis_settings_path): with patch('fasteners.InterProcessLock.acquire', Mock(return_value=False)), \ - patch('src.model_execution_worker.tasks.check_worker_lost', Mock(return_value='')), \ - patch('src.model_execution_worker.tasks.notify_api_status') as api_notify: + patch('src.model_execution_worker.tasks.check_worker_lost', Mock(return_value='')), \ + patch('src.model_execution_worker.tasks.notify_api_status') as api_notify: with self.assertRaises(Retry): start_analysis_task(pk, location, analysis_settings_path) @@ -136,8 +136,8 @@ def test_lock_is_not_acquireable___retry_esception_is_raised(self, pk, location, @given(pk=integers(), location=text(), analysis_settings_path=text()) def test_lock_is_acquireable___start_analysis_is_ran(self, pk, location, analysis_settings_path): with patch('src.model_execution_worker.tasks.start_analysis', Mock(return_value=('', '', '', 0))) as start_analysis_mock, \ - patch('src.model_execution_worker.tasks.check_worker_lost', Mock(return_value='')), \ - patch('src.model_execution_worker.tasks.notify_api_status') as api_notify: + patch('src.model_execution_worker.tasks.check_worker_lost', Mock(return_value='')), \ + patch('src.model_execution_worker.tasks.notify_api_status') as api_notify: start_analysis_task.update_state = Mock() start_analysis_task(pk, location, analysis_settings_path) From fd2232504a33e54664422794a08a74f364ab0c54 Mon Sep 17 00:00:00 2001 From: sambles Date: Mon, 6 Feb 2023 11:59:54 +0000 Subject: [PATCH 03/33] Fix/portfolio 
validate default (#747) * Fix loading default - PORTFOLIO_UPLOAD_VALIDATION * Fix test_portfolio.py --- .../portfolios/tests/test_portfolio.py | 44 +++++++++---------- src/server/oasisapi/portfolios/viewsets.py | 8 ++-- 2 files changed, 26 insertions(+), 26 deletions(-) diff --git a/src/server/oasisapi/portfolios/tests/test_portfolio.py b/src/server/oasisapi/portfolios/tests/test_portfolio.py index b60b9ad39..96f71d6df 100644 --- a/src/server/oasisapi/portfolios/tests/test_portfolio.py +++ b/src/server/oasisapi/portfolios/tests/test_portfolio.py @@ -345,7 +345,7 @@ def test_accounts_file_is_not_present___delete_response_is_404(self): def test_accounts_file_is_not_a_valid_format___response_is_400(self): with TemporaryDirectory() as d: - with override_settings(MEDIA_ROOT=d): + with override_settings(MEDIA_ROOT=d, PORTFOLIO_UPLOAD_VALIDATION=False): user = fake_user() portfolio = fake_portfolio() @@ -365,7 +365,7 @@ def test_accounts_file_is_not_a_valid_format___response_is_400(self): @given(file_content=binary(min_size=1), content_type=sampled_from(['text/csv', 'application/json'])) def test_accounts_file_is_uploaded___file_can_be_retrieved(self, file_content, content_type): with TemporaryDirectory() as d: - with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=False): + with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=False, PORTFOLIO_UPLOAD_VALIDATION=False): user = fake_user() portfolio = fake_portfolio() @@ -394,7 +394,7 @@ def test_accounts_file_invalid_uploaded___parquet_exception_raised(self): file_content = b'\xf2hb\xca\xd2\xe6\xf3\xb0\xc1\xc7' with TemporaryDirectory() as d: - with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=True): + with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=True, PORTFOLIO_UPLOAD_VALIDATION=False): user = fake_user() portfolio = fake_portfolio() @@ -416,7 +416,7 @@ def test_accounts_file_is_uploaded_as_parquet___file_can_be_retrieved(self): file_content = test_data.to_csv(index=False).encode('utf-8') with TemporaryDirectory() as d: - with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=True): + with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=True, PORTFOLIO_UPLOAD_VALIDATION=False): user = fake_user() portfolio = fake_portfolio() @@ -488,7 +488,7 @@ def test_location_file_is_not_present___delete_response_is_404(self): def test_location_file_is_not_a_valid_format___response_is_400(self): with TemporaryDirectory() as d: - with override_settings(MEDIA_ROOT=d): + with override_settings(MEDIA_ROOT=d, PORTFOLIO_UPLOAD_VALIDATION=False): user = fake_user() portfolio = fake_portfolio() @@ -508,7 +508,7 @@ def test_location_file_is_not_a_valid_format___response_is_400(self): @given(file_content=binary(min_size=1), content_type=sampled_from(['text/csv', 'application/json'])) def test_location_file_is_uploaded___file_can_be_retrieved(self, file_content, content_type): with TemporaryDirectory() as d: - with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=False): + with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=False, PORTFOLIO_UPLOAD_VALIDATION=False): user = fake_user() portfolio = fake_portfolio() @@ -537,7 +537,7 @@ def test_location_file_invalid_uploaded___parquet_exception_raised(self): file_content = b'\xf2hb\xca\xd2\xe6\xf3\xb0\xc1\xc7' with TemporaryDirectory() as d: - with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=True): + with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=True, PORTFOLIO_UPLOAD_VALIDATION=False): user = fake_user() 
portfolio = fake_portfolio() @@ -559,7 +559,7 @@ def test_location_file_is_uploaded_as_parquet___file_can_be_retrieved(self): file_content = test_data.to_csv(index=False).encode('utf-8') with TemporaryDirectory() as d: - with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=True): + with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=True, PORTFOLIO_UPLOAD_VALIDATION=False): user = fake_user() portfolio = fake_portfolio() @@ -630,7 +630,7 @@ def test_reinsurance_scope_file_is_not_present___delete_response_is_404(self): def test_reinsurance_scope_file_is_not_a_valid_format___response_is_400(self): with TemporaryDirectory() as d: - with override_settings(MEDIA_ROOT=d): + with override_settings(MEDIA_ROOT=d, PORTFOLIO_UPLOAD_VALIDATION=False): user = fake_user() portfolio = fake_portfolio() @@ -650,7 +650,7 @@ def test_reinsurance_scope_file_is_not_a_valid_format___response_is_400(self): @given(file_content=binary(min_size=1), content_type=sampled_from(['text/csv', 'application/json'])) def test_reinsurance_scope_file_is_uploaded___file_can_be_retrieved(self, file_content, content_type): with TemporaryDirectory() as d: - with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=False): + with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=False, PORTFOLIO_UPLOAD_VALIDATION=False): user = fake_user() portfolio = fake_portfolio() @@ -679,7 +679,7 @@ def test_reinsurance_scope_file_invalid_uploaded___parquet_exception_raised(self file_content = b'\xf2hb\xca\xd2\xe6\xf3\xb0\xc1\xc7' with TemporaryDirectory() as d: - with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=True): + with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=True, PORTFOLIO_UPLOAD_VALIDATION=False): user = fake_user() portfolio = fake_portfolio() @@ -701,7 +701,7 @@ def test_reinsurance_scope_file_is_uploaded_as_parquet___file_can_be_retrieved(s file_content = test_data.to_csv(index=False).encode('utf-8') with TemporaryDirectory() as d: - with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=True): + with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=True, PORTFOLIO_UPLOAD_VALIDATION=False): user = fake_user() portfolio = fake_portfolio() @@ -773,7 +773,7 @@ def test_reinsurance_info_file_is_not_present___delete_response_is_404(self): def test_reinsurance_info_file_is_not_a_valid_format___response_is_400(self): with TemporaryDirectory() as d: - with override_settings(MEDIA_ROOT=d): + with override_settings(MEDIA_ROOT=d, PORTFOLIO_UPLOAD_VALIDATION=False): user = fake_user() portfolio = fake_portfolio() @@ -793,7 +793,7 @@ def test_reinsurance_info_file_is_not_a_valid_format___response_is_400(self): @given(file_content=binary(min_size=1), content_type=sampled_from(['text/csv', 'application/json'])) def test_reinsurance_info_file_is_uploaded___file_can_be_retrieved(self, file_content, content_type): with TemporaryDirectory() as d: - with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=False): + with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=False, PORTFOLIO_UPLOAD_VALIDATION=False): user = fake_user() portfolio = fake_portfolio() @@ -822,7 +822,7 @@ def test_reinsurance_info_file_invalid_uploaded___parquet_exception_raised(self) file_content = b'\xf2hb\xca\xd2\xe6\xf3\xb0\xc1\xc7' with TemporaryDirectory() as d: - with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=True): + with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=True, PORTFOLIO_UPLOAD_VALIDATION=False): user = fake_user() portfolio = 
fake_portfolio() @@ -844,7 +844,7 @@ def test_reinsurance_info_file_is_uploaded_as_parquet___file_can_be_retrieved(se file_content = test_data.to_csv(index=False).encode('utf-8') with TemporaryDirectory() as d: - with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=True): + with override_settings(MEDIA_ROOT=d, PORTFOLIO_PARQUET_STORAGE=True, PORTFOLIO_UPLOAD_VALIDATION=False): user = fake_user() portfolio = fake_portfolio() @@ -923,7 +923,7 @@ def test_all_exposure__are_valid(self): scp_file_content = scp_data.to_csv(index=False).encode('utf-8') with TemporaryDirectory() as d: - with override_settings(MEDIA_ROOT=d): + with override_settings(MEDIA_ROOT=d, PORTFOLIO_UPLOAD_VALIDATION=False): user = fake_user() portfolio = fake_portfolio() @@ -999,7 +999,7 @@ def test_location_file__is_valid(self): file_content = test_data.to_csv(index=False).encode('utf-8') with TemporaryDirectory() as d: - with override_settings(MEDIA_ROOT=d): + with override_settings(MEDIA_ROOT=d, PORTFOLIO_UPLOAD_VALIDATION=False): user = fake_user() portfolio = fake_portfolio() @@ -1048,7 +1048,7 @@ def test_location_file__is_invalid__response_is_400(self): file_content = test_data.to_csv(index=False).encode('utf-8') with TemporaryDirectory() as d: - with override_settings(MEDIA_ROOT=d): + with override_settings(MEDIA_ROOT=d, PORTFOLIO_UPLOAD_VALIDATION=False): user = fake_user() portfolio = fake_portfolio() @@ -1100,7 +1100,7 @@ def test_account_file__is_invalid__response_is_400(self): file_content = test_data.to_csv(index=False).encode('utf-8') with TemporaryDirectory() as d: - with override_settings(MEDIA_ROOT=d): + with override_settings(MEDIA_ROOT=d, PORTFOLIO_UPLOAD_VALIDATION=False): user = fake_user() portfolio = fake_portfolio() @@ -1166,7 +1166,7 @@ def test_reinsurance_info_file__is_invalid__response_is_400(self): file_content = test_data.to_csv(index=False).encode('utf-8') with TemporaryDirectory() as d: - with override_settings(MEDIA_ROOT=d): + with override_settings(MEDIA_ROOT=d, PORTFOLIO_UPLOAD_VALIDATION=False): user = fake_user() portfolio = fake_portfolio() @@ -1238,7 +1238,7 @@ def test_reinsurance_scope_file__is_invalid__response_is_400(self): file_content = test_data.to_csv(index=False).encode('utf-8') with TemporaryDirectory() as d: - with override_settings(MEDIA_ROOT=d): + with override_settings(MEDIA_ROOT=d, PORTFOLIO_UPLOAD_VALIDATION=False): user = fake_user() portfolio = fake_portfolio() diff --git a/src/server/oasisapi/portfolios/viewsets.py b/src/server/oasisapi/portfolios/viewsets.py index 22502d07e..29d0f1008 100644 --- a/src/server/oasisapi/portfolios/viewsets.py +++ b/src/server/oasisapi/portfolios/viewsets.py @@ -175,7 +175,7 @@ def accounts_file(self, request, pk=None, version=None): method = request.method.lower() if method == 'post': store_as_parquet = django_settings.PORTFOLIO_PARQUET_STORAGE - oed_validate = request.GET.get('validate', 'false').lower() == 'true' + oed_validate = request.GET.get('validate', str(django_settings.PORTFOLIO_UPLOAD_VALIDATION)).lower() == 'true' else: store_as_parquet = None oed_validate = None @@ -198,7 +198,7 @@ def location_file(self, request, pk=None, version=None): method = request.method.lower() if method == 'post': store_as_parquet = django_settings.PORTFOLIO_PARQUET_STORAGE - oed_validate = request.GET.get('validate', 'false').lower() == 'true' + oed_validate = request.GET.get('validate', str(django_settings.PORTFOLIO_UPLOAD_VALIDATION)).lower() == 'true' else: store_as_parquet = None oed_validate = None @@ -221,7 +221,7 @@ def 
reinsurance_info_file(self, request, pk=None, version=None): method = request.method.lower() if method == 'post': store_as_parquet = django_settings.PORTFOLIO_PARQUET_STORAGE - oed_validate = request.GET.get('validate', 'false').lower() == 'true' + oed_validate = request.GET.get('validate', str(django_settings.PORTFOLIO_UPLOAD_VALIDATION)).lower() == 'true' else: store_as_parquet = None oed_validate = None @@ -244,7 +244,7 @@ def reinsurance_scope_file(self, request, pk=None, version=None): method = request.method.lower() if method == 'post': store_as_parquet = django_settings.PORTFOLIO_PARQUET_STORAGE - oed_validate = request.GET.get('validate', 'false').lower() == 'true' + oed_validate = request.GET.get('validate', str(django_settings.PORTFOLIO_UPLOAD_VALIDATION)).lower() == 'true' else: store_as_parquet = None oed_validate = None From 330cf6e0933a23788294b8aa99eec0ee89de3bdf Mon Sep 17 00:00:00 2001 From: sambles Date: Thu, 9 Feb 2023 13:21:57 +0000 Subject: [PATCH 04/33] Set Validation on Upload to false (#749) --- src/server/oasisapi/settings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server/oasisapi/settings.py b/src/server/oasisapi/settings.py index d2258f341..209ca2c47 100644 --- a/src/server/oasisapi/settings.py +++ b/src/server/oasisapi/settings.py @@ -263,7 +263,7 @@ # storage selector for exposure files PORTFOLIO_PARQUET_STORAGE = iniconf.settings.getboolean('server', 'PORTFOLIO_PARQUET_STORAGE', fallback=False) -PORTFOLIO_UPLOAD_VALIDATION = iniconf.settings.getboolean('server', 'PORTFOLIO_UPLOAD_VALIDATION', fallback=True) +PORTFOLIO_UPLOAD_VALIDATION = iniconf.settings.getboolean('server', 'PORTFOLIO_UPLOAD_VALIDATION', fallback=False) PORTFOLIO_VALIDATION_CONFIG = [ {'name': 'required_fields', 'on_error': 'return'}, {'name': 'unknown_column', 'on_error': 'return'}, From 10ac772ac143d4d12f2e191af47df1253e2ca387 Mon Sep 17 00:00:00 2001 From: sambles Date: Fri, 10 Feb 2023 11:39:38 +0000 Subject: [PATCH 05/33] Update packages dev (#752) * Move update package script * Update packages, CVE-2023-0286, CVE-2023-23931, CVE-2023-23969 * Update pyopenssl to fix error with cryptography ver --- requirements-server.txt | 26 +++++---- requirements-worker.txt | 18 +++--- requirements.txt | 30 +++++----- .../update-packages.sh | 55 +++++++++---------- 4 files changed, 66 insertions(+), 63 deletions(-) rename update-packages.sh => scripts/update-packages.sh (51%) diff --git a/requirements-server.txt b/requirements-server.txt index 537b47060..aa8a7001c 100644 --- a/requirements-server.txt +++ b/requirements-server.txt @@ -47,6 +47,8 @@ chainmap==1.0.3 # via -r requirements-server.in channels==3.0.4 # via -r requirements-server.in +chardet==5.1.0 + # via ods-tools charset-normalizer==2.0.9 # via requests click==8.0.3 @@ -73,7 +75,7 @@ coreschema==0.0.4 # via # coreapi # drf-yasg -cryptography==36.0.0 +cryptography==39.0.1 # via # autobahn # azure-storage-blob @@ -83,7 +85,7 @@ daphne==3.0.2 # via # -r requirements-server.in # channels -django==3.2.16 +django==3.2.17 # via # -r requirements-server.in # channels @@ -118,7 +120,7 @@ djangorestframework-simplejwt==5.0.0 # via -r requirements-server.in drf-nested-routers==0.93.4 # via -r requirements-server.in -drf-yasg==1.21.4 +drf-yasg==1.21.5 # via -r requirements-server.in greenlet==1.1.2 # via sqlalchemy @@ -159,7 +161,7 @@ markupsafe==2.0.1 # via jinja2 msrest==0.7.1 # via azure-storage-blob -numpy==1.23.4 +numpy==1.24.2 # via # pandas # pyarrow @@ -169,13 +171,15 @@ ods-tools==3.0.1 # via -r 
requirements-server.in packaging==21.3 # via drf-yasg -pandas==1.5.1 - # via -r requirements-server.in +pandas==1.5.3 + # via + # -r requirements-server.in + # ods-tools pathlib2==2.3.6 # via -r requirements-server.in prompt-toolkit==3.0.23 # via click-repl -psycopg2-binary==2.9.4 +psycopg2-binary==2.9.5 # via -r requirements-server.in pyarrow==6.0.1 # via -r requirements-server.in @@ -191,7 +195,7 @@ pyjwt==2.4.0 # via djangorestframework-simplejwt pymysql==1.0.2 # via -r requirements-server.in -pyopenssl==21.0.0 +pyopenssl==23.0.0 # via twisted pyparsing==3.0.6 # via packaging @@ -230,10 +234,9 @@ six==1.16.0 # click-repl # isodate # pathlib2 - # pyopenssl # python-dateutil # service-identity -sqlalchemy==1.4.42 +sqlalchemy==2.0.3 # via -r requirements-server.in sqlparse==0.4.2 # via @@ -245,9 +248,10 @@ twisted[tls]==22.10.0rc1 # daphne txaio==21.2.1 # via autobahn -typing-extensions==4.0.1 +typing-extensions==4.4.0 # via # azure-core + # sqlalchemy # twisted uritemplate==4.1.1 # via diff --git a/requirements-worker.txt b/requirements-worker.txt index 7b55ed3cb..5c18ee4db 100644 --- a/requirements-worker.txt +++ b/requirements-worker.txt @@ -78,7 +78,7 @@ cookiecutter==2.1.1 # via oasislmf cramjam==2.5.0 # via fastparquet -cryptography==36.0.1 +cryptography==39.0.1 # via azure-storage-blob fasteners==0.16.3 # via -r requirements-worker.in @@ -142,7 +142,7 @@ numpy==1.22.4 # pyarrow # scikit-learn # scipy -oasislmf[extra]==1.27.0 +oasislmf[extra]==1.27.1 # via -r requirements-worker.in oauthlib==3.2.2 # via requests-oauthlib @@ -152,7 +152,7 @@ packaging==21.3 # via # numexpr # pytest -pandas==1.5.1 +pandas==1.5.3 # via # fastparquet # geopandas @@ -164,7 +164,7 @@ pluggy==1.0.0 # via pytest prompt-toolkit==3.0.23 # via click-repl -psycopg2-binary==2.9.4 +psycopg2-binary==2.9.5 # via -r requirements-worker.in py==1.11.0 # via pytest @@ -213,9 +213,9 @@ rtree==0.9.7 # via oasislmf s3transfer==0.5.0 # via boto3 -scikit-learn==1.1.2 +scikit-learn==1.2.1 # via oasislmf -scipy==1.9.3 +scipy==1.10.0 # via # oasislmf # scikit-learn @@ -238,7 +238,7 @@ six==1.16.0 # munch # pathlib2 # python-dateutil -sqlalchemy==1.4.42 +sqlalchemy==2.0.3 # via -r requirements-worker.in tabulate==0.8.9 # via oasislmf @@ -253,7 +253,9 @@ toml==0.10.2 tqdm==4.62.3 # via oasislmf typing-extensions==4.4.0 - # via azure-core + # via + # azure-core + # sqlalchemy urllib3==1.26.7 # via # botocore diff --git a/requirements.txt b/requirements.txt index e81bba17f..3573dcaab 100644 --- a/requirements.txt +++ b/requirements.txt @@ -118,13 +118,13 @@ coreschema==0.0.4 # via # coreapi # drf-yasg -coverage[toml]==6.5.0 +coverage[toml]==7.1.0 # via # -r requirements.in # pytest-cov cramjam==2.5.0 # via fastparquet -cryptography==36.0.0 +cryptography==39.0.1 # via # autobahn # azure-storage-blob @@ -136,7 +136,7 @@ daphne==3.0.2 # channels distlib==0.3.6 # via virtualenv -django==3.2.16 +django==3.2.17 # via # -r ./requirements-server.in # channels @@ -176,7 +176,7 @@ djangorestframework-simplejwt==5.0.0 # via -r ./requirements-server.in drf-nested-routers==0.93.4 # via -r ./requirements-server.in -drf-yasg==1.21.4 +drf-yasg==1.21.5 # via -r ./requirements-server.in fasteners==0.16.3 # via @@ -184,7 +184,7 @@ fasteners==0.16.3 # -r requirements.in fastparquet==0.8.0 # via oasislmf -filelock==3.8.0 +filelock==3.9.0 # via # tox # virtualenv @@ -278,7 +278,7 @@ numpy==1.22.4 # pyarrow # scikit-learn # scipy -oasislmf[extra]==1.27.0 +oasislmf[extra]==1.27.1 # via -r ./requirements-worker.in oauthlib==3.2.2 # via requests-oauthlib @@ 
-291,7 +291,7 @@ packaging==21.3 # drf-yasg # pytest # tox -pandas==1.5.1 +pandas==1.5.3 # via # -r ./requirements-server.in # fastparquet @@ -314,7 +314,7 @@ pluggy==1.0.0 # tox prompt-toolkit==3.0.23 # via click-repl -psycopg2-binary==2.9.4 +psycopg2-binary==2.9.5 # via # -r ./requirements-server.in # -r ./requirements-worker.in @@ -344,7 +344,7 @@ pymysql==1.0.2 # via # -r ./requirements-server.in # -r ./requirements-worker.in -pyopenssl==21.0.0 +pyopenssl==23.0.0 # via # -r requirements.in # twisted @@ -403,9 +403,9 @@ ruamel-yaml-clib==0.2.7 # via ruamel-yaml s3transfer==0.5.0 # via boto3 -scikit-learn==1.1.2 +scikit-learn==1.2.1 # via oasislmf -scipy==1.9.3 +scipy==1.10.0 # via # oasislmf # scikit-learn @@ -430,7 +430,6 @@ six==1.16.0 # isodate # munch # pathlib2 - # pyopenssl # python-dateutil # service-identity # tox @@ -438,7 +437,7 @@ sortedcontainers==2.4.0 # via hypothesis soupsieve==2.3.1 # via beautifulsoup4 -sqlalchemy==1.4.42 +sqlalchemy==2.0.3 # via # -r ./requirements-server.in # -r ./requirements-worker.in @@ -472,9 +471,10 @@ twisted[tls]==22.10.0rc1 # daphne txaio==21.2.1 # via autobahn -typing-extensions==4.0.1 +typing-extensions==4.4.0 # via # azure-core + # sqlalchemy # twisted uritemplate==4.1.1 # via @@ -489,7 +489,7 @@ vine==5.0.0 # amqp # celery # kombu -virtualenv==20.16.5 +virtualenv==20.19.0 # via tox waitress==2.1.2 # via webtest diff --git a/update-packages.sh b/scripts/update-packages.sh similarity index 51% rename from update-packages.sh rename to scripts/update-packages.sh index 14db1dc35..fcd33b819 100755 --- a/update-packages.sh +++ b/scripts/update-packages.sh @@ -1,42 +1,39 @@ pkg_list=( - sqlalchemy - joblib - oauthlib - parso - pandas - ruamel.yaml - distlib - ods-tools - oasislmf - 'django==3.*' - django-celery-results 'celery==5.*' - virtualenv - filelock - text-unidecode + 'django==3.*' azure-storage-blob coverage + cryptography + distlib + django-celery-results django-request-logging drf-yasg + filelock + joblib numpy - scipy - waitress - sklearn + oasislmf + oauthlib + ods-tools + pandas + parso + pyopenssl psycopg2-binary + ruamel.yaml scikit-learn + scipy + sklearn + sqlalchemy + text-unidecode + virtualenv + waitress ) - +PKG_UPDATE='' for pk in "${pkg_list[@]}"; do - pip-compile --upgrade-package $pk requirements-worker.in - pip-compile --upgrade-package $pk requirements-server.in - pip-compile --upgrade-package $pk requirements.in + PKG_UPDATE=$PKG_UPDATE" --upgrade-package $pk" +done - if [[ `git status --porcelain --untracked-files=no` ]]; then - echo "$pk - updated" - git add -u - git commit -m "Updated package $pk" - else - echo "$pk - no update found" - fi -done +set -e +pip-compile $PKG_UPDATE requirements-worker.in +pip-compile $PKG_UPDATE requirements-server.in +pip-compile $PKG_UPDATE requirements.in From f5bf8a86cfb3ba9df20a0299613c1fa61dfd9dc3 Mon Sep 17 00:00:00 2001 From: sambles Date: Fri, 10 Feb 2023 13:13:32 +0000 Subject: [PATCH 06/33] Set ods-tools 3.0.2 (#753) * Set ods-tools 3.0.2 * Dont use ods-tools for format convention If OED exposure is passed though ods-tools extra col will be added which returns different data based on the base format Example: POST: CSV file -> portfolios/{n}/location_file GET: portfolios/{n}/location_file?format=csv (no blank required col inserted) GET: portfolios/{n}/location_file?format=parquet (any missing TIV cols will be added before data is return) Fix: Only add extra columns if file is validated * Remove unsed import * RiskLevel allowed blank, remove error from test * fix unused import 
--- requirements-server.txt | 2 +- requirements-worker.txt | 2 +- requirements.txt | 2 +- src/server/oasisapi/files/views.py | 26 +++---------------- .../portfolios/tests/test_portfolio.py | 1 - 5 files changed, 6 insertions(+), 27 deletions(-) diff --git a/requirements-server.txt b/requirements-server.txt index aa8a7001c..59333d840 100644 --- a/requirements-server.txt +++ b/requirements-server.txt @@ -167,7 +167,7 @@ numpy==1.24.2 # pyarrow oauthlib==3.2.2 # via requests-oauthlib -ods-tools==3.0.1 +ods-tools==3.0.2 # via -r requirements-server.in packaging==21.3 # via drf-yasg diff --git a/requirements-worker.txt b/requirements-worker.txt index 5c18ee4db..37d47a4f3 100644 --- a/requirements-worker.txt +++ b/requirements-worker.txt @@ -146,7 +146,7 @@ oasislmf[extra]==1.27.1 # via -r requirements-worker.in oauthlib==3.2.2 # via requests-oauthlib -ods-tools==3.0.1 +ods-tools==3.0.2 # via oasislmf packaging==21.3 # via diff --git a/requirements.txt b/requirements.txt index 3573dcaab..77bbf0d7d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -282,7 +282,7 @@ oasislmf[extra]==1.27.1 # via -r ./requirements-worker.in oauthlib==3.2.2 # via requests-oauthlib -ods-tools==3.0.1 +ods-tools==3.0.2 # via # -r ./requirements-server.in # oasislmf diff --git a/src/server/oasisapi/files/views.py b/src/server/oasisapi/files/views.py index 95b8dc8bc..b6fa727af 100644 --- a/src/server/oasisapi/files/views.py +++ b/src/server/oasisapi/files/views.py @@ -1,21 +1,13 @@ import json import io -import pandas as pd -from ods_tools.oed.exposure import OedExposure from django.core.files.uploadedfile import UploadedFile from django.http import StreamingHttpResponse, Http404, QueryDict from rest_framework.response import Response from .serializers import RelatedFileSerializer - -EXPOSURE_ARGS = { - 'accounts_file': 'account', - 'location_file': 'location', - 'reinsurance_info_file': 'ri_info', - 'reinsurance_scope_file': 'ri_scope' -} +from .models import related_file_to_df def _delete_related_file(parent, field): @@ -48,13 +40,7 @@ def _handle_get_related_file(parent, field, file_format): # Parquet format requested and data stored as csv if file_format == 'parquet' and f.content_type == 'text/csv': output_buffer = io.BytesIO() - - # Load DataFrame and pass to ods-tools exposure class - exposure = OedExposure(**{ - EXPOSURE_ARGS[field]: pd.read_csv(io.BytesIO(f.file.read())) - }) - - df = getattr(exposure, EXPOSURE_ARGS[field]).dataframe + df = related_file_to_df(f) df.to_parquet(output_buffer, index=False) output_buffer.seek(0) @@ -65,13 +51,7 @@ def _handle_get_related_file(parent, field, file_format): # CSV format requested and data stored as Parquet if file_format == 'csv' and f.content_type == 'application/octet-stream': output_buffer = io.BytesIO() - - exposure = OedExposure(**{ - EXPOSURE_ARGS[field]: pd.read_parquet(io.BytesIO(f.file.read())), - 'check_oed': False, - }) - - df = getattr(exposure, EXPOSURE_ARGS[field]).dataframe + df = related_file_to_df(f) df.to_csv(output_buffer, index=False) output_buffer.seek(0) diff --git a/src/server/oasisapi/portfolios/tests/test_portfolio.py b/src/server/oasisapi/portfolios/tests/test_portfolio.py index 96f71d6df..26827cbc0 100644 --- a/src/server/oasisapi/portfolios/tests/test_portfolio.py +++ b/src/server/oasisapi/portfolios/tests/test_portfolio.py @@ -1211,7 +1211,6 @@ def test_reinsurance_info_file__is_invalid__response_is_400(self): ['ri_info', 'missing required column ReinsCurrency'], ['ri_info', 'missing required column InuringPriority'], ['ri_info', 
'missing required column ReinsType'], - ['ri_info', 'missing required column RiskLevel'], ['ri_info', "column 'PortNumber' is not a valid oed field"], ['ri_info', "column 'AccNumber' is not a valid oed field"], ['ri_info', "column 'LocNumber' is not a valid oed field"], From 834d6af096aed30ad9a695841631f23843263cb3 Mon Sep 17 00:00:00 2001 From: sambles Date: Mon, 27 Feb 2023 16:12:42 +0000 Subject: [PATCH 07/33] Align with branch backports/1.27.x (#762) --- .../oasisapi/schemas/model_settings.json | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/src/server/oasisapi/schemas/model_settings.json b/src/server/oasisapi/schemas/model_settings.json index cf9531576..ec071b561 100644 --- a/src/server/oasisapi/schemas/model_settings.json +++ b/src/server/oasisapi/schemas/model_settings.json @@ -965,6 +965,12 @@ "title":"OED peril description", "description":"Short string describing the peril", "minLength":1 + }, + "peril_correlation_group": { + "type": "integer", + "title": "peril correlated group ID", + "description": "the peril correlated group ID", + "minLength": 1 } }, "required":[ @@ -975,6 +981,27 @@ } } }, + "correlation_settings": { + "type": "array", + "title": "Correlation Settings", + "description": "The Correlation Settings", + "items": { + "type": "object", + "properties": { + "peril_correlation_group": { + "type": "integer", + "title": "Peril Correlation Group", + "description": "The Peril Correlation Group", + "minLength": 1 + }, + "correlation_value": { + "type": "string", + "title": "Correlation Value", + "description": "The Correlation Value" + } + } + } + }, "data_settings":{ "type":"object", "title":"Model data settings", From 9e29bc3195ab2cf05a056622c081764c20a8f797 Mon Sep 17 00:00:00 2001 From: sambles Date: Tue, 28 Feb 2023 07:02:45 +0000 Subject: [PATCH 08/33] Fix schema build workflow (#763) * fix schema build wkflow * Fix missing dir --- .github/workflows/build-schema.yml | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/.github/workflows/build-schema.yml b/.github/workflows/build-schema.yml index aa94a5dbc..19bbd030d 100644 --- a/.github/workflows/build-schema.yml +++ b/.github/workflows/build-schema.yml @@ -1,6 +1,16 @@ name: Platform Schema (OpenAPI) on: + push: + branches: + - master + - develop + - backports** + pull_request: + branches: + - master + - develop + - backports** workflow_dispatch: workflow_call: @@ -12,20 +22,18 @@ jobs: runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v3 - with: - ref: ${{ github.ref_name }} - - name: Set up Python - run: | - mkdir -p $(dirname ${{ env.SCHEMA }}) - sudo apt-get update && sudo apt-get upgrade -y - sudo apt-get install -y --no-install-recommends python3 python3-pip + - name: Set up Python 3.10 + uses: actions/setup-python@v4 + with: + python-version: '3.10' - name: Install requirments run: pip install -r requirements-server.txt - name: Generate OpenAPI run: | + test -d $(dirname ${{ env.SCHEMA }}) || mkdir -p $(dirname ${{ env.SCHEMA }}) python ./manage.py migrate python ./manage.py generate_swagger ${{ env.SCHEMA }} @@ -38,4 +46,3 @@ jobs: - name: Test Schema run: ./scripts/build-maven.sh $(cat VERSION) - From b90f7063baeb4b4093e2631e0cac054a89c68f3f Mon Sep 17 00:00:00 2001 From: sambles Date: Mon, 13 Mar 2023 12:23:37 +0000 Subject: [PATCH 09/33] Loosen changelog / release note builder repo options (#768) --- scripts/update-changelog.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/update-changelog.py 
b/scripts/update-changelog.py index b5648f7e7..f275b7747 100755 --- a/scripts/update-changelog.py +++ b/scripts/update-changelog.py @@ -424,7 +424,7 @@ def check_rate_limit(github_token): @cli.command() -@click.option('--repo', type=click.Choice(['ktools', 'OasisLMF', 'OasisPlatform', 'OasisUI'], case_sensitive=True), required=True) +@click.option('--repo', type=click.STRING, required=True, help="Oasislmf Repo name case sensitive, ['ktools', 'OasisLMF', 'OasisUI' ..]") @click.option('--output-path', type=click.Path(exists=False), default='./CHANGELOG.rst', help='changelog output path') @click.option('--local-repo-path', type=click.Path(exists=False), default=None, help=' Path to local git repository, used to skip clone step (optional) ') @click.option('--from-tag', required=True, help='Github tag to track changes from') @@ -470,7 +470,7 @@ def build_changelog(repo, from_tag, to_tag, github_token, output_path, apply_mil @cli.command() -@click.option('--repo', type=click.Choice(['ktools', 'OasisLMF', 'OasisUI'], case_sensitive=True), required=True) +@click.option('--repo', type=click.STRING, required=True, help="Oasislmf Repo name case sensitive, ['ktools', 'OasisLMF', 'OasisUI' ..]") @click.option('--output-path', type=click.Path(exists=False), default='./RELEASE.md', help='Release notes output path') @click.option('--local-repo-path', type=click.Path(exists=False), default=None, help=' Path to local git repository, used to skip clone step (optional) ') @click.option('--from-tag', required=True, help='Github tag to track changes from') From c9145f8acbee37f2d3c316181568d158505e7043 Mon Sep 17 00:00:00 2001 From: sambles Date: Mon, 13 Mar 2023 14:06:03 +0000 Subject: [PATCH 10/33] 1.27.2 CVE update (#769) * update scanning workflows * Update python packages * Trivy switched "security-checks" to "scanners" * Run image tests in debug mode --- .github/workflows/build-images.yml | 19 ++- .github/workflows/scan.yml | 120 ++------------ .github/workflows/test-images.yml | 8 +- requirements-server.txt | 130 +++++++-------- requirements-worker.txt | 137 ++++++++-------- requirements.txt | 251 +++++++++++++++-------------- scripts/update-packages.sh | 1 + 7 files changed, 283 insertions(+), 383 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index d4126a3a3..6c1dac5a0 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -81,7 +81,7 @@ jobs: tag: 'model_worker_deb-${{ github.sha }}' file: 'Dockerfile.model_worker_debian' report: 'worker-deb-scan.sarif' - dive: 'worker-deb--layers.txt' + dive: 'worker-deb-layers.txt' exit-code: '0' # scan but don't fail steps: @@ -139,18 +139,29 @@ jobs: - name: Vulnerability scanner if: env.SEVERITY != 'SKIP' uses: aquasecurity/trivy-action@master + with: + image-ref: ${{ env.IMAGE_REPO }}:${{ matrix.tag }} + scan-type: 'image' + exit-code: ${{ matrix.exit-code }} + ignore-unfixed: ${{ env.IGNORE_UNFIXED }} + severity: ${{ env.SEVERITY }} + scanners: 'vuln' + + - name: Create Report + if: ( success() || failure() ) && env.SEVERITY != 'SKIP' && matrix.exit-code == '1' + uses: aquasecurity/trivy-action@master with: image-ref: ${{ env.IMAGE_REPO }}:${{ matrix.tag }} format: 'sarif' output: ${{ matrix.report }} scan-type: 'image' - exit-code: ${{ matrix.exit-code }} + exit-code: '0' ignore-unfixed: ${{ env.IGNORE_UNFIXED }} severity: ${{ env.SEVERITY }} - security-checks: 'vuln' + scanners: 'vuln' - name: Upload scan results to Security tab - if: ( success() || failure() ) && 
matrix.exit-code == '1' && env.SEVERITY != 'SKIP' + if: ( success() || failure() ) && env.SEVERITY != 'SKIP' && matrix.exit-code == '1' uses: github/codeql-action/upload-sarif@v2 with: sarif_file: ${{ matrix.report }} diff --git a/.github/workflows/scan.yml b/.github/workflows/scan.yml index 369f4a690..0fd4b3992 100644 --- a/.github/workflows/scan.yml +++ b/.github/workflows/scan.yml @@ -11,13 +11,15 @@ on: - master - develop - backports** + schedule: + - cron: '0 */6 * * *' # Run scan every 6 hours workflow_dispatch: inputs: cve_severity: description: 'Severities of vulnerabilities to scanned for, [UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL]' required: false - default: 'CRITICAL,HIGH,MEDIUM' + default: 'CRITICAL,HIGH' ignore_unfixed: description: 'Include unfixed vulnerabilities in scan [true,false]' required: false @@ -27,7 +29,7 @@ on: cve_severity: description: 'Severities of vulnerabilities to scanned for, [UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL]' required: false - default: 'CRITICAL,HIGH,MEDIUM' + default: 'CRITICAL,HIGH' type: string ignore_unfixed: description: 'Include unfixed vulnerabilities in scan [true,false]' @@ -44,7 +46,7 @@ jobs: scan_repo: name: Scan Repo env: - SEVERITY: 'MEDIUM,HIGH,CRITICAL' + SEVERITY: 'HIGH,CRITICAL' REPORT: 'repo-results.sarif' IGNORE_UNFIXED: 'true' EXIT_CODE: '1' @@ -62,140 +64,50 @@ jobs: - name: Checkout code uses: actions/checkout@v3 - - name: Trivy vulnerability scanner (Repo) + - name: Trivy vulnerability scanner if: env.SEVERITY != '' uses: aquasecurity/trivy-action@master with: - format: 'sarif' - output: ${{ env.REPORT }} scan-type: 'fs' exit-code: '1' ignore-unfixed: ${{ env.IGNORE_UNFIXED }} severity: ${{ env.SEVERITY }} - security-checks: 'vuln,config,secret' + scanners: 'vuln' skip-dirs: './docker' - - name: Upload Trivy scan results to GitHub Security tab - if: success() || failure() - - uses: github/codeql-action/upload-sarif@v2 - with: - sarif_file: ${{ env.REPORT }} - - - name: Store CVE report - if: success() || failure() - uses: actions/upload-artifact@v3 - with: - name: ${{ env.REPORT }} - path: ${{ env.REPORT }} - retention-days: 3 - - - scan_worker: - name: Scan worker - env: - SEVERITY: 'MEDIUM,HIGH,CRITICAL' - REPORT: 'worker-results.sarif' - REQU_FILE: 'requirements-worker.txt' - IGNORE_UNFIXED: 'true' - runs-on: ubuntu-latest - steps: - - name: Set inputs - if: github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' - run: | - echo "SEVERITY=${{ inputs.cve_severity }}" >> $GITHUB_ENV - echo "IGNORE_UNFIXED=${{ inputs.ignore_unfixed }}" >> $GITHUB_ENV - - - name: Checkout code - uses: actions/checkout@v3 - - - name: Switch requirments file - run: | - rm requirements.txt - cp ${{ env.REQU_FILE }} requirements.txt - - - name: Vulnerability scanner + - name: Trivy Configuration scanner (no fail) if: env.SEVERITY != '' uses: aquasecurity/trivy-action@master with: - format: 'sarif' - output: ${{ env.REPORT }} scan-type: 'fs' - exit-code: '1' + exit-code: '0' ignore-unfixed: ${{ env.IGNORE_UNFIXED }} severity: ${{ env.SEVERITY }} - security-checks: 'vuln,config,secret' + scanners: 'config,secret' skip-dirs: './docker' - - name: Rename requirments file in report - if: success() || failure() - run: sed -i "s|requirements.txt|${{ env.REQU_FILE }}|g" ${{ env.REPORT }} - - - name: Upload Trivy scan results to GitHub Security tab + - name: Create Report if: success() || failure() - uses: github/codeql-action/upload-sarif@v2 - with: - sarif_file: ${{ env.REPORT }} - - - name: Store report - if: success() || failure() - uses: 
actions/upload-artifact@v3 - with: - name: ${{ env.REPORT }} - path: ${{ env.REPORT }} - retention-days: 3 - - scan_server: - name: Scan Server - env: - SEVERITY: 'MEDIUM,HIGH,CRITICAL' - REPORT: 'server-results.sarif' - REQU_FILE: 'requirements-server.txt' - IGNORE_UNFIXED: 'true' - runs-on: ubuntu-latest - steps: - - name: Set inputs - if: github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch' - run: | - echo "SEVERITY=${{ inputs.cve_severity }}" >> $GITHUB_ENV - echo "IGNORE_UNFIXED=${{ inputs.ignore_unfixed }}" >> $GITHUB_ENV - - - name: Checkout code - uses: actions/checkout@v3 - - - name: Switch requirments file - run: | - rm requirements.txt - cp ${{ env.REQU_FILE }} requirements.txt - - - name: Vulnerability scanner - if: env.SEVERITY != '' uses: aquasecurity/trivy-action@master with: format: 'sarif' - output: ${{ env.REPORT }} + output: ${{ env.REPORT }} scan-type: 'fs' - exit-code: '1' + exit-code: '0' ignore-unfixed: ${{ env.IGNORE_UNFIXED }} - severity: ${{ env.SEVERITY }} - security-checks: 'vuln,config,secret' + security-checks: 'vuln' skip-dirs: './docker' - - name: Rename requirments file in report - if: success() || failure() - run: sed -i "s|requirements.txt|${{ env.REQU_FILE }}|g" ${{ env.REPORT }} - - name: Upload Trivy scan results to GitHub Security tab if: success() || failure() uses: github/codeql-action/upload-sarif@v2 with: - sarif_file: ${{ env.REPORT }} + sarif_file: ${{ env.REPORT }} - - name: Store report + - name: Store CVE report if: success() || failure() uses: actions/upload-artifact@v3 with: name: ${{ env.REPORT }} path: ${{ env.REPORT }} retention-days: 3 - diff --git a/.github/workflows/test-images.yml b/.github/workflows/test-images.yml index 2e2636efd..6837e405b 100644 --- a/.github/workflows/test-images.yml +++ b/.github/workflows/test-images.yml @@ -126,7 +126,7 @@ jobs: server_tag: ${{ needs.setup.outputs.build_server_tag }} worker_image: 'coreoasis/model_worker' worker_tag: ${{ needs.setup.outputs.release_tag }} - debug_mode: 0 + debug_mode: 1 pytest_opts: ${{ needs.setup.outputs.pytest_opts }} storage_suffix: '-server-compatibility' @@ -141,7 +141,7 @@ jobs: server_tag: ${{ needs.setup.outputs.build_server_tag }} worker_image: ${{ needs.setup.outputs.build_deb_worker_img }} worker_tag: ${{ needs.setup.outputs.build_deb_worker_tag }} - debug_mode: 0 + debug_mode: 1 pytest_opts: ${{ needs.setup.outputs.pytest_opts }} storage_suffix: '-worker-debian' @@ -156,7 +156,7 @@ jobs: server_tag: ${{ needs.setup.outputs.release_tag }} worker_image: ${{ needs.setup.outputs.build_worker_img }} worker_tag: ${{ needs.setup.outputs.build_worker_tag }} - debug_mode: 0 + debug_mode: 1 pytest_opts: ${{ needs.setup.outputs.pytest_opts }} storage_suffix: '-worker-compatibility' @@ -171,6 +171,6 @@ jobs: server_tag: ${{ needs.setup.outputs.build_server_tag }} worker_image: ${{ needs.setup.outputs.build_worker_img }} worker_tag: ${{ needs.setup.outputs.build_worker_tag }} - debug_mode: 0 + debug_mode: 1 pytest_opts: "--docker-compose=./docker/s3.docker-compose.yml ${{ needs.setup.outputs.pytest_opts }}" storage_suffix: '-s3' diff --git a/requirements-server.txt b/requirements-server.txt index 59333d840..131a44510 100644 --- a/requirements-server.txt +++ b/requirements-server.txt @@ -6,52 +6,48 @@ # amqp==5.1.1 # via kombu -asgiref==3.4.1 +asgiref==3.6.0 # via # channels # daphne # django -attrs==21.2.0 +attrs==22.2.0 # via # automat # jsonschema # service-identity # twisted -autobahn==21.11.1 +autobahn==23.1.2 # via daphne -automat==20.2.0 
+automat==22.10.0 # via twisted -azure-core==1.26.0 - # via - # azure-storage-blob - # msrest -azure-storage-blob==12.14.1 +azure-core==1.26.3 + # via azure-storage-blob +azure-storage-blob==12.15.0 # via django-storages billiard==3.6.4.0 # via celery -boto3==1.20.20 +boto3==1.26.89 # via -r requirements-server.in -botocore==1.23.20 +botocore==1.29.89 # via # boto3 # s3transfer celery==5.2.7 # via -r requirements-server.in -certifi==2022.12.07 - # via - # msrest - # requests -cffi==1.15.0 +certifi==2022.12.7 + # via requests +cffi==1.15.1 # via cryptography chainmap==1.0.3 # via -r requirements-server.in -channels==3.0.4 +channels==4.0.0 # via -r requirements-server.in chardet==5.1.0 # via ods-tools -charset-normalizer==2.0.9 +charset-normalizer==3.1.0 # via requests -click==8.0.3 +click==8.1.3 # via # celery # click-didyoumean @@ -63,7 +59,7 @@ click-plugins==1.1.1 # via celery click-repl==0.2.0 # via celery -configparser==5.2.0 +configparser==5.3.0 # via -r requirements-server.in constantly==15.1.0 # via twisted @@ -75,17 +71,15 @@ coreschema==0.0.4 # via # coreapi # drf-yasg -cryptography==39.0.1 +cryptography==39.0.2 # via # autobahn # azure-storage-blob # pyopenssl # service-identity -daphne==3.0.2 - # via - # -r requirements-server.in - # channels -django==3.2.17 +daphne==4.0.0 + # via -r requirements-server.in +django==3.2.18 # via # -r requirements-server.in # channels @@ -98,31 +92,31 @@ django==3.2.17 # djangorestframework-simplejwt # drf-nested-routers # drf-yasg -django-cleanup==5.2.0 +django-cleanup==7.0.0 # via -r requirements-server.in -django-debug-toolbar==3.2.4 +django-debug-toolbar==3.8.1 # via -r requirements-server.in -django-filter==21.1 +django-filter==22.1 # via -r requirements-server.in -django-model-utils==4.2.0 +django-model-utils==4.3.1 # via -r requirements-server.in django-request-logging==0.7.5 # via -r requirements-server.in -django-storages[azure]==1.12.3 +django-storages[azure]==1.13.2 # via -r requirements-server.in -djangorestframework==3.12.4 +djangorestframework==3.14.0 # via # -r requirements-server.in # djangorestframework-simplejwt # drf-nested-routers # drf-yasg -djangorestframework-simplejwt==5.0.0 +djangorestframework-simplejwt==5.2.2 # via -r requirements-server.in drf-nested-routers==0.93.4 # via -r requirements-server.in drf-yasg==1.21.5 # via -r requirements-server.in -greenlet==1.1.2 +greenlet==2.0.2 # via sqlalchemy gunicorn==20.1.0 # via -r requirements-server.in @@ -130,58 +124,54 @@ hyperlink==21.0.0 # via # autobahn # twisted -idna==3.3 +idna==3.4 # via # hyperlink # requests # twisted -incremental==21.3.0 +incremental==22.10.0 # via twisted inflection==0.5.1 # via drf-yasg isodate==0.6.1 - # via msrest + # via azure-storage-blob itypes==1.2.0 # via coreapi -jinja2==3.0.3 +jinja2==3.1.2 # via coreschema -jmespath==0.10.0 +jmespath==1.0.1 # via # boto3 # botocore -jsonpickle==2.0.0 +jsonpickle==3.0.1 # via -r requirements-server.in -jsonschema==4.2.1 +jsonschema==4.17.3 # via -r requirements-server.in kombu==5.2.4 # via celery -markdown==3.3.6 +markdown==3.4.1 # via -r requirements-server.in -markupsafe==2.0.1 +markupsafe==2.1.2 # via jinja2 -msrest==0.7.1 - # via azure-storage-blob numpy==1.24.2 # via # pandas # pyarrow -oauthlib==3.2.2 - # via requests-oauthlib -ods-tools==3.0.2 +ods-tools==3.0.3 # via -r requirements-server.in -packaging==21.3 +packaging==23.0 # via drf-yasg pandas==1.5.3 # via # -r requirements-server.in # ods-tools -pathlib2==2.3.6 +pathlib2==2.3.7.post1 # via -r requirements-server.in -prompt-toolkit==3.0.23 
+prompt-toolkit==3.0.38 # via click-repl psycopg2-binary==2.9.5 # via -r requirements-server.in -pyarrow==6.0.1 +pyarrow==11.0.0 # via -r requirements-server.in pyasn1==0.4.8 # via @@ -191,39 +181,34 @@ pyasn1-modules==0.2.8 # via service-identity pycparser==2.21 # via cffi -pyjwt==2.4.0 +pyjwt==2.6.0 # via djangorestframework-simplejwt pymysql==1.0.2 # via -r requirements-server.in pyopenssl==23.0.0 # via twisted -pyparsing==3.0.6 - # via packaging -pyrsistent==0.18.0 +pyrsistent==0.19.3 # via jsonschema python-dateutil==2.8.2 # via # botocore # pandas -pytz==2021.3 +pytz==2022.7.1 # via # celery # django + # djangorestframework # drf-yasg # pandas -requests==2.26.0 +requests==2.28.2 # via # azure-core # coreapi - # msrest - # requests-oauthlib -requests-oauthlib==1.3.1 - # via msrest ruamel-yaml==0.17.21 # via drf-yasg ruamel-yaml-clib==0.2.7 # via ruamel-yaml -s3transfer==0.5.0 +s3transfer==0.6.0 # via boto3 service-identity==21.1.0 # via twisted @@ -236,28 +221,29 @@ six==1.16.0 # pathlib2 # python-dateutil # service-identity -sqlalchemy==2.0.3 +sqlalchemy==2.0.5.post1 # via -r requirements-server.in -sqlparse==0.4.2 +sqlparse==0.4.3 # via # django # django-debug-toolbar -twisted[tls]==22.10.0rc1 +twisted[tls]==22.10.0 # via # -r requirements-server.in # daphne -txaio==21.2.1 +txaio==23.1.1 # via autobahn -typing-extensions==4.4.0 +typing-extensions==4.5.0 # via # azure-core + # azure-storage-blob # sqlalchemy # twisted uritemplate==4.1.1 # via # coreapi # drf-yasg -urllib3==1.26.7 +urllib3==1.26.15 # via # botocore # requests @@ -266,11 +252,11 @@ vine==5.0.0 # amqp # celery # kombu -wcwidth==0.2.5 +wcwidth==0.2.6 # via prompt-toolkit -whitenoise==5.3.0 +whitenoise==6.4.0 # via -r requirements-server.in -zope-interface==5.4.0 +zope-interface==5.5.2 # via twisted # The following packages are considered to be unsafe in a requirements file: diff --git a/requirements-worker.txt b/requirements-worker.txt index 37d47a4f3..7a27721b8 100644 --- a/requirements-worker.txt +++ b/requirements-worker.txt @@ -10,50 +10,48 @@ anytree==2.8.0 # via oasislmf argparsetree==0.0.6 # via oasislmf -arrow==1.2.1 +arrow==1.2.3 # via jinja2-time -attrs==21.2.0 +attrs==22.2.0 # via # fiona # jsonschema # pytest -azure-core==1.26.0 +azure-core==1.26.3 # via # -r requirements-worker.in # azure-storage-blob - # msrest -azure-storage-blob==12.14.1 +azure-storage-blob==12.15.0 # via -r requirements-worker.in billiard==3.6.4.0 # via celery binaryornot==0.4.4 # via cookiecutter -boto3==1.20.20 +boto3==1.26.89 # via -r requirements-worker.in -botocore==1.23.20 +botocore==1.29.89 # via # boto3 # s3transfer celery==5.2.7 # via -r requirements-worker.in -certifi==2022.12.07 +certifi==2022.12.7 # via # fiona - # msrest # pyproj # requests -cffi==1.15.0 +cffi==1.15.1 # via cryptography chainmap==1.0.3 # via oasislmf -chardet==4.0.0 +chardet==5.1.0 # via # binaryornot # oasislmf # ods-tools -charset-normalizer==2.0.9 +charset-normalizer==3.1.0 # via requests -click==8.0.3 +click==8.1.3 # via # celery # click-didyoumean @@ -72,65 +70,65 @@ click-repl==0.2.0 # via celery cligj==0.7.2 # via fiona -configparser==5.2.0 +configparser==5.3.0 # via -r requirements-worker.in cookiecutter==2.1.1 # via oasislmf -cramjam==2.5.0 +cramjam==2.6.2 # via fastparquet -cryptography==39.0.1 +cryptography==39.0.2 # via azure-storage-blob -fasteners==0.16.3 +exceptiongroup==1.1.1 + # via pytest +fasteners==0.18 # via -r requirements-worker.in -fastparquet==0.8.0 +fastparquet==2023.2.0 # via oasislmf -fiona==1.8.21 +fiona==1.9.1 # via geopandas 
forex-python==1.8 # via oasislmf -fsspec==2021.11.1 +fsspec==2023.3.0 # via fastparquet -geopandas==0.10.2 +geopandas==0.12.2 # via oasislmf -greenlet==1.1.2 +greenlet==2.0.2 # via sqlalchemy -idna==3.3 +idna==3.4 # via requests -iniconfig==1.1.1 +iniconfig==2.0.0 # via pytest isodate==0.6.1 - # via msrest -jinja2==3.0.3 + # via azure-storage-blob +jinja2==3.1.2 # via # cookiecutter # jinja2-time jinja2-time==0.2.0 # via cookiecutter -jmespath==0.10.0 +jmespath==1.0.1 # via # boto3 # botocore joblib==1.2.0 # via scikit-learn -jsonschema==4.2.1 +jsonschema==4.17.3 # via oasislmf kombu==5.2.4 # via celery -llvmlite==0.38.1 +llvmlite==0.39.1 # via numba -markupsafe==2.0.1 +markupsafe==2.1.2 # via jinja2 -msgpack==1.0.3 +msgpack==1.0.5 # via oasislmf -msrest==0.7.1 - # via azure-storage-blob munch==2.5.0 # via fiona -numba==0.55.2 +numba==0.56.4 # via # -r requirements-worker.in # oasislmf -numexpr==2.8.1 +numexpr==2.8.4 # via oasislmf numpy==1.22.4 # via @@ -142,15 +140,15 @@ numpy==1.22.4 # pyarrow # scikit-learn # scipy + # shapely oasislmf[extra]==1.27.1 # via -r requirements-worker.in -oauthlib==3.2.2 - # via requests-oauthlib -ods-tools==3.0.2 +ods-tools==3.0.3 # via oasislmf -packaging==21.3 +packaging==23.0 # via - # numexpr + # fastparquet + # geopandas # pytest pandas==1.5.3 # via @@ -158,105 +156,96 @@ pandas==1.5.3 # geopandas # oasislmf # ods-tools -pathlib2==2.3.6 +pathlib2==2.3.7.post1 # via -r requirements-worker.in pluggy==1.0.0 # via pytest -prompt-toolkit==3.0.23 +prompt-toolkit==3.0.38 # via click-repl psycopg2-binary==2.9.5 # via -r requirements-worker.in -py==1.11.0 - # via pytest -pyarrow==8.0.0 +pyarrow==11.0.0 # via oasislmf pycparser==2.21 # via cffi pymysql==1.0.2 # via -r requirements-worker.in -pyparsing==3.0.6 - # via packaging -pyproj==3.3.0 +pyproj==3.4.1 # via geopandas -pyrsistent==0.18.0 +pyrsistent==0.19.3 # via jsonschema -pytest==6.2.5 +pytest==7.2.2 # via -r requirements-worker.in python-dateutil==2.8.2 # via # arrow # botocore # pandas -python-slugify==5.0.2 +python-slugify==8.0.1 # via cookiecutter -pytz==2021.3 +pytz==2022.7.1 # via # celery # oasislmf # pandas pyyaml==6.0 # via cookiecutter -requests==2.26.0 +requests==2.28.2 # via # azure-core # cookiecutter # forex-python - # msrest # oasislmf - # requests-oauthlib # requests-toolbelt -requests-oauthlib==1.3.1 - # via msrest -requests-toolbelt==0.9.1 +requests-toolbelt==0.10.1 # via oasislmf -rtree==0.9.7 +rtree==1.0.1 # via oasislmf -s3transfer==0.5.0 +s3transfer==0.6.0 # via boto3 -scikit-learn==1.2.1 +scikit-learn==1.2.2 # via oasislmf -scipy==1.10.0 +scipy==1.10.1 # via # oasislmf # scikit-learn -shapely==1.8.2 +shapely==2.0.1 # via # geopandas # oasislmf shutilwhich==1.1.0 # via oasislmf -simplejson==3.18.1 +simplejson==3.18.3 # via forex-python six==1.16.0 # via # anytree # azure-core # click-repl - # fasteners - # fiona # isodate # munch # pathlib2 # python-dateutil -sqlalchemy==2.0.3 +sqlalchemy==2.0.5.post1 # via -r requirements-worker.in -tabulate==0.8.9 +tabulate==0.9.0 # via oasislmf tblib==1.7.0 # via oasislmf text-unidecode==1.3 # via python-slugify -threadpoolctl==3.0.0 +threadpoolctl==3.1.0 # via scikit-learn -toml==0.10.2 +tomli==2.0.1 # via pytest -tqdm==4.62.3 +tqdm==4.65.0 # via oasislmf -typing-extensions==4.4.0 +typing-extensions==4.5.0 # via # azure-core + # azure-storage-blob # sqlalchemy -urllib3==1.26.7 +urllib3==1.26.15 # via # botocore # requests @@ -265,7 +254,7 @@ vine==5.0.0 # amqp # celery # kombu -wcwidth==0.2.5 +wcwidth==0.2.6 # via prompt-toolkit # The following 
packages are considered to be unsafe in a requirements file: diff --git a/requirements.txt b/requirements.txt index 77bbf0d7d..ba562e2de 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,14 +10,14 @@ anytree==2.8.0 # via oasislmf argparsetree==0.0.6 # via oasislmf -arrow==1.2.1 +arrow==1.2.3 # via jinja2-time -asgiref==3.4.1 +asgiref==3.6.0 # via # channels # daphne # django -attrs==21.2.0 +attrs==22.2.0 # via # automat # fiona @@ -26,16 +26,15 @@ attrs==21.2.0 # pytest # service-identity # twisted -autobahn==21.11.1 +autobahn==23.1.2 # via daphne -automat==20.2.0 +automat==22.10.0 # via twisted -azure-core==1.26.0 +azure-core==1.26.3 # via # -r ./requirements-worker.in # azure-storage-blob - # msrest -azure-storage-blob==12.14.1 +azure-storage-blob==12.15.0 # via # -r ./requirements-worker.in # django-storages @@ -43,46 +42,50 @@ backports-tempfile==1.0 # via -r requirements.in backports-weakref==1.0.post1 # via backports-tempfile -beautifulsoup4==4.10.0 +beautifulsoup4==4.11.2 # via webtest billiard==3.6.4.0 # via celery binaryornot==0.4.4 # via cookiecutter -boto3==1.20.20 +boto3==1.26.89 # via # -r ./requirements-server.in # -r ./requirements-worker.in -botocore==1.23.20 +botocore==1.29.89 # via # boto3 # s3transfer +build==0.10.0 + # via pip-tools +cachetools==5.3.0 + # via tox celery==5.2.7 # via # -r ./requirements-server.in # -r ./requirements-worker.in -certifi==2022.12.07 +certifi==2022.12.7 # via # fiona - # msrest # pyproj # requests -cffi==1.15.0 +cffi==1.15.1 # via cryptography chainmap==1.0.3 # via # -r ./requirements-server.in # oasislmf -channels==3.0.4 +channels==4.0.0 # via -r ./requirements-server.in -chardet==4.0.0 +chardet==5.1.0 # via # binaryornot # oasislmf # ods-tools -charset-normalizer==2.0.9 + # tox +charset-normalizer==3.1.0 # via requests -click==8.0.3 +click==8.1.3 # via # celery # click-didyoumean @@ -102,7 +105,9 @@ click-repl==0.2.0 # via celery cligj==0.7.2 # via fiona -configparser==5.2.0 +colorama==0.4.6 + # via tox +configparser==5.3.0 # via # -r ./requirements-server.in # -r ./requirements-worker.in @@ -118,25 +123,23 @@ coreschema==0.0.4 # via # coreapi # drf-yasg -coverage[toml]==7.1.0 +coverage[toml]==7.2.1 # via # -r requirements.in # pytest-cov -cramjam==2.5.0 +cramjam==2.6.2 # via fastparquet -cryptography==39.0.1 +cryptography==39.0.2 # via # autobahn # azure-storage-blob # pyopenssl # service-identity -daphne==3.0.2 - # via - # -r ./requirements-server.in - # channels +daphne==4.0.0 + # via -r ./requirements-server.in distlib==0.3.6 # via virtualenv -django==3.2.17 +django==3.2.18 # via # -r ./requirements-server.in # channels @@ -150,55 +153,59 @@ django==3.2.17 # drf-nested-routers # drf-yasg # model-mommy -django-cleanup==5.2.0 +django-cleanup==7.0.0 # via -r ./requirements-server.in -django-debug-toolbar==3.2.2 +django-debug-toolbar==3.8.1 # via # -r ./requirements-server.in # -r requirements.in -django-filter==21.1 +django-filter==22.1 # via -r ./requirements-server.in -django-model-utils==4.2.0 +django-model-utils==4.3.1 # via -r ./requirements-server.in django-request-logging==0.7.5 # via -r ./requirements-server.in -django-storages[azure]==1.12.3 +django-storages[azure]==1.13.2 # via -r ./requirements-server.in -django-webtest==1.9.8 +django-webtest==1.9.10 # via -r requirements.in -djangorestframework==3.12.4 +djangorestframework==3.14.0 # via # -r ./requirements-server.in # djangorestframework-simplejwt # drf-nested-routers # drf-yasg -djangorestframework-simplejwt==5.0.0 +djangorestframework-simplejwt==5.2.2 # via -r 
./requirements-server.in drf-nested-routers==0.93.4 # via -r ./requirements-server.in drf-yasg==1.21.5 # via -r ./requirements-server.in -fasteners==0.16.3 +exceptiongroup==1.1.1 + # via + # hypothesis + # pytest +fasteners==0.18 # via # -r ./requirements-worker.in # -r requirements.in -fastparquet==0.8.0 +fastparquet==2023.2.0 # via oasislmf filelock==3.9.0 # via # tox # virtualenv -fiona==1.8.21 +fiona==1.9.1 # via geopandas -flake8==4.0.1 +flake8==6.0.0 # via -r requirements.in forex-python==1.8 # via oasislmf -fsspec==2022.1.0 +fsspec==2023.3.0 # via fastparquet -geopandas==0.10.2 +geopandas==0.12.2 # via oasislmf -greenlet==1.1.2 +greenlet==2.0.2 # via sqlalchemy gunicorn==20.1.0 # via -r ./requirements-server.in @@ -206,67 +213,65 @@ hyperlink==21.0.0 # via # autobahn # twisted -hypothesis==6.30.1 +hypothesis==6.68.2 # via -r requirements.in -idna==3.3 +idna==3.4 # via # hyperlink # requests # twisted -incremental==21.3.0 +incremental==22.10.0 # via twisted inflection==0.5.1 # via drf-yasg -iniconfig==1.1.1 +iniconfig==2.0.0 # via pytest isodate==0.6.1 - # via msrest + # via azure-storage-blob itypes==1.2.0 # via coreapi -jinja2==3.0.3 +jinja2==3.1.2 # via # cookiecutter # coreschema # jinja2-time jinja2-time==0.2.0 # via cookiecutter -jmespath==0.10.0 +jmespath==1.0.1 # via # boto3 # botocore joblib==1.2.0 # via scikit-learn -jsonpickle==2.0.0 +jsonpickle==3.0.1 # via -r ./requirements-server.in -jsonschema==4.2.1 +jsonschema==4.17.3 # via # -r ./requirements-server.in # oasislmf kombu==5.2.4 # via celery -llvmlite==0.38.1 +llvmlite==0.39.1 # via numba -markdown==3.3.6 +markdown==3.4.1 # via -r ./requirements-server.in -markupsafe==2.0.1 +markupsafe==2.1.2 # via jinja2 -mccabe==0.6.1 +mccabe==0.7.0 # via flake8 -mock==4.0.3 +mock==5.0.1 # via -r requirements.in model-mommy==2.0.0 # via -r requirements.in -msgpack==1.0.3 +msgpack==1.0.5 # via oasislmf -msrest==0.7.1 - # via azure-storage-blob munch==2.5.0 # via fiona -numba==0.55.2 +numba==0.56.4 # via # -r ./requirements-worker.in # oasislmf -numexpr==2.8.0 +numexpr==2.8.4 # via oasislmf numpy==1.22.4 # via @@ -278,17 +283,20 @@ numpy==1.22.4 # pyarrow # scikit-learn # scipy + # shapely oasislmf[extra]==1.27.1 # via -r ./requirements-worker.in -oauthlib==3.2.2 - # via requests-oauthlib -ods-tools==3.0.2 +ods-tools==3.0.3 # via # -r ./requirements-server.in # oasislmf -packaging==21.3 +packaging==23.0 # via + # build # drf-yasg + # fastparquet + # geopandas + # pyproject-api # pytest # tox pandas==1.5.3 @@ -298,31 +306,27 @@ pandas==1.5.3 # geopandas # oasislmf # ods-tools -pathlib2==2.3.6 +pathlib2==2.3.7.post1 # via # -r ./requirements-server.in # -r ./requirements-worker.in -pep517==0.12.0 - # via pip-tools -pip-tools==6.4.0 +pip-tools==6.12.3 # via -r requirements.in -platformdirs==2.4.0 - # via virtualenv +platformdirs==3.1.1 + # via + # tox + # virtualenv pluggy==1.0.0 # via # pytest # tox -prompt-toolkit==3.0.23 +prompt-toolkit==3.0.38 # via click-repl psycopg2-binary==2.9.5 # via # -r ./requirements-server.in # -r ./requirements-worker.in -py==1.11.0 - # via - # pytest - # tox -pyarrow==8.0.0 +pyarrow==11.0.0 # via # -r ./requirements-server.in # oasislmf @@ -332,13 +336,13 @@ pyasn1==0.4.8 # service-identity pyasn1-modules==0.2.8 # via service-identity -pycodestyle==2.8.0 +pycodestyle==2.10.0 # via flake8 pycparser==2.21 # via cffi -pyflakes==2.4.0 +pyflakes==3.0.1 # via flake8 -pyjwt==2.4.0 +pyjwt==2.6.0 # via djangorestframework-simplejwt pymysql==1.0.2 # via @@ -348,76 +352,75 @@ pyopenssl==23.0.0 # via # -r 
requirements.in # twisted -pyparsing==3.0.6 - # via packaging -pyproj==3.3.0 +pyproj==3.4.1 # via geopandas -pyrsistent==0.18.0 +pyproject-api==1.5.1 + # via tox +pyproject-hooks==1.0.0 + # via build +pyrsistent==0.19.3 # via jsonschema -pytest==6.2.5 +pytest==7.2.2 # via # -r ./requirements-worker.in # -r requirements.in # pytest-cov # pytest-django -pytest-cov==3.0.0 +pytest-cov==4.0.0 # via -r requirements.in -pytest-django==4.5.1 +pytest-django==4.5.2 # via -r requirements.in python-dateutil==2.8.2 # via # arrow # botocore # pandas -python-slugify==5.0.2 +python-slugify==8.0.1 # via cookiecutter -pytz==2021.3 +pytz==2022.7.1 # via # celery # django + # djangorestframework # drf-yasg # oasislmf # pandas pyyaml==6.0 # via cookiecutter -requests==2.26.0 +requests==2.28.2 # via # -r requirements.in # azure-core # cookiecutter # coreapi # forex-python - # msrest # oasislmf - # requests-oauthlib # requests-toolbelt -requests-oauthlib==1.3.1 - # via msrest -requests-toolbelt==0.9.1 +requests-toolbelt==0.10.1 # via oasislmf -rtree==0.9.7 +rtree==1.0.1 # via oasislmf ruamel-yaml==0.17.21 # via drf-yasg ruamel-yaml-clib==0.2.7 # via ruamel-yaml -s3transfer==0.5.0 +s3transfer==0.6.0 # via boto3 -scikit-learn==1.2.1 +scikit-learn==1.2.2 # via oasislmf -scipy==1.10.0 +scipy==1.10.1 # via # oasislmf # scikit-learn service-identity==21.1.0 # via twisted -shapely==1.8.2 +shapely==2.0.1 # via # geopandas # oasislmf shutilwhich==1.1.0 # via oasislmf -simplejson==3.18.1 +simplejson==3.18.3 # via forex-python six==1.16.0 # via @@ -425,62 +428,60 @@ six==1.16.0 # automat # azure-core # click-repl - # fasteners - # fiona # isodate # munch # pathlib2 # python-dateutil # service-identity - # tox sortedcontainers==2.4.0 # via hypothesis -soupsieve==2.3.1 +soupsieve==2.4 # via beautifulsoup4 -sqlalchemy==2.0.3 +sqlalchemy==2.0.5.post1 # via # -r ./requirements-server.in # -r ./requirements-worker.in -sqlparse==0.4.2 +sqlparse==0.4.3 # via # django # django-debug-toolbar -tabulate==0.8.9 +tabulate==0.9.0 # via oasislmf tblib==1.7.0 # via oasislmf text-unidecode==1.3 # via python-slugify -threadpoolctl==3.0.0 +threadpoolctl==3.1.0 # via scikit-learn -toml==0.10.2 +tomli==2.0.1 # via + # build + # coverage + # pyproject-api + # pyproject-hooks # pytest # tox -tomli==1.2.2 - # via - # coverage - # pep517 -tox==3.24.4 +tox==4.4.7 # via -r requirements.in -tqdm==4.62.3 +tqdm==4.65.0 # via oasislmf -twisted[tls]==22.10.0rc1 +twisted[tls]==22.10.0 # via # -r ./requirements-server.in # daphne -txaio==21.2.1 +txaio==23.1.1 # via autobahn -typing-extensions==4.4.0 +typing-extensions==4.5.0 # via # azure-core + # azure-storage-blob # sqlalchemy # twisted uritemplate==4.1.1 # via # coreapi # drf-yasg -urllib3==1.26.7 +urllib3==1.26.15 # via # botocore # requests @@ -489,21 +490,21 @@ vine==5.0.0 # amqp # celery # kombu -virtualenv==20.19.0 +virtualenv==20.21.0 # via tox waitress==2.1.2 # via webtest -wcwidth==0.2.5 +wcwidth==0.2.6 # via prompt-toolkit webob==1.8.7 # via webtest webtest==3.0.0 # via django-webtest -wheel==0.38.1 +wheel==0.38.4 # via pip-tools -whitenoise==5.3.0 +whitenoise==6.4.0 # via -r ./requirements-server.in -zope-interface==5.4.0 +zope-interface==5.5.2 # via twisted # The following packages are considered to be unsafe in a requirements file: diff --git a/scripts/update-packages.sh b/scripts/update-packages.sh index fcd33b819..53905650d 100755 --- a/scripts/update-packages.sh +++ b/scripts/update-packages.sh @@ -12,6 +12,7 @@ pkg_list=( joblib numpy oasislmf + pytest oauthlib ods-tools pandas From 
51c09c5cce57821d29218f3ffba01881bb8f479c Mon Sep 17 00:00:00 2001 From: sambles Date: Wed, 15 Mar 2023 15:43:26 +0000 Subject: [PATCH 11/33] Move json settings schema to ods-tools (#765) * Blap settings schema * test ods-tools settings schema * Fix settings validation * Add CI option to git install Oasislmf/ods-tools * Fix indents * Try package git install in first stage of image builds * Take piwind workflow from default branch * Unit test fix * Fix unit tests * pep * flake * Force retest * Add ods_tools build option to Platform testing * Fix * Switch from tox to pytest * Fix * Fix wkflow input * Force retest * Fix the unittest * Force retest This reverts commit 9e01294d0b2e93a9487b1c2eb385da1b2c2dd24b. --- .github/workflows/build-images.yml | 19 + .github/workflows/build-schema.yml | 31 +- .github/workflows/test-images.yml | 18 +- .github/workflows/test-python.yml | 40 +- Dockerfile.api_server | 11 +- Dockerfile.model_worker | 17 + Dockerfile.model_worker_debian | 16 + .../analyses/tests/test_analysis_api.py | 98 +- .../portfolios/tests/test_portfolio.py | 7 +- .../oasisapi/schemas/analysis_settings.json | 861 ------------ .../oasisapi/schemas/model_settings.json | 1236 ----------------- src/server/oasisapi/schemas/serializers.py | 69 +- 12 files changed, 201 insertions(+), 2222 deletions(-) delete mode 100644 src/server/oasisapi/schemas/analysis_settings.json delete mode 100644 src/server/oasisapi/schemas/model_settings.json diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 6c1dac5a0..546432ffb 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -17,6 +17,14 @@ on: description: 'Severities of vulnerabilities to scanned for [LOW, MEDIUM, HIGH, CRITICAL, SKIP]' required: false default: 'CRITICAL,HIGH' + oasislmf_branch: + description: 'If set, pip install oasislmf branch [git ref]' + required: false + type: string + ods_branch: + description: 'If set, pip install ods-tools branch [git ref]' + required: false + type: string workflow_call: inputs: @@ -35,6 +43,14 @@ on: required: false default: 'CRITICAL,HIGH' type: string + oasislmf_branch: + description: 'If set, pip install oasislmf branch [git ref]' + required: false + type: string + ods_branch: + description: 'If set, pip install ods-tools branch [git ref]' + required: false + type: string outputs: server_image: @@ -118,6 +134,9 @@ jobs: context: . 
push: ${{ env.DOCKER_PUSH }} tags: ${{ env.IMAGE_REPO }}:${{ matrix.tag }} + build-args: | + oasislmf_branch=${{ inputs.oasislmf_branch }} + ods_tools_branch=${{ inputs.ods_branch }} - name: Test image efficiency if: env.SEVERITY != 'SKIP' diff --git a/.github/workflows/build-schema.yml b/.github/workflows/build-schema.yml index 19bbd030d..30cd026e1 100644 --- a/.github/workflows/build-schema.yml +++ b/.github/workflows/build-schema.yml @@ -12,11 +12,27 @@ on: - develop - backports** workflow_dispatch: + inputs: + ods_branch: + description: 'If set, pip install ods-tools branch [git ref]' + required: false workflow_call: - + inputs: + ods_branch: + description: 'If set, pip install ods-tools branch [git ref]' + required: false + type: string jobs: + ods_tools: + #if: inputs.ods_branch != '' + uses: OasisLMF/ODS_Tools/.github/workflows/build.yml@develop + secrets: inherit + with: + ods_branch: ${{ github.event_name != 'workflow_dispatch' && 'develop' || inputs.ods_branch }} + schema: + needs: ods_tools env: SCHEMA: 'reports/openapi-schema.json' runs-on: ubuntu-22.04 @@ -31,6 +47,19 @@ jobs: - name: Install requirments run: pip install -r requirements-server.txt + - name: Download package + if: needs.ods_tools.outputs.whl_filename != '' + uses: actions/download-artifact@v3 + with: + name: bin_package + path: ${{ github.workspace }}/ + + - name: Install package + if: needs.ods_tools.outputs.whl_filename != '' + run: | + pip uninstall ods-tools -y + pip install ${{ needs.ods_tools.outputs.whl_filename }} + - name: Generate OpenAPI run: | test -d $(dirname ${{ env.SCHEMA }}) || mkdir -p $(dirname ${{ env.SCHEMA }}) diff --git a/.github/workflows/test-images.yml b/.github/workflows/test-images.yml index 6837e405b..d59e0150f 100644 --- a/.github/workflows/test-images.yml +++ b/.github/workflows/test-images.yml @@ -18,19 +18,22 @@ on: last_release: description: "Test backwards compatibility with platform ver [semvar]" required: false - piwind_branch: description: "Check Results from Piwind branch [git ref]" required: true default: 'develop' - pytest_options: description: "Pytest optional args [-k ]" required: false - cve_severity: description: 'Severities of vulnerabilities to scanned for [LOW, MEDIUM, HIGH, CRITICAL, SKIP]' required: false + oasislmf_branch: + description: 'If set, pip install oasislmf branch [git ref]' + required: false + ods_branch: + description: 'If set, pip install ods-tools branch [git ref]' + required: false env: pre_release: 'true' # look for pre-release when testing last released platform version @@ -43,7 +46,8 @@ jobs: docker_push: true ignore_unfixed: true cve_severity: ${{ github.event_name != 'workflow_dispatch' && 'CRITICAL,HIGH' || inputs.cve_severity }} - + oasislmf_branch: ${{ github.event_name != 'workflow_dispatch' && 'develop' || inputs.oasislmf_branch }} + ods_branch: ${{ github.event_name != 'workflow_dispatch' && 'develop' || inputs.ods_branch }} setup: runs-on: ubuntu-latest @@ -134,7 +138,7 @@ jobs: name: Worker Debian secrets: inherit needs: [setup] - uses: OasisLMF/OasisPiWind/.github/workflows/integration.yml@master + uses: OasisLMF/OasisPiWind/.github/workflows/integration.yml@develop with: piwind_branch: ${{ needs.setup.outputs.piwind_branch }} server_image: ${{ needs.setup.outputs.build_server_img }} @@ -149,7 +153,7 @@ jobs: name: Worker Compatibility (${{ needs.setup.outputs.release_tag }}) secrets: inherit needs: [setup] - uses: OasisLMF/OasisPiWind/.github/workflows/integration.yml@master + uses: 
OasisLMF/OasisPiWind/.github/workflows/integration.yml@develop with: piwind_branch: ${{ needs.setup.outputs.piwind_branch }} server_image: 'coreoasis/api_server' @@ -164,7 +168,7 @@ jobs: name: Storage Compatibility (S3) secrets: inherit needs: [setup] - uses: OasisLMF/OasisPiWind/.github/workflows/integration.yml@master + uses: OasisLMF/OasisPiWind/.github/workflows/integration.yml@develop with: piwind_branch: ${{ needs.setup.outputs.piwind_branch }} server_image: ${{ needs.setup.outputs.build_server_img }} diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index d28fafef6..b318e914e 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -12,9 +12,21 @@ on: - develop - backports** workflow_dispatch: + inputs: + ods_branch: + description: 'If set, pip install ods-tools branch [git ref]' + required: false jobs: + ods_tools: + #if: inputs.ods_branch != '' + uses: OasisLMF/ODS_Tools/.github/workflows/build.yml@develop + secrets: inherit + with: + ods_branch: ${{ github.event_name != 'workflow_dispatch' && 'develop' || inputs.ods_branch }} + unittest: + needs: ods_tools env: JUNIT_REPORT: pytest_report.xml runs-on: ubuntu-22.04 @@ -25,18 +37,28 @@ jobs: uses: actions/setup-python@v4 with: python-version: '3.10' - - run: pip install tox + - run: pip install -r requirements.txt - # mimic python version from dockerfile - #- name: Set up Python ${{ matrix.cfg.python-version }} - # run: | - # sudo apt-get update && sudo apt-get upgrade -y - # sudo apt-get install -y --no-install-recommends python3 python3-pip + - name: Download package + if: needs.ods_tools.outputs.whl_filename != '' + uses: actions/download-artifact@v3 + with: + name: bin_package + path: ${{ github.workspace }}/ - - name: Run Tox + - name: Install package + if: needs.ods_tools.outputs.whl_filename != '' run: | - pip install tox - tox -e py + pip uninstall ods-tools -y + pip install ${{ needs.ods_tools.outputs.whl_filename }} + + - name: Run Pytest + run: pytest --cov-config=tox.ini --junitxml=${{ github.workspace }}/pytest_report.xml --cov=src --cov-report=xml --cov-report=term + + # - name: Run Tox + # run: | + # pip install tox + # tox -e py - name: Generate Report uses: dorny/test-reporter@v1 diff --git a/Dockerfile.api_server b/Dockerfile.api_server index c9dfafa6d..7e4d93037 100755 --- a/Dockerfile.api_server +++ b/Dockerfile.api_server @@ -5,6 +5,15 @@ ENV DEBIAN_FRONTEND noninteractive COPY ./requirements-server.txt ./requirements-server.txt RUN apt-get update && apt-get install -y --no-install-recommends gcc build-essential python3 python3-pip python3-dev libmariadbclient-dev-compat && rm -rf /var/lib/apt/lists/* RUN pip install --user --no-warn-script-location -r ./requirements-server.txt && pip install --no-warn-script-location --user mysqlclient + +# Install ODS-Tools from git branch (Optional) 'docker build --build-arg ods_tools_branch=develop' +ARG ods_tools_branch +RUN if [ ! 
-z "$ods_tools_branch" ] ; then \ + apt update && apt install -y git; \ + pip uninstall ods-tools -y; \ + pip install --user --no-warn-script-location -v git+https://git@github.com/OasisLMF/ODS_Tools.git@${ods_tools_branch}#egg=ods-tools; \ + fi + USER server # ---- STAGE 2 ---- @@ -18,8 +27,6 @@ RUN adduser --home /home/server --shell /bin/bash --disabled-password --gecos "" COPY --chown=server:server --from=build-packages /root/.local /home/server/.local RUN mkdir -p /var/log/oasis /shared-fs && chmod 777 -R /var/log/oasis -RUN echo 'server ALL=(ALL) NOPASSWD:/usr/local/bin/update_hosts' >> /etc/sudoers - WORKDIR /var/www/oasis COPY ./src/startup_server.sh /usr/local/bin/startup COPY ./src/utils/wait-for-it.sh /usr/local/bin/wait-for-it diff --git a/Dockerfile.model_worker b/Dockerfile.model_worker index b8d8fdaa8..ede5b191a 100755 --- a/Dockerfile.model_worker +++ b/Dockerfile.model_worker @@ -6,6 +6,23 @@ ENV DEBIAN_FRONTEND=noninteractive RUN apt-get update && apt-get install -y libspatialindex-dev git curl g++ build-essential libtool autoconf automake python3-dev python3 python3-pip pkg-config COPY ./requirements-worker.txt ./requirements-worker.txt RUN pip3 install --user --no-warn-script-location -r ./requirements-worker.txt + +# Install MDK from git branch (Optional) 'docker build --build-arg oasislmf_branch=develop' +ARG oasislmf_branch +RUN if [ ! -z "$oasislmf_branch" ] ; then \ + apt update && apt install -y git; \ + pip uninstall oasislmf -y; \ + pip install --user --no-warn-script-location -v git+https://git@github.com/OasisLMF/OasisLMF.git@${oasislmf_branch}#egg=oasislmf[extra]; \ + fi + +# Install ODS-Tools from git branch (Optional) 'docker build --build-arg ods_tools_branch=develop' +ARG ods_tools_branch +RUN if [ ! -z "$ods_tools_branch" ] ; then \ + apt update && apt install -y git; \ + pip uninstall ods-tools -y; \ + pip install --user --no-warn-script-location -v git+https://git@github.com/OasisLMF/ODS_Tools.git@${ods_tools_branch}#egg=ods-tools; \ + fi + USER worker # ---- STAGE 2 ---- diff --git a/Dockerfile.model_worker_debian b/Dockerfile.model_worker_debian index f43de388b..7a2e09303 100644 --- a/Dockerfile.model_worker_debian +++ b/Dockerfile.model_worker_debian @@ -8,6 +8,22 @@ WORKDIR /home/worker COPY ./requirements-worker.txt ./requirements.txt RUN pip install -r ./requirements.txt +# Install MDK from git branch (Optional) 'docker build --build-arg oasislmf_branch=develop' +ARG oasislmf_branch +RUN if [ ! -z "$oasislmf_branch" ] ; then \ + apt update && apt install -y git; \ + pip uninstall oasislmf -y; \ + pip install -v git+https://git@github.com/OasisLMF/OasisLMF.git@${oasislmf_branch}#egg=oasislmf[extra]; \ + fi + +# Install ODS-Tools from git branch (Optional) 'docker build --build-arg ods_tools_branch=develop' +ARG ods_tools_branch +RUN if [ ! 
-z "$ods_tools_branch" ] ; then \ + apt update && apt install -y git; \ + pip uninstall ods-tools -y; \ + pip install -v git+https://git@github.com/OasisLMF/ODS_Tools.git@${ods_tools_branch}#egg=ods-tools; \ + fi + # Copy startup script + server config COPY ./src/startup_worker.sh ./startup.sh COPY ./src/startup_tester.sh ./runtest diff --git a/src/server/oasisapi/analyses/tests/test_analysis_api.py b/src/server/oasisapi/analyses/tests/test_analysis_api.py index d84a3e1fa..cb3123f07 100644 --- a/src/server/oasisapi/analyses/tests/test_analysis_api.py +++ b/src/server/oasisapi/analyses/tests/test_analysis_api.py @@ -728,29 +728,28 @@ def test_settings_json_is_not_valid___response_is_400(self): user = fake_user() analysis = fake_analysis() json_data = { - "analysis_settings": { - "analysis_tag": "test_analysis", - "model_supplier_id": "OasisIM", - "model_name_id": "1", - "number_of_samples": -1, - "gul_threshold": 0, - "model_settings": { - "use_random_number_file": True, - "event_occurrence_file_id": "1" - }, - "gul_output": True, - "gul_summaries": [ - { - "id": 1, - "summarycalc": True, - "eltcalc": True, - "aalcalc": "Not-A-Boolean", - "pltcalc": True, - "lec_output": False - } - ], - "il_output": False - } + "version": "3", + "analysis_tag": "test_analysis", + "model_supplier_id": "OasisIM", + "model_name_id": "1", + "number_of_samples": -1, + "gul_threshold": 0, + "model_settings": { + "use_random_number_file": True, + "event_occurrence_file_id": "1" + }, + "gul_output": True, + "gul_summaries": [ + { + "id": 1, + "summarycalc": True, + "eltcalc": True, + "aalcalc": "Not-A-Boolean", + "pltcalc": True, + "lec_output": False + } + ], + "il_output": False } response = self.app.post( @@ -776,32 +775,31 @@ def test_settings_json_is_uploaded___can_be_retrieved(self): user = fake_user() analysis = fake_analysis() json_data = { - "analysis_settings": { - "source_tag": "test_source", - "analysis_tag": "test_analysis", - "model_supplier_id": "OasisIM", - "model_name_id": "1", - "number_of_samples": 10, - "gul_threshold": 0, - "model_settings": { - "use_random_number_file": True, - "event_occurrence_file_id": "1" - }, - "gul_output": True, - "gul_summaries": [ - { - "id": 1, - "summarycalc": True, - "eltcalc": True, - "aalcalc": True, - "pltcalc": True, - "lec_output": False - } - ], - "il_output": False, - 'model_version_id': '1', - 'module_supplier_id': 'OasisIM' - } + "version": "3", + "source_tag": "test_source", + "analysis_tag": "test_analysis", + "model_supplier_id": "OasisIM", + "model_name_id": "1", + "number_of_samples": 10, + "gul_threshold": 0, + "model_settings": { + "use_random_number_file": True, + "event_occurrence_file_id": "1" + }, + "gul_output": True, + "gul_summaries": [ + { + "id": 1, + "summarycalc": True, + "eltcalc": True, + "aalcalc": True, + "pltcalc": True, + "lec_output": False + } + ], + "il_output": False, + 'model_version_id': '1', + 'model_supplier_id': 'OasisIM' } self.app.post( @@ -819,7 +817,7 @@ def test_settings_json_is_uploaded___can_be_retrieved(self): 'Authorization': 'Bearer {}'.format(AccessToken.for_user(user)) }, ) - self.assertEqual(json.loads(response.body), json_data['analysis_settings']) + self.assertEqual(json.loads(response.body), json_data) self.assertEqual(response.content_type, 'application/json') diff --git a/src/server/oasisapi/portfolios/tests/test_portfolio.py b/src/server/oasisapi/portfolios/tests/test_portfolio.py index 26827cbc0..e0c27d346 100644 --- a/src/server/oasisapi/portfolios/tests/test_portfolio.py +++ 
b/src/server/oasisapi/portfolios/tests/test_portfolio.py @@ -896,8 +896,8 @@ def test_reinsurance_info_file_is_uploaded_as_parquet___file_can_be_retrieved(se """ SCOPE_DATA_VALID = """ReinsNumber,PortNumber,AccNumber,PolNumber,LocGroup,LocNumber,CedantName,ProducerName,LOB,CountryCode,ReinsTag,CededPercent,OEDVersion -1,1,A11111,,,10002082047,,,,,,0.1,2.0.0 -1,1,A11111,,,10002082048,,,,,,0.2,2.0.0 +1,1,A11111,,,10002082047,,,,GB,,0.1,2.0.0 +1,1,A11111,,,10002082048,,,,GB,,0.2,2.0.0 """ LOCATION_DATA_INVALID = """Port,AccNumber,LocNumb,IsTenant,BuildingID,CountryCode,Latitude,Longitude,Street,PostalCode,OccupancyCode,ConstructionCode,LocPerilsCovered,BuildingTIV,OtherTIV,ContentsTIV,BITIV,LocCurrency,OEDVersion @@ -1091,7 +1091,8 @@ def test_location_file__is_invalid__response_is_400(self): ['location', "column 'Port' is not a valid oed field"], ['location', "column 'LocNumb' is not a valid oed field"], ['location', "column 'Street' is not a valid oed field"], - ['location', 'LocPerilsCovered has invalid perils.\n AccNumber LocPerilsCovered\n1 A11111 XXYA'] + ['location', 'LocPerilsCovered has invalid perils.\n AccNumber LocPerilsCovered\n1 A11111 XXYA'], + ['location', 'invalid ConstructionCode.\n AccNumber ConstructionCode\n3 A11111 -1'] ]) def test_account_file__is_invalid__response_is_400(self): diff --git a/src/server/oasisapi/schemas/analysis_settings.json b/src/server/oasisapi/schemas/analysis_settings.json deleted file mode 100644 index c939655bc..000000000 --- a/src/server/oasisapi/schemas/analysis_settings.json +++ /dev/null @@ -1,861 +0,0 @@ -{ - "type": "object", - "title": "Analysis settings.", - "description": "Specifies the model settings and outputs for an analysis.", - "properties": { - "source_tag": { - "type": "string", - "minLength": 1, - "title": "Source Tag", - "description": "Labels the origin of the analysis." - }, - "analysis_tag": { - "type": "string", - "minLength": 1, - "title": "Analysis Tag", - "description": "Labels the analysis with an identifier." - }, - "model_supplier_id": { - "type": "string", - "title": "Model supplier ID", - "description": "Identifier for the model vendor/module supplier." - }, - "model_name_id": { - "type": "string", - "title": "Model name ID", - "description": "Identifier for the model." - }, - "number_of_samples": { - "type": "integer", - "minimum": 0, - "title": "Number of samples.", - "description": "The number of samples generated per event.", - "default": 100 - }, - "gul_threshold": { - "type": "number", - "minimum": 0, - "title": "Ground-up loss threshold", - "description": "The threshold at which groun-up losses will be capped.", - "default": 0 - }, - "return_periods": { - "type": "array", - "minItems": 1, - "title": "User set return periods", - "description": "List of return periods as integers '[10, 100, 1000 .. etc]'", - "items": { - "type": "integer", - "minimum": 1 - } - }, - "event_ids": { - "type": "array", - "minItems": 1, - "items": { - "type": "integer", - "minimum": 1 - }, - "title": "User set event ids", - "description": "List of event ids as integers '[1, 5, 123 .. etc]'" - }, - "quantiles": { - "type": "array", - "minItems": 1, - "items": { - "type": "number", - "minimum": 0.0, - "maximum": 1.0 - }, - "title": "User set quantile points", - "description": "List of quantiles as float values '[0.0, 0.2, 0.4 .. 
etc]'" - }, - "model_settings": { - "type": "object", - "title": "Model settings", - "description": "Model specific settings.", - "properties": { - "use_random_number_file": { - "type": "boolean", - "title": "Use random number file", - "description": "If true use a pre-generated set of random number, if false generate random numbers dynamically." - }, - "event_set": { - "type": "string", - "title": "Event set file ID.", - "description": "Identifier for the event set file that is used for output calculations.", - "default": 1 - }, - "event_occurrence_id": { - "type": "string", - "title": "Event occurrence file ID.", - "description": "Identifier for the event occurrence file that is used for output calculations.", - "default": 1 - } - } - }, - "gul_output": { - "type": "boolean", - "title": "Produce GUL output", - "description": "If true generate ground-up loss outputs as per specified gul-summaries.", - "default": false - }, - "gul_summaries": { - "title": "Ground-up loss summary outputs", - "description": "Specified which outputs should be generated for which summary sets, for ground-up losses.", - "type": "array", - "uniqueItems": false, - "items": { - "type": "object", - "properties": { - "id": { - "type": "integer", - "multipleOf": 1, - "title": "Summary ID", - "description": "Identifier for the summary set.", - "minimum": 1, - "maximum": 9 - }, - "oed_fields": { - "type": "array", - "items": { - "type": "string" - }, - "title": "OED fields", - "description": "A list of OED fields used to group the generated losses." - }, - "summarycalc": { - "type": "boolean", - "title": "Summary calculation flag", - "description": "If true, output summary calculations by level for the summary set.", - "default": false - }, - "eltcalc": { - "type": "boolean", - "title": "ELT calculation flag", - "description": "If true, output the event loss table by level for the summary set.", - "default": false - }, - "aalcalc": { - "type": "boolean", - "title": "AAL calculation flag", - "description": "If true, output the average annual loss by level for the summary set.", - "default": false - }, - "pltcalc": { - "type": "boolean", - "title": "PLT calculation flag", - "description": "If true, output the period loss table by level for the summary set.", - "default": false - }, - "lec_output": { - "type": "boolean", - "title": "LEC calculation flag", - "description": "If true, output loss exceed curves by level for the summary set.", - "default": false - }, - "leccalc": { - "type": "object", - "title": "LEC calculation settings", - "description": "Specifies which loss exceedence curve types will be outputed for the summary level", - "properties": { - "return_period_file": { - "type": "boolean", - "title": "Return period file flag", - "description": "If true, a file listing the return periods will be provided as part of the analysis inputs. 
If false, a default set will be used.", - "default": false - }, - "full_uncertainty_aep": { - "type": "boolean", - "title": "Full uncertainty AEP flag", - "description": "If true and LEC output is true, output the full uncertainty aggregate loss exceedence curve by level for the summary set.", - "default": true - }, - "full_uncertainty_oep": { - "type": "boolean", - "title": "Full uncertainty OEP flag", - "description": "If true and LEC output is true, output the full uncertainty occurrence loss exceedence curve by level for the summary set.", - "default": false - }, - "wheatsheaf_aep": { - "type": "boolean", - "title": "Wheatsheaf AEP flag", - "description": "If true and LEC output is true, output the wheatsheaf aggregate loss exceedence curve by level for the summary set.", - "default": false - }, - "wheatsheaf_oep": { - "type": "boolean", - "title": "Wheatsheaf OEP flag", - "description": "If true and LEC output is true, output the wheatsheaf occurrence loss exceedence curve by level for the summary set.", - "default": false - }, - "wheatsheaf_mean_aep": { - "type": "boolean", - "title": "Wheatsheaf mean AEP flag", - "description": "If true and LEC output is true, output the wheatsheaf mean aggregate loss exceedence curve by level for the summary set.", - "default": false - }, - "wheatsheaf_mean_oep": { - "type": "boolean", - "title": "Wheatsheaf mean OEP schema.", - "description": "If true and LEC output is true, output the wheatsheaf occurrence loss exceedence curve by level for the summary set.", - "default": false - }, - "sample_mean_aep": { - "type": "boolean", - "title": "Sample mean AEP flag", - "description": "If true and LEC output is true, output the sample mean aggregate loss exceedence curve by level for the summary set.", - "default": false - }, - "sample_mean_oep": { - "type": "boolean", - "title": "Sample mean OEP schema.", - "description": "If true and LEC output is true, output the sample occurrence loss exceedence curve by level for the summary set.", - "default": false - } - } - }, - "ord_output": { - "type": "object", - "title": "ORD output settings", - "description": "", - "properties": { - "elt_sample": { - "type": "boolean", - "title": "SELT", - "description": "Sample Event Loss Table (ORD Output flag)", - "default": false - }, - "elt_quantile": { - "type": "boolean", - "title": "QELT", - "description": "Quantile Event Loss Table (ORD Output flag)", - "default": false - }, - "elt_moment": { - "type": "boolean", - "title": "MELT", - "description": "Moment Event Loss Table (ORD Output flag)", - "default": false - }, - "plt_sample": { - "type": "boolean", - "title": "SPLT", - "description": "Sample Period Loss Table (ORD Output flag)", - "default": false - }, - "plt_quantile": { - "type": "boolean", - "title": "QPLT", - "description": "Quantile Period Loss Table (ORD Output flag)", - "default": false - }, - "plt_moment": { - "type": "boolean", - "title": "MPLT", - "description": "Moment Period Loss Table (ORD Output flag)", - "default": false - }, - "alt_period": { - "type": "boolean", - "title": "PALT", - "description": "Period Average Loss Table (ORD Output flag)", - "default": false - }, - "alct_convergence": { - "type": "boolean", - "title": "ALCT", - "description": "Average Loss Convergence Table (ORD Output flag), requires alt_period set to true", - "default": false - }, - "alct_confidence": { - "type": "number", - "minimum": 0.0, - "maximum": 1.0, - "title": "Confidence level for ALCT", - "description": "Set confidence level for ALCT", - "default": 0.95 - }, - 
"ept_full_uncertainty_aep": { - "type": "boolean", - "title": "EPT - Full Uncertainty AEP", - "description": "Exceedance Probability Table (option), when enable this metric is added to the EPT file", - "default": false - }, - "ept_full_uncertainty_oep": { - "type": "boolean", - "title": "EPT - Full Uncertainty OEP", - "description": "Exceedance Probability Table (option), when enable this metric is added to the EPT file", - "default": false - }, - "ept_mean_sample_aep": { - "type": "boolean", - "title": "EPT - Mean Sample AEP", - "description": "Exceedance Probability Table (option), when enable this metric is added to the EPT file", - "default": false - }, - "ept_mean_sample_oep": { - "type": "boolean", - "title": "EPT - Mean Sample OEP", - "description": "Exceedance Probability Table (option), when enable this metric is added to the EPT file", - "default": false - }, - "ept_per_sample_mean_aep": { - "type": "boolean", - "title": "EPT - Per Sample Mean AEP", - "description": "Exceedance Probability Table (option), when enable this metric is added to the EPT file", - "default": false - }, - "ept_per_sample_mean_oep": { - "type": "boolean", - "title": "EPT - Per Sample Mean OEP", - "description": "Exceedance Probability Table (option), when enable this metric is added to the EPT file", - "default": false - }, - "psept_aep": { - "type": "boolean", - "title": "PSEPT - Aggregate Loss", - "description": "Per Sample Exceedance Probability Table, (option) when enable this metric is added to the PSEPT file", - "default": false - }, - "psept_oep": { - "type": "boolean", - "title": "PSEPT - Occurrence Loss", - "description": "Per Sample Exceedance Probability Table (option), when enable this metric is added to the PSEPT file", - "default": false - }, - "return_period_file": { - "type": "boolean", - "title": "ORD Return period file flag", - "description": "If true, a file listing the return periods will be used for EPT and PSEPT files. If false, a default set will be used.", - "default": false - }, - "parquet_format": { - "type": "boolean", - "title": "Parquet output flag", - "description": "Write output files in parquet format rather than csv format.", - "default": false - } - } - } - }, - "required": [ - "id" - ] - } - }, - "il_output": { - "type": "boolean", - "title": "Produce il output", - "description": "If true generate insured loss outputs as per specified il-summaries.", - "default": false - }, - "il_summaries": { - "title": "Insured loss summary outputs", - "description": "Specified which outputs should be generated for which summary sets, for insured losses.", - "type": "array", - "uniqueItems": false, - "items": { - "type": "object", - "properties": { - "id": { - "type": "integer", - "multipleOf": 1, - "title": "Summary ID", - "description": "Identifier for the summary set.", - "minimum": 1, - "maximum": 9 - }, - "oed_fields": { - "type": "array", - "items": { - "type": "string" - }, - "title": "OED fields", - "description": "A list of OED fields used to group the generated losses." 
- }, - "summarycalc": { - "type": "boolean", - "title": "Summary calculation flag", - "description": "If true, output summary calculations by level for the summary set.", - "default": false - }, - "eltcalc": { - "type": "boolean", - "title": "ELT calculation flag", - "description": "If true, output the event loss table by level for the summary set.", - "default": false - }, - "aalcalc": { - "type": "boolean", - "title": "AAL calculation flag", - "description": "If true, output the average annual loss by level for the summary set.", - "default": false - }, - "pltcalc": { - "type": "boolean", - "title": "PLT calculation flag", - "description": "If true, output the period loss table by level for the summary set.", - "default": false - }, - "lec_output": { - "type": "boolean", - "title": "LEC calculation flag", - "description": "If true, output loss exceed curves by level for the summary set.", - "default": false - }, - "leccalc": { - "type": "object", - "title": "LEC calculation settings", - "description": "Specifies which loss exceedence curve types will be outputed for the summary level", - "properties": { - "return_period_file": { - "type": "boolean", - "title": "Return period file flag", - "description": "If true, a file listing the return periods will be provided as part of the analysis inputs. If false, a default set will be used.", - "default": false - }, - "full_uncertainty_aep": { - "type": "boolean", - "title": "Full uncertainty AEP flag", - "description": "If true and LEC output is true, output the full uncertainty aggregate loss exceedence curve by level for the summary set.", - "default": true - }, - "full_uncertainty_oep": { - "type": "boolean", - "title": "Full uncertainty OEP flag", - "description": "If true and LEC output is true, output the full uncertainty occurrence loss exceedence curve by level for the summary set.", - "default": false - }, - "wheatsheaf_aep": { - "type": "boolean", - "title": "Wheatsheaf AEP flag", - "description": "If true and LEC output is true, output the wheatsheaf aggregate loss exceedence curve by level for the summary set.", - "default": false - }, - "wheatsheaf_oep": { - "type": "boolean", - "title": "Wheatsheaf OEP flag", - "description": "If true and LEC output is true, output the wheatsheaf occurrence loss exceedence curve by level for the summary set.", - "default": false - }, - "wheatsheaf_mean_aep": { - "type": "boolean", - "title": "Wheatsheaf mean AEP flag", - "description": "If true and LEC output is true, output the wheatsheaf mean aggregate loss exceedence curve by level for the summary set.", - "default": false - }, - "wheatsheaf_mean_oep": { - "type": "boolean", - "title": "Wheatsheaf mean OEP schema.", - "description": "If true and LEC output is true, output the wheatsheaf occurrence loss exceedence curve by level for the summary set.", - "default": false - }, - "sample_mean_aep": { - "type": "boolean", - "title": "Sample mean AEP flag", - "description": "If true and LEC output is true, output the sample mean aggregate loss exceedence curve by level for the summary set.", - "default": false - }, - "sample_mean_oep": { - "type": "boolean", - "title": "Sample mean OEP schema.", - "description": "If true and LEC output is true, output the sample occurrence loss exceedence curve by level for the summary set.", - "default": false - } - } - }, - "ord_output": { - "type": "object", - "title": "ORD output settings", - "description": "", - "properties": { - "elt_sample": { - "type": "boolean", - "title": "SELT", - "description": "Sample 
Event Loss Table (ORD Output flag)", - "default": false - }, - "elt_quantile": { - "type": "boolean", - "title": "QELT", - "description": "Quantile Event Loss Table (ORD Output flag)", - "default": false - }, - "elt_moment": { - "type": "boolean", - "title": "MELT", - "description": "Moment Event Loss Table (ORD Output flag)", - "default": false - }, - "plt_sample": { - "type": "boolean", - "title": "SPLT", - "description": "Sample Period Loss Table (ORD Output flag)", - "default": false - }, - "plt_quantile": { - "type": "boolean", - "title": "QPLT", - "description": "Quantile Period Loss Table (ORD Output flag)", - "default": false - }, - "plt_moment": { - "type": "boolean", - "title": "MPLT", - "description": "Moment Period Loss Table (ORD Output flag)", - "default": false - }, - "alt_period": { - "type": "boolean", - "title": "PALT", - "description": "Period Average Loss Table (ORD Output flag)", - "default": false - }, - "alct_convergence": { - "type": "boolean", - "title": "ALCT", - "description": "Average Loss Convergence Table (ORD Output flag), requires alt_period set to true", - "default": false - }, - "alct_confidence": { - "type": "number", - "minimum": 0.0, - "maximum": 1.0, - "title": "Confidence level for ALCT", - "description": "Set confidence level for ALCT", - "default": 0.95 - }, - "ept_full_uncertainty_aep": { - "type": "boolean", - "title": "EPT - Full Uncertainty AEP", - "description": "Exceedance Probability Table (option), when enable this metric is added to the EPT file", - "default": false - }, - "ept_full_uncertainty_oep": { - "type": "boolean", - "title": "EPT - Full Uncertainty OEP", - "description": "Exceedance Probability Table (option), when enable this metric is added to the EPT file", - "default": false - }, - "ept_mean_sample_aep": { - "type": "boolean", - "title": "EPT - Mean Sample AEP", - "description": "Exceedance Probability Table (option), when enable this metric is added to the EPT file", - "default": false - }, - "ept_mean_sample_oep": { - "type": "boolean", - "title": "EPT - Mean Sample OEP", - "description": "Exceedance Probability Table (option), when enable this metric is added to the EPT file", - "default": false - }, - "ept_per_sample_mean_aep": { - "type": "boolean", - "title": "EPT - Per Sample Mean AEP", - "description": "Exceedance Probability Table (option), when enable this metric is added to the EPT file", - "default": false - }, - "ept_per_sample_mean_oep": { - "type": "boolean", - "title": "EPT - Per Sample Mean OEP", - "description": "Exceedance Probability Table (option), when enable this metric is added to the EPT file", - "default": false - }, - "psept_aep": { - "type": "boolean", - "title": "PSEPT - Aggregate Loss", - "description": "Per Sample Exceedance Probability Table, (option) when enable this metric is added to the PSEPT file", - "default": false - }, - "psept_oep": { - "type": "boolean", - "title": "PSEPT - Occurrence Loss", - "description": "Per Sample Exceedance Probability Table (option), when enable this metric is added to the PSEPT file", - "default": false - }, - "return_period_file": { - "type": "boolean", - "title": "ORD Return period file flag", - "description": "If true, a file listing the return periods will be used for EPT and PSEPT files. 
If false, a default set will be used.", - "default": false - }, - "parquet_format": { - "type": "boolean", - "title": "Parquet output flag", - "description": "Write output files in parquet format rather than csv format.", - "default": false - } - } - } - }, - "required": [ - "id" - ] - } - }, - "ri_output": { - "type": "boolean", - "title": "Produce ri output", - "description": "If true generate reinsurance net loss outputs as per specified ri-summaries.", - "default": false - }, - "ri_summaries": { - "title": "Reinsurance net loss summary outputs", - "description": "Specified which outputs should be generated for which summary sets, for reinsurance net losses.", - "type": "array", - "uniqueItems": false, - "items": { - "type": "object", - "properties": { - "id": { - "type": "integer", - "multipleOf": 1, - "title": "Summary ID", - "description": "Identifier for the summary set.", - "minimum": 1, - "maximum": 9 - }, - "oed_fields": { - "type": "array", - "items": { - "type": "string" - }, - "title": "OED fields", - "description": "A list of OED fields used to group the generated losses." - }, - "summarycalc": { - "type": "boolean", - "title": "Summary calculation flag", - "description": "If true, output summary calculations by level for the summary set.", - "default": false - }, - "eltcalc": { - "type": "boolean", - "title": "ELT calculation flag", - "description": "If true, output the event loss table by level for the summary set.", - "default": false - }, - "aalcalc": { - "type": "boolean", - "title": "AAL calculation flag", - "description": "If true, output the average annual loss by level for the summary set.", - "default": false - }, - "pltcalc": { - "type": "boolean", - "title": "PLT calculation flag", - "description": "If true, output the period loss table by level for the summary set.", - "default": false - }, - "lec_output": { - "type": "boolean", - "title": "LEC calculation flag", - "description": "If true, output loss exceed curves by level for the summary set.", - "default": false - }, - "leccalc": { - "type": "object", - "title": "LEC calculation settings", - "description": "Specifies which loss exceedence curve types will be outputed for the summary level", - "properties": { - "return_period_file": { - "type": "boolean", - "title": "Return period file flag", - "description": "If true, a file listing the return periods will be provided as part of the analysis inputs. 
If false, a default set will be used.", - "default": false - }, - "full_uncertainty_aep": { - "type": "boolean", - "title": "Full uncertainty AEP flag", - "description": "If true and LEC output is true, output the full uncertainty aggregate loss exceedence curve by level for the summary set.", - "default": true - }, - "full_uncertainty_oep": { - "type": "boolean", - "title": "Full uncertainty OEP flag", - "description": "If true and LEC output is true, output the full uncertainty occurrence loss exceedence curve by level for the summary set.", - "default": false - }, - "wheatsheaf_aep": { - "type": "boolean", - "title": "Wheatsheaf AEP flag", - "description": "If true and LEC output is true, output the wheatsheaf aggregate loss exceedence curve by level for the summary set.", - "default": false - }, - "wheatsheaf_oep": { - "type": "boolean", - "title": "Wheatsheaf OEP flag", - "description": "If true and LEC output is true, output the wheatsheaf occurrence loss exceedence curve by level for the summary set.", - "default": false - }, - "wheatsheaf_mean_aep": { - "type": "boolean", - "title": "Wheatsheaf mean AEP flag", - "description": "If true and LEC output is true, output the wheatsheaf mean aggregate loss exceedence curve by level for the summary set.", - "default": false - }, - "wheatsheaf_mean_oep": { - "type": "boolean", - "title": "Wheatsheaf mean OEP schema.", - "description": "If true and LEC output is true, output the wheatsheaf occurrence loss exceedence curve by level for the summary set.", - "default": false - }, - "sample_mean_aep": { - "type": "boolean", - "title": "Sample mean AEP flag", - "description": "If true and LEC output is true, output the sample mean aggregate loss exceedence curve by level for the summary set.", - "default": false - }, - "sample_mean_oep": { - "type": "boolean", - "title": "Sample mean OEP schema.", - "description": "If true and LEC output is true, output the sample occurrence loss exceedence curve by level for the summary set.", - "default": false - } - } - }, - "ord_output": { - "type": "object", - "title": "ORD output settings", - "description": "", - "properties": { - "elt_sample": { - "type": "boolean", - "title": "SELT", - "description": "Sample Event Loss Table (ORD Output flag)", - "default": false - }, - "elt_quantile": { - "type": "boolean", - "title": "QELT", - "description": "Quantile Event Loss Table (ORD Output flag)", - "default": false - }, - "elt_moment": { - "type": "boolean", - "title": "MELT", - "description": "Moment Event Loss Table (ORD Output flag)", - "default": false - }, - "plt_sample": { - "type": "boolean", - "title": "SPLT", - "description": "Sample Period Loss Table (ORD Output flag)", - "default": false - }, - "plt_quantile": { - "type": "boolean", - "title": "QPLT", - "description": "Quantile Period Loss Table (ORD Output flag)", - "default": false - }, - "plt_moment": { - "type": "boolean", - "title": "MPLT", - "description": "Moment Period Loss Table (ORD Output flag)", - "default": false - }, - "alt_period": { - "type": "boolean", - "title": "PALT", - "description": "Period Average Loss Table (ORD Output flag)", - "default": false - }, - "alct_convergence": { - "type": "boolean", - "title": "ALCT", - "description": "Average Loss Convergence Table (ORD Output flag), requires alt_period set to true", - "default": false - }, - "alct_confidence": { - "type": "number", - "minimum": 0.0, - "maximum": 1.0, - "title": "Confidence level for ALCT", - "description": "Set confidence level for ALCT", - "default": 0.95 - }, - 
"ept_full_uncertainty_aep": { - "type": "boolean", - "title": "EPT - Full Uncertainty AEP", - "description": "Exceedance Probability Table (option), when enable this metric is added to the EPT file", - "default": false - }, - "ept_full_uncertainty_oep": { - "type": "boolean", - "title": "EPT - Full Uncertainty OEP", - "description": "Exceedance Probability Table (option), when enable this metric is added to the EPT file", - "default": false - }, - "ept_mean_sample_aep": { - "type": "boolean", - "title": "EPT - Mean Sample AEP", - "description": "Exceedance Probability Table (option), when enable this metric is added to the EPT file", - "default": false - }, - "ept_mean_sample_oep": { - "type": "boolean", - "title": "EPT - Mean Sample OEP", - "description": "Exceedance Probability Table (option), when enable this metric is added to the EPT file", - "default": false - }, - "ept_per_sample_mean_aep": { - "type": "boolean", - "title": "EPT - Per Sample Mean AEP", - "description": "Exceedance Probability Table (option), when enable this metric is added to the EPT file", - "default": false - }, - "ept_per_sample_mean_oep": { - "type": "boolean", - "title": "EPT - Per Sample Mean OEP", - "description": "Exceedance Probability Table (option), when enable this metric is added to the EPT file", - "default": false - }, - "psept_aep": { - "type": "boolean", - "title": "PSEPT - Aggregate Loss", - "description": "Per Sample Exceedance Probability Table, (option) when enable this metric is added to the PSEPT file", - "default": false - }, - "psept_oep": { - "type": "boolean", - "title": "PSEPT - Occurrence Loss", - "description": "Per Sample Exceedance Probability Table (option), when enable this metric is added to the PSEPT file", - "default": false - }, - "return_period_file": { - "type": "boolean", - "title": "ORD Return period file flag", - "description": "If true, a file listing the return periods will be used for EPT and PSEPT files. If false, a default set will be used.", - "default": false - }, - "parquet_format": { - "type": "boolean", - "title": "Parquet output flag", - "description": "Write output files in parquet format rather than csv format.", - "default": false - } - } - } - }, - "required": [ - "id" - ] - } - }, - "full_correlation": { - "type": "boolean", - "title": "Produce fully correlated output", - "description": "If true generate losses for fully correlated output, i.e. no independence between groups, in addition to losses for default output.", - "default": false - } - }, - "required": [ - "model_supplier_id", - "model_name_id", - "model_settings", - "gul_output", - "gul_summaries" - ] -} diff --git a/src/server/oasisapi/schemas/model_settings.json b/src/server/oasisapi/schemas/model_settings.json deleted file mode 100644 index ec071b561..000000000 --- a/src/server/oasisapi/schemas/model_settings.json +++ /dev/null @@ -1,1236 +0,0 @@ -{ - "type":"object", - "title":"Model settings", - "description":"Specifies the model resource schema", - "additionalProperties":false, - "properties":{ - "name": { - "type":"string", - "title":"Model Name", - "description":"Name of the model associated with this settings file (optional)", - "minLength":1 - }, - "description": { - "type":"string", - "title":"Model Description", - "description":"Short Description of the catastrophe model (optional)", - "minLength":1 - }, - "model_supplier_id": { - "type":"string", - "title":"Model Supplier ID", - "description":"The supplier ID for the model. 
(optional)", - "minLength":1 - }, - "model_name_id": { - "type":"string", - "title":"Model Name ID", - "description":"The model ID for the model. (optional)", - "minLength":1 - }, - "model_version_id": { - "type":"string", - "title":"Model Version ID", - "description":"The version ID for the model. (optional)", - "minLength":1 - }, - "model_configurable": { - "type":"boolean", - "title":"Model configurable", - "description":"Marks this model as a 'complex model' with configuration options at the input file generation stage. (optional)" - }, - "model_default_samples":{ - "type":"integer", - "title":"Default number of samples", - "description":"The default number of samples generated per event. (optional)", - "minimum":0 - }, - "model_settings":{ - "type":"object", - "uniqueItems":false, - "title":"Model setting options", - "description":"Runtime settings available to a model", - "additionalProperties":false, - "properties":{ - "event_set":{ - "title":"Event set selector", - "description":"The 'id' field from options is used as a file suffix' events_.bin", - "type":"object", - "uniqueItems":false, - "additionalProperties":false, - "properties":{ - "name":{ - "type":"string", - "title":"UI Option", - "description":"UI name for selection", - "minLength":1 - }, - "desc":{ - "type":"string", - "title":"Short description", - "description":"UI description for selection" - }, - "used_for":{ - "type":"string", - "title":"Where the setting is applied", - "description":"Set if this parameter is ONLY used at input 'generation' or for output 'losses'", - "enum":[ - "all", - "generation", - "losses" - ] - }, - "tooltip":{ - "type":"string", - "title":"UI tooltip", - "description":"Long description (optional)" - }, - "default":{ - "type":"string", - "title":"Default Event set", - "description":"Initial setting for event set" - }, - "options":{ - "type":"array", - "title":"Selection options for events", - "description":"Array of possible event sets", - "items":{ - "type":"object", - "title":"Selection option element", - "description":"Event sets option", - "additionalProperties":false, - "properties":{ - "id":{ - "type":"string", - "title":"event set suffix", - "description":"String value used to select an event set", - "minLength":1 - }, - "desc":{ - "type":"string", - "title":"Event set description", - "description":"UI description for selection", - "minLength":1 - }, - "number_of_events":{ - "type":"integer", - "title":"The number of events", - "description":"The total number of events in the set. (optional)", - "minimum":0 - }, - "valid_occurrence_ids":{ - "type":"array", - "title":"Supported occurrence files", - "description":"An optional list of viable occurrence file ids to use with this event set", - "items":{ - "type":"string", - "minLength":1 - } - }, - "valid_perspectives":{ - "type":"array", - "title":"Supported loss perspectives ", - "description":"If set, this event set only supports the given output perspectives. 'gul': ground up losses, 'il': insured losses, 'ri': reinsurance losses.", - "items":{ - "type":"string", - "enum":[ - "gul", - "il", - "ri" - ] - } - }, - "valid_metrics":{ - "type":"array", - "title":"Supported output metrics", - "description":"If set, this event set only supports the given output metrics, matches the output types set in the summaries section on an `analysis_settings.json` file. 
Example: 'valid_metrics':['ptl','elt'] means that only valid summary outputs are 'eltcalc' and 'pltcalc'.", - "items":{ - "type":"string", - "enum":[ - "aal", - "elt", - "plt", - "lec", - "aep", - "oep", - "summarycalc", - "full_uncertainty_aep", - "full_uncertainty_oep", - "sample_mean_aep", - "sample_mean_oep", - "wheatsheaf_aep", - "wheatsheaf_mean_aep", - "wheatsheaf_mean_oep", - "wheatsheaf_oep", - - "ord_output", - "elt_sample", - "elt_quantile", - "elt_moment", - "plt_sample", - "plt_quantile", - "plt_moment", - "alt_period", - "alct_convergence", - "ept", - "ept_full_uncertainty_aep", - "ept_full_uncertainty_oep", - "ept_mean_sample_aep", - "ept_mean_sample_oep", - "ept_per_sample_mean_aep", - "ept_per_sample_mean_oep", - "psept", - "psept_aep", - "psept_oep" - ] - } - }, - "tooltip":{ - "type":"string", - "title":"UI tooltip", - "description":"Long description (optional)" - } - }, - "required":[ - "id", - "desc" - ] - } - } - }, - "required":[ - "name", - "desc", - "default", - "options" - ] - }, - "event_occurrence_id":{ - "title":"Occurrence set selector", - "description":"The 'id' from options is used as a file suffix' occurrence_.bin", - "type":"object", - "uniqueItems":false, - "additionalProperties":false, - "properties":{ - "name":{ - "type":"string", - "title":"UI Option", - "description":"UI name for selection", - "minLength":1 - }, - "desc":{ - "type":"string", - "title":"Short description", - "description":"UI description for selection" - }, - "used_for":{ - "type":"string", - "title":"Where the setting is applied", - "description":"Set if this parameter is ONLY used at input 'generation' or for output 'losses'", - "enum":[ - "all", - "generation", - "losses" - ] - }, - "tooltip":{ - "type":"string", - "title":"UI tooltip", - "description":"Long description (optional)" - }, - "default":{ - "type":"string", - "title":"Default occurrence file", - "description":"Initial setting for occurrence" - }, - "options":{ - "type":"array", - "title":"Selection options for occurrence", - "description":"Array of possible occurrence sets", - "items":{ - "type":"object", - "title":"Selection option element", - "description":"Occurrence set options", - "additionalProperties":false, - "properties":{ - "id":{ - "type":"string", - "title":"occurrence set suffix", - "description":"String value used to select an occurrence set", - "minLength":1 - }, - "desc":{ - "type":"string", - "title":"Occurrence set description", - "description":"UI description for selection", - "minLength":1 - }, - "tooltip":{ - "type":"string", - "title":"UI tooltip", - "description":"Long description (optional)" - }, - "max_periods":{ - "type":"integer", - "title":"Max periods", - "description":"Maximum periods for this occurrence set", - "minimum":1 - } - }, - "required":[ - "id", - "desc" - ] - } - } - }, - "required":[ - "name", - "desc", - "default", - "options" - ] - }, - "valid_output_perspectives":{ - "type":"array", - "title":"Globally supported loss perspectives", - "description":"If set, the model only supports the given output perspectives. This can be overridden per event set using the 'valid_perspectives' field. 'gul': ground up losses, 'il': insured losses, 'ri': reinsurance losses.", - "items":{ - "type":"string", - "enum":[ - "gul", - "il", - "ri" - ] - } - }, - "valid_output_metrics":{ - "type":"array", - "title":"Globally supported output metrics", - "description":"If set, the model only supports the given output metrics. This can be overridden per event set using the 'valid_metrics' field. 
Values must match the output options in the summaries section from the `analysis_settings.json` file. Example: 'valid_output_metrics':['ptl','elt'].", - "items":{ - "type":"string", - "enum":[ - "aal", - "elt", - "plt", - "lec", - "aep", - "oep", - "summarycalc", - "full_uncertainty_aep", - "full_uncertainty_oep", - "sample_mean_aep", - "sample_mean_oep", - "wheatsheaf_aep", - "wheatsheaf_mean_aep", - "wheatsheaf_mean_oep", - "wheatsheaf_oep", - "ord_output", - "elt_sample", - "elt_quantile", - "elt_moment", - "plt_sample", - "plt_quantile", - "plt_moment", - "alt_period", - "alct_convergence", - "ept", - "ept_full_uncertainty_aep", - "ept_full_uncertainty_oep", - "ept_mean_sample_aep", - "ept_mean_sample_oep", - "ept_per_sample_mean_aep", - "ept_per_sample_mean_oep", - "psept", - "psept_aep", - "psept_oep" - ] - } - }, - "string_parameters":{ - "title":"Single string paramters", - "type":"array", - "uniqueItems":true, - "items":{ - "type":"object", - "uniqueItems":false, - "title":"String options", - "description":"User selected string value", - "additionalProperties":false, - "properties":{ - "name":{ - "type":"string", - "title":"UI Option", - "description":"UI name for selection", - "minLength":1 - }, - "desc":{ - "type":"string", - "title":"Short description", - "description":"UI description for selection", - "minLength":1 - }, - "used_for":{ - "type":"string", - "title":"Where the setting is applied", - "description":"Set if this parameter is ONLY used at input 'generation' or for output 'losses'", - "enum":[ - "all", - "generation", - "losses" - ] - }, - "tooltip":{ - "type":"string", - "title":"UI tooltip", - "description":"Long description (optional)" - }, - "default":{ - "type":"string", - "title":"Initial string", - "description":"Default 'string' for variable" - } - }, - "required":[ - "name", - "desc", - "default" - ] - } - }, - "list_parameters":{ - "title":"List of strings parameters", - "type":"array", - "uniqueItems":true, - "items":{ - "type":"object", - "uniqueItems":false, - "title":"List options", - "description":"User selected list values", - "additionalProperties":false, - "properties":{ - "name":{ - "type":"string", - "title":"UI Option", - "description":"UI name for selection", - "minLength":1 - }, - "desc":{ - "type":"string", - "title":"Short description", - "description":"UI description for selection", - "minLength":1 - }, - "used_for":{ - "type":"string", - "title":"Where the setting is applied", - "description":"Set if this parameter is ONLY used at input 'generation' or for output 'losses'", - "enum":[ - "all", - "generation", - "losses" - ] - }, - "tooltip":{ - "type":"string", - "title":"UI tooltip", - "description":"Long description (optional)" - }, - "default":{ - "type":"array", - "title":"Default List value", - "description":"Default 'list' set for variable", - "items":{ - "type":"string" - } - } - }, - "required":[ - "name", - "desc", - "default" - ] - } - }, - "dictionary_parameters":{ - "title":"Generic dictionary parameters", - "type":"array", - "uniqueItems":true, - "items":{ - "type":"object", - "uniqueItems":false, - "title":"Dictionary option", - "description":"User selected dictionarys", - "additionalProperties":false, - "properties":{ - "name":{ - "type":"string", - "title":"UI Option", - "description":"UI name for selection", - "minLength":1 - }, - "desc":{ - "type":"string", - "title":"Short description", - "description":"UI description for selection", - "minLength":1 - }, - "used_for":{ - "type":"string", - "title":"Where the setting is 
applied", - "description":"Set if this parameter is ONLY used at input 'generation' or for output 'losses'", - "enum":[ - "all", - "generation", - "losses" - ] - }, - "tooltip":{ - "type":"string", - "title":"UI tooltip", - "description":"Long description (optional)" - }, - "default":{ - "type":"object", - "title":"Default dictionary", - "description":"Defaults set for variable" - } - }, - "required":[ - "name", - "desc", - "default" - ] - } - }, - "boolean_parameters":{ - "title":"Boolean parameters", - "type":"array", - "uniqueItems":true, - "items":{ - "type":"object", - "uniqueItems":false, - "title":"Boolean option", - "description":"User selected boolean option", - "additionalProperties":false, - "properties":{ - "name":{ - "type":"string", - "title":"UI Option", - "description":"UI name for selection", - "minLength":1 - }, - "desc":{ - "type":"string", - "title":"Short description", - "description":"UI description for selection", - "minLength":1 - }, - "used_for":{ - "type":"string", - "title":"Where the setting is applied", - "description":"Set if this parameter is ONLY used at input 'generation' or for output 'losses'", - "enum":[ - "all", - "generation", - "losses" - ] - }, - "tooltip":{ - "type":"string", - "title":"UI tooltip", - "description":"Long description (optional)" - }, - "default":{ - "type":"boolean", - "title":"Initial value", - "description":"Default 'value' set for variable" - } - }, - "required":[ - "name", - "desc", - "default" - ] - } - }, - "float_parameters":{ - "title":"Bounded float paramters", - "type":"array", - "uniqueItems":true, - "items":{ - "type":"object", - "uniqueItems":false, - "title":"Float option", - "description":"Select float value", - "additionalProperties":false, - "properties":{ - "name":{ - "type":"string", - "title":"UI Option", - "description":"UI name for selection", - "minLength":1 - }, - "desc":{ - "type":"string", - "title":"Short description", - "description":"UI description for selection", - "minLength":1 - }, - "used_for":{ - "type":"string", - "title":"Where the setting is applied", - "description":"Set if this parameter is ONLY used at input 'generation' or for output 'losses'", - "enum":[ - "all", - "generation", - "losses" - ] - }, - "tooltip":{ - "type":"string", - "title":"UI tooltip", - "description":"Long description (optional)" - }, - "default":{ - "type":"number", - "title":"Initial value", - "description":"Default 'value' set for float variable" - }, - "max":{ - "type":"number", - "title":"Maximum value", - "description":"Maximum Value for float variable" - }, - "min":{ - "type":"number", - "title":"Minimum value", - "description":"Minimum Value for float variable" - }, - "stepsize":{ - "type":"number", - "title":"Interval step size", - "description":"The slider widget's step interval for adjusting the float parameter." 
- } - }, - "required":[ - "name", - "desc", - "default", - "max", - "min" - ] - } - }, - "numeric_parameters":{ - "title":"unbounded numeric paramters", - "type":"array", - "description":"WARNING: option flagged for removal, superseded by `integer_parameters` and `float_parameters`", - "uniqueItems":true, - "items":{ - "type":"object", - "uniqueItems":false, - "title":"Numeric option", - "description":"Select float value", - "additionalProperties":false, - "properties":{ - "name":{ - "type":"string", - "title":"UI Option", - "description":"UI name for selection", - "minLength":1 - }, - "desc":{ - "type":"string", - "title":"Short description", - "description":"UI description for selection", - "minLength":1 - }, - "used_for":{ - "type":"string", - "title":"Where the setting is applied", - "description":"Set if this parameter is ONLY used at input 'generation' or for output 'losses'", - "enum":[ - "all", - "generation", - "losses" - ] - }, - "tooltip":{ - "type":"string", - "title":"UI tooltip", - "description":"Long description (optional)" - }, - "default":{ - "type":"number", - "title":"Initial value, integer or float", - "description":"Default integer or float 'value' set for variable" - } - }, - "required":[ - "name", - "desc", - "default" - ] - } - }, - "integer_parameters":{ - "title":"Integer paramters", - "type":"array", - "uniqueItems":true, - "items":{ - "type":"object", - "uniqueItems":false, - "title":"Integer option", - "description":"Select float value", - "additionalProperties":false, - "properties":{ - "name":{ - "type":"string", - "title":"UI Option", - "description":"UI name for selection", - "minLength":1 - }, - "desc":{ - "type":"string", - "title":"Short description", - "description":"UI description for selection", - "minLength":1 - }, - "used_for":{ - "type":"string", - "title":"Where the setting is applied", - "description":"Set if this parameter is ONLY used at input 'generation' or for output 'losses'", - "enum":[ - "all", - "generation", - "losses" - ] - }, - "tooltip":{ - "type":"string", - "title":"UI tooltip", - "description":"Long description (optional)" - }, - "default":{ - "type":"integer", - "title":"Initial integer value", - "description":"Default integer 'value' set for variable" - } - }, - "required":[ - "name", - "desc", - "default" - ] - } - }, - "dropdown_parameters":{ - "title":"Generic dropdown paramters", - "type":"array", - "uniqueItems":true, - "items":{ - "title":"dropdown option selector", - "description":"The 'id' field is mapped into the analysis settings as 'paramter_name': ''", - "type":"object", - "uniqueItems":false, - "additionalProperties":false, - "properties":{ - "name":{ - "type":"string", - "title":"UI Option", - "description":"UI name for selection", - "minLength":1 - }, - "desc":{ - "type":"string", - "title":"Short description", - "description":"UI description for selection", - "minLength":1 - }, - "used_for":{ - "type":"string", - "title":"Where the setting is applied", - "description":"Set if this parameter is ONLY used at input 'generation' or for output 'losses'", - "enum":[ - "all", - "generation", - "losses" - ] - }, - "tooltip":{ - "type":"string", - "title":"UI tooltip", - "description":"Long description (optional)" - }, - "default":{ - "type":"string", - "title":"Default Event set", - "description":"Initial setting for dropdown option" - }, - "options":{ - "type":"array", - "title":"Selection options", - "description":"Array of possible event sets", - "items":{ - "type":"object", - "title":"Option element", - 
"description":"Dropdown option", - "additionalProperties":false, - "properties":{ - "id":{ - "type":"string", - "title":"event set suffix", - "description":"String value used to select an event set", - "minLength":1 - }, - "desc":{ - "type":"string", - "title":"Short description", - "description":"UI description for selection", - "minLength":1 - }, - "tooltip":{ - "type":"string", - "title":"UI tooltip", - "description":"Long description (optional)" - } - }, - "required":[ - "id", - "desc" - ] - } - } - }, - "required":[ - "name", - "desc", - "default", - "options" - ] - } - }, - "multi_parameter_options":{ - "title":"Multiple Parameter option", - "description":"Sets of parameters with pre-assigned values", - "type":"array", - "uniqueItems":true, - "items":{ - "type":"object", - "uniqueItems":false, - "title":"Parameter group option", - "additionalProperties":false, - "properties":{ - "name":{ - "type":"string", - "title":"UI Option", - "description":"UI name for selection", - "minLength":1 - }, - "desc":{ - "type":"string", - "title":"Short group description", - "description":"UI description for selection", - "minLength":1 - }, - "used_for":{ - "type":"string", - "title":"Where the setting is applied", - "description":"Set if this parameter is ONLY used at input 'generation' or for output 'losses'", - "enum":[ - "all", - "generation", - "losses" - ] - }, - "tooltip":{ - "type":"string", - "title":"UI tooltip", - "description":"Long description (optional)" - }, - "config":{ - "type":"object", - "title":"Parameter Group configuration", - "description":"JSON object holding : pairs" - } - }, - "required":[ - "name", - "desc", - "config" - ] - } - }, - "parameter_groups":{ - "title":"Parameter Groups", - "type":"array", - "uniqueItems":true, - "items":{ - "title":"Grouping element", - "description":"Defines which parameter are related", - "type":"object", - "uniqueItems":false, - "additionalProperties":false, - "properties":{ - "name":{ - "type":"string", - "title":"Group name", - "description":"Reference for the group element", - "minLength":1 - }, - "desc":{ - "type":"string", - "title":"Short description", - "description":"UI description for selection", - "minLength":1 - }, - "used_for":{ - "type":"string", - "title":"Where the setting is applied", - "description":"Set if this parameter is ONLY used at input 'generation' or for output 'losses'", - "enum":[ - "all", - "generation", - "losses" - ] - }, - "tooltip":{ - "type":"string", - "title":"UI tooltip", - "description":"Long description (optional)" - }, - "priority_id":{ - "type":"integer", - "title":"Display priority", - "description":"Set which parameter groups to display first" - }, - "presentation_order":{ - "type":"array", - "title":"presentation of grouped parameters", - "description":"List of parameters reference by their 'name' property", - "items":{ - "type":"string", - "minItems":1 - } - }, - "collapsible":{ - "title":"Collapsible option for UI", - "description":"Boolean to mark if this parameter group is collapsible", - "type":"boolean" - }, - "default_collapsed":{ - "title":"Default Collapsed State", - "description":"Boolean to mark if parameter group starts collapsed", - "type":"boolean" - } - }, - "required":[ - "name", - "desc", - "priority_id", - "presentation_order" - ] - } - } - } - }, - "lookup_settings":{ - "type":"object", - "title":"Model Lookup options", - "description":"Model lookup section", - "properties":{ - "supported_perils":{ - "type":"array", - "title":"Supported OED perils", - "description":"Valid Peril 
codes for this model", - "additionalProperties":false, - "minProperties":1, - "items":{ - "type":"object", - "title":"Selection Item", - "description":"", - "additionalProperties":false, - "properties":{ - "id":{ - "type":"string", - "title":"OED peril", - "description":"OED three letter peril code", - "minLength":3, - "maxLength":3 - }, - "desc":{ - "type":"string", - "title":"OED peril description", - "description":"Short string describing the peril", - "minLength":1 - }, - "peril_correlation_group": { - "type": "integer", - "title": "peril correlated group ID", - "description": "the peril correlated group ID", - "minLength": 1 - } - }, - "required":[ - "id", - "desc" - ] - } - } - } - }, - "correlation_settings": { - "type": "array", - "title": "Correlation Settings", - "description": "The Correlation Settings", - "items": { - "type": "object", - "properties": { - "peril_correlation_group": { - "type": "integer", - "title": "Peril Correlation Group", - "description": "The Peril Correlation Group", - "minLength": 1 - }, - "correlation_value": { - "type": "string", - "title": "Correlation Value", - "description": "The Correlation Value" - } - } - } - }, - "data_settings":{ - "type":"object", - "title":"Model data settings", - "description":"Additional data options for a model", - "additionalProperties":false, - "properties":{ - "supported_oed_versions": { - "title":"Supported OED Versions", - "description":"A list of OED releases that this model supports.", - "type":"array", - "items":{ - "type":"string", - "minLength":1 - } - }, - "uses_model_files": { - "type":"boolean", - "title":"Requires Model Data", - "description":"Marks the model as requiring external model data." - }, - "model_data_version": { - "type":"string", - "title":"Model data version", - "description":"Version ID of the Model data.", - "minLength":1 - }, - "keys_data_version": { - "type":"string", - "title":"Keys data version", - "description":"Version ID of the lookup keys data.", - "minLength":0 - }, - "worker_image": { - "type":"string", - "title":"Worker docker image", - "description":"The model worker's docker image name", - "minLength":1 - }, - "worker_version": { - "type":"string", - "title":"Worker image version", - "description":"The model worker's version ID", - "minLength":1 - }, - "docs_version": { - "type":"string", - "title":"Documents version", - "description":"Version ID of the model documents", - "minLength":1 - }, - "test_files_version": { - "type":"string", - "title":"Test files version", - "description":"Version ID of the test files", - "minLength":1 - }, - "countries":{ - "title":"Supported countries", - "description":"A list of country codes that this model covers.", - "type":"array", - "items":{ - "type":"string", - "minLength":1 - } - }, - "mandatory_fields":{ - "title":"Mandatory OED fields", - "description":"List of OED fields that the model requires.", - "type":"array", - "items":{ - "type":"string", - "minLength":1 - } - }, - "additional_assets":{ - "type":"array", - "title":"Additional Assets", - "description":"Data assets available to the model", - "additionalProperties":false, - "items":{ - "type":"object", - "title":"Assets Details", - "description":"Name, version and location of data", - "additionalProperties":false, - "properties":{ - "name":{ - "type":"string", - "title":"Asset Name", - "description":"Name of the Asset file", - "minLength":1 - }, - "version":{ - "type":"string", - "title":"Asset version", - "description":"Version of Asset", - "minLength":1 - }, - "path":{ - 
"type":"string", - "title":"Asset path", - "description":"Filesystem path to the asset", - "minLength":1 - }, - "deploy":{ - "type":"string", - "title":"Deployment target", - "description":"Target for deploying this additional asset", - "minLength":1 - } - }, - "required":[ - "name" - ] - } - }, - "group_fields":{ - "title":"Group OED fields", - "description":"List of OED fields from location file to form groups", - "type":"array", - "items":{ - "type":"string" - } - }, - "datafile_selectors":{ - "type":"array", - "title":"Data_files Selector", - "description":"Select file_ids from the API endpoint `/v1/data_files/` to connect with an analyses run", - "additionalProperties":false, - "items":{ - "type":"object", - "title":"File Selection Parameter", - "description":"", - "additionalProperties":false, - "properties":{ - "name":{ - "type":"string", - "title":"Name reference", - "description":"name used store the selection under", - "minLength":1 - }, - "desc":{ - "type":"string", - "title":"Short description", - "description":"UI description for selection" - }, - "tooltip":{ - "type":"string", - "title":"UI tooltip", - "description":"Long description (optional)" - }, - "model_id":{ - "type":"integer", - "title":"API Model_id reference", - "description":"call to `/v1/models/{id}/data_files/` to filter files by model association" - }, - "allow_multiple":{ - "type":"boolean", - "title":"Multiple Files option", - "description":"Allow the user to link multiple files to an analyses." - }, - "search_filters":{ - "type":"array", - "title":"File options filter", - "description":"", - "additionalProperties":false, - "items":{ - "type":"object", - "title":"File Filters values", - "description":"Build a query string from the following fields", - "additionalProperties":false, - "properties":{ - "user":{ - "type":"string", - "title":"User filter", - "description":"File was uploaded by 'user'", - "minLength":1 - }, - "filename":{ - "type":"string", - "title":"Filename filter", - "description":"Filename exactly matches 'value'", - "minLength":1 - }, - "filename__contains":{ - "type":"string", - "title":"Filename contains filter", - "description":"'value' is a substring of filename", - "minLength":1 - }, - "file_description":{ - "type":"string", - "title":"File description filter", - "description":"File description matches 'value' exactly", - "minLength":1 - }, - "file_description__contains":{ - "type":"string", - "title":"File description contains filter", - "description":"'value' is a substring of a File's description'", - "minLength":1 - }, - "content_type":{ - "type":"string", - "title":"content type filter", - "description":"file exactly matches a MIME type", - "minLength":1 - }, - "content_type__contains":{ - "type":"string", - "title":"Content type contains filter", - "description":"A files MIME type contains 'value'", - "minLength":1 - } - } - } - } - }, - "required":[ - "name", - "desc", - "allow_multiple" - ] - } - } - } - } - }, - "required":[ - "model_settings", - "lookup_settings" - ] -} diff --git a/src/server/oasisapi/schemas/serializers.py b/src/server/oasisapi/schemas/serializers.py index 3a6f1827f..b14bb4791 100644 --- a/src/server/oasisapi/schemas/serializers.py +++ b/src/server/oasisapi/schemas/serializers.py @@ -8,16 +8,17 @@ 'ModelParametersSerializer', ] -import io -import os import json from rest_framework import serializers -import jsonschema +# import jsonschema from jsonschema.exceptions import ValidationError as JSONSchemaValidationError from jsonschema.exceptions import 
SchemaError as JSONSchemaError +from ods_tools.oed.setting_schema import ModelSettingSchema, AnalysisSettingSchema +from ods_tools.oed.common import OdsException + class TokenObtainPairResponseSerializer(serializers.Serializer): refresh_token = serializers.CharField(read_only=True) @@ -127,14 +128,10 @@ def update_links(link_prefix, d): d[k] = "{}{}".format(link_prefix, link) -def load_json_schema(json_schema_file, link_prefix=None): +def load_json_schema(schema, link_prefix=None): """ Load json schema stored in the .schema dir """ - schema_dir = os.path.dirname(os.path.abspath(__file__)) - schema_fp = os.path.join(schema_dir, json_schema_file) - with io.open(schema_fp, 'r', encoding='utf-8') as f: - schema = json.load(f) if link_prefix: update_links(link_prefix, schema) return schema @@ -147,27 +144,10 @@ def to_internal_value(self, data): def validate_json(self, data): try: - validator = jsonschema.Draft4Validator(self.schema) - validation_errors = [e for e in validator.iter_errors(data)] - - # Iteratre over all errors and raise as single exception - if validation_errors: - exception_msgs = {} - for err in validation_errors: - if err.path: - field = '-'.join([str(e) for e in err.path]) - elif err.schema_path: - field = '-'.join([str(e) for e in err.schema_path]) - else: - field = 'error' - - if field in exception_msgs: - exception_msgs[field].append(err.message) - else: - exception_msgs[field] = [err.message] - raise serializers.ValidationError(exception_msgs) - - except (JSONSchemaValidationError, JSONSchemaError) as e: + vaild, errors = self.schemaClass.validate(data, raise_error=False) + if not vaild: + raise serializers.ValidationError(errors) + except (JSONSchemaValidationError, JSONSchemaError, OdsException) as e: raise serializers.ValidationError(e.message) return self.to_internal_value(json.dumps(data)) @@ -175,14 +155,14 @@ def validate_json(self, data): class ModelParametersSerializer(JsonSettingsSerializer): class Meta: swagger_schema_fields = load_json_schema( - json_schema_file='model_settings.json', + schema=ModelSettingSchema().schema, link_prefix='#/definitions/ModelSettings' ) def __init__(self, *args, **kwargs): super(ModelParametersSerializer, self).__init__(*args, **kwargs) - self.filenmame = 'model_settings.json' - self.schema = load_json_schema('model_settings.json') + self.filenmame = 'model_settings.json' # Store POSTED JSON using this fname + self.schemaClass = ModelSettingSchema() def validate(self, data): return super(ModelParametersSerializer, self).validate_json(data) @@ -191,32 +171,15 @@ def validate(self, data): class AnalysisSettingsSerializer(JsonSettingsSerializer): class Meta: swagger_schema_fields = load_json_schema( - json_schema_file='analysis_settings.json', + schema=AnalysisSettingSchema().schema, link_prefix='#/definitions/AnalysisSettings' ) def __init__(self, *args, **kwargs): super(AnalysisSettingsSerializer, self).__init__(*args, **kwargs) - self.filenmame = 'analysis_settings.json' - self.schema = load_json_schema('analysis_settings.json') + self.filenmame = 'analysis_settings.json' # Store POSTED JSON using this fname + self.schemaClass = AnalysisSettingSchema() def validate(self, data): - if 'analysis_settings' in data: - data = data['analysis_settings'] - - # Note: Workaround for to support workers 1.15.x and older. 
With the analysis settings schema change the workers with fail - These are added into existing files as a 'fix' so older workers can run without patching the worker schema - This *SHOULD* be removed at a later date once older models are not longer used - compatibility_field_map = { - "module_supplier_id": { - "updated_to": "model_supplier_id" - }, - "model_version_id": { - "updated_to": "model_name_id" - }, - } - for key in compatibility_field_map: - if key not in data: - data[key] = data[compatibility_field_map[key]['updated_to']] - + data = self.schemaClass.compatibility(data) return super(AnalysisSettingsSerializer, self).validate_json(data) From 9ac39ebabcce10ca851f04c80ed4f9bab66731da Mon Sep 17 00:00:00 2001 From: sambles Date: Wed, 15 Mar 2023 16:03:13 +0000 Subject: [PATCH 12/33] Add retry to model reg task (#767) --- src/conf/celeryconf.py | 6 ++++++ src/server/oasisapi/analyses/tasks.py | 3 ++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/conf/celeryconf.py b/src/conf/celeryconf.py index 93c48c0bb..a8fbc1e53 100644 --- a/src/conf/celeryconf.py +++ b/src/conf/celeryconf.py @@ -52,3 +52,9 @@ #: Disable celery task prefetch #: https://docs.celeryproject.org/en/stable/userguide/configuration.html#std-setting-worker_prefetch_multiplier CELERYD_PREFETCH_MULTIPLIER = 1 + +worker_task_kwargs = { + 'autoretry_for': (Exception,), + 'max_retries': 2, # The task will be run max_retries + 1 times + 'default_retry_delay': 5, # A small delay to recover from temporary bad states +} diff --git a/src/server/oasisapi/analyses/tasks.py b/src/server/oasisapi/analyses/tasks.py index d65f619b5..264f34337 100644 --- a/src/server/oasisapi/analyses/tasks.py +++ b/src/server/oasisapi/analyses/tasks.py @@ -32,6 +32,7 @@ from src.server.oasisapi.files.upload import wait_for_blob_copy from ..celery import celery_app +from ....conf import celeryconf as celery_conf logger = get_task_logger(__name__) @@ -274,7 +275,7 @@ def log_worker_monitor(sender, **k): logger.info('AWS_IS_GZIPPED: {}'.format(settings.AWS_IS_GZIPPED)) -@celery_app.task(name='run_register_worker') +@celery_app.task(name='run_register_worker', **celery_conf.worker_task_kwargs) def run_register_worker(m_supplier, m_name, m_id, m_settings, m_version): logger.info('model_supplier: {}, model_name: {}, model_id: {}'.format(m_supplier, m_name, m_id)) try: From f92ab12223046b05848afae83803452d816954fe Mon Sep 17 00:00:00 2001 From: sambles Date: Wed, 15 Mar 2023 17:25:27 +0000 Subject: [PATCH 13/33] Add ods-tools into release notes (#772) * shoehorn ods-tools into release note build, could do with a refactor at some time, not now * pep * Update publish script and release notes * test platform release * Add missing ods-input * less verbose prompts * Set version 1.27.2rc1 * Update changelog * Revert "test platform release" This reverts commit 5d250668c1965792bc02b4efd425439ed5c99397. * Revert "Set version 1.27.2rc1" This reverts commit 9595f109aaaae786e6bb5cdf5f3868e2348db795. * Revert "Update changelog" This reverts commit fe9647067d7f644e2e709b4c8e32dbe4f6996ffd.
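For context on the retry settings introduced in PATCH 12 above, a minimal, self-contained sketch of how Celery applies those keyword arguments to a task; the app name, broker and task body are illustrative assumptions, not part of this change:

from celery import Celery

# Same keyword arguments as the worker_task_kwargs added to src/conf/celeryconf.py
worker_task_kwargs = {
    'autoretry_for': (Exception,),   # any exception raised by the task triggers a retry
    'max_retries': 2,                # the task body runs at most 1 + 2 times in total
    'default_retry_delay': 5,        # seconds to wait between attempts
}

app = Celery('retry_demo', broker='memory://')   # hypothetical standalone app for illustration

@app.task(name='register_model_demo', **worker_task_kwargs)
def register_model_demo(supplier, name, version):
    # A transient failure here (e.g. the API being briefly unreachable) is retried
    # automatically with the limits above; after the final attempt the exception
    # propagates and the task is marked as failed.
    raise RuntimeError('registration endpoint unavailable')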
--------- Co-authored-by: awsbuild --- .github/workflows/publish.yml | 70 +++++++++++++++++++++++++++++++++-- .github/workflows/version.yml | 24 +++++++++--- scripts/update-changelog.py | 22 ++++++++++- 3 files changed, 105 insertions(+), 11 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 9380fccb8..b6500437d 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -29,6 +29,14 @@ on: description: '(OVERRIDE) The previous oasislmf version for changelog [semvar]' required: false + ods_tools_release: + description: '(OVERRIDE) The ods-tools version in this release [semvar]' + required: true + + ods_tools_release_prev: + description: '(OVERRIDE) The previous ods-tools version for changelog [semvar]' + required: false + ktools_release: description: '(OVERRIDE) The ktools version in this release [semvar] "v{n}.{n}.{n}"' required: false @@ -46,6 +54,7 @@ jobs: with: platform_version: ${{ inputs.release_tag }} oasislmf_version: ${{ inputs.oasislmf_release }} + ods_tools_version: ${{ inputs.ods_tools_release }} build_schema: uses: ./.github/workflows/build-schema.yml @@ -88,19 +97,25 @@ jobs: env: pre_release: ${{ inputs.pre_release }} release_tag: ${{ inputs.release_tag }} - oasislmf_release: ${{ inputs.oasislmf_release }} prev_release_tag: ${{ inputs.prev_release_tag }} + + oasislmf_release: ${{ inputs.oasislmf_release }} oasislmf_release_prev: ${{ inputs.oasislmf_release_prev }} + ods_tools_release: ${{ inputs.ods_tools_release }} + ods_tools_release_prev: ${{ inputs.ods_tools_release_prev }} + ktools_release: ${{ inputs.ktools_release }} ktools_release_prev: ${{ inputs.ktools_release_prev }} dir_platform: ${{ github.workspace }}/platform dir_oasislmf: ${{ github.workspace }}/oasislmf + dir_ods_tools: ${{ github.workspace }}/ods_tools dir_ktools: ${{ github.workspace }}/ktools branch_platform: ${{ github.ref_name }} branch_oasislmf: 'master' + branch_ods_tools: 'master' branch_ktools: 'master' steps: @@ -159,6 +174,14 @@ jobs: ref: ${{ env.branch_oasislmf }} fetch-depth: 0 + - name: Checkout ODS_Tools + uses: actions/checkout@v3 + with: + path: ${{ env.dir_ods_tools }} + repository: Oasislmf/ODS_Tools + ref: ${{ env.branch_ods_tools }} + fetch-depth: 0 + - name: Checkout Ktools uses: actions/checkout@v3 with: @@ -175,10 +198,10 @@ jobs: tag=$( ${{ env.dir_platform }}/scripts/find_release.sh -p "${{ env.pre_release }}" -t 1) echo "prev_release_tag=$tag" >> $GITHUB_ENV + - name: oasislmf tag (OVERRIDE) working-directory: ${{ env.dir_oasislmf }} - #if: ! 
inputs.oasislmf_release == '' - if: env.oasislmf_release != '' # TESTING + if: env.oasislmf_release != '' run: | git checkout ${{ env.branch_oasislmf }} git reset --hard $(git rev-list -n 1 ${{ env.oasislmf_release }} ) @@ -197,6 +220,29 @@ jobs: tag=$( ${{ env.dir_platform }}/scripts/find_release.sh -p "${{ env.pre_release }}" -t 2) echo "oasislmf_release_prev=$tag" >> $GITHUB_ENV + + - name: ods_tools tag (OVERRIDE) + working-directory: ${{ env.dir_ods_tools }} + if: env.ods_tools_release != '' + run: | + git checkout ${{ env.branch_ods_tools }} + git reset --hard $(git rev-list -n 1 ${{ env.ods_tools_release }} ) + + - name: Find 'ods_tools_release' + if: inputs.ods_tools_release == '' + working-directory: ${{ env.dir_ods_tools }} + run: | + tag=$( ${{ env.dir_platform }}/scripts/find_release.sh -p "${{ env.pre_release }}" -t 1) + echo "ods_tools_release=$tag" >> $GITHUB_ENV + + - name: Find 'ods_tools_release_prev' + if: inputs.ods_tools_release_prev == '' + working-directory: ${{ env.dir_ods_tools }} + run: | + tag=$( ${{ env.dir_platform }}/scripts/find_release.sh -p "${{ env.pre_release }}" -t 2) + echo "ods_tools_release_prev=$tag" >> $GITHUB_ENV + + - name: Ktools tag (OVERRIDE) working-directory: ${{ env.dir_ktools }} #if: ! inputs.ktools_release == '' @@ -219,6 +265,8 @@ jobs: tag=$( ${{ env.dir_platform }}/scripts/find_release.sh -p "${{ env.pre_release }}" -t 2 -v 'v') echo "ktools_release_prev=$tag" >> $GITHUB_ENV + + ## TAG & assests - name: Login to Docker Hub uses: docker/login-action@v2 @@ -281,10 +329,11 @@ jobs: echo "BUILD_VER: $BUILD_VER stored in image '${{ needs.build_piwind.outputs.piwind_image }}' dosn't match RELEASE_TAG: $RELEASE_VER" && exit $ERROR_CODE fi - - name: Get ktools & oasislmf versions + - name: Get component versions continue-on-error: true run: | docker run --entrypoint "oasislmf" ${{ needs.build_images.outputs.worker_image }} 'version' > OASISLMF_VERSION + docker run --entrypoint "python3" ${{ needs.build_images.outputs.worker_image }} '-cimport ods_tools; print(ods_tools.__version__)' > ODS_VERSION docker run --entrypoint "eve" ${{ needs.build_images.outputs.worker_image }} '-v' 2> KTOOLS_VERSION - name: 'store OASISLMF_VERSION' @@ -294,6 +343,13 @@ jobs: path: OASISLMF_VERSION retention-days: 5 + - name: 'store ODS_VERSION' + uses: actions/upload-artifact@v3 + with: + name: ODS_VERSION + path: ODS_VERSION + retention-days: 5 + - name: 'store KTOOLS_VERSION' uses: actions/upload-artifact@v3 with: @@ -304,6 +360,9 @@ jobs: - name: Check for Oasislmf version match run: test "$(cat OASISLMF_VERSION)" = ${{ env.oasislmf_release }} || exit 1 + - name: Check for ods-tools version match + run: test "$(cat ODS_VERSION)" = ${{ env.ods_tools_release }} || exit 1 + - name: Check for ktools version match run: | KTOOL_VER=$(head -1 KTOOLS_VERSION | grep -oP "(\d+)\.(\d+)\.(\d+)rc(\d+)|(\d+)\.(\d+)\.(\d+)") @@ -365,6 +424,9 @@ jobs: --lmf-repo-path ${{ env.dir_oasislmf }} \ --lmf-from-tag ${{ env.oasislmf_release_prev }} \ --lmf-to-tag ${{ env.oasislmf_release }} \ + --ods-repo-path ${{ env.dir_ods_tools }} \ + --ods-from-tag ${{ env.ods_tools_release_prev }} \ + --ods-to-tag ${{ env.ods_tools_release }} \ --ktools-repo-path ${{ env.dir_ktools }} \ --ktools-from-tag ${{ env.ktools_release_prev }} \ --ktools-to-tag ${{ env.ktools_release }} \ diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml index 48227af39..4d9c683e6 100644 --- a/.github/workflows/version.yml +++ b/.github/workflows/version.yml @@ -4,25 +4,28 @@ on: workflow_dispatch: 
inputs: platform_version: - description: 'Update version, semvar, input "{n}.{n}.{n}" or for pre-release "{n}.{n}.{n}rc{n}" [3.0.0, 3.0.0rc1] ' + description: 'Update platform version [semvar]' required: true - default: "" oasislmf_version: - description: 'Update the package version, semvar, input "{n}.{n}.{n}" or for pre-release "{n}.{n}.{n}rc{n}" [3.0.0, 3.0.0rc1] ' + description: 'Update oasislmf package [semvar]' + required: false + ods_tools_version: + description: 'Update the ods-tools package [semvar]' required: false - default: "" workflow_call: inputs: platform_version: description: 'Update version, semvar, input "{n}.{n}.{n}" or for pre-release "{n}.{n}.{n}rc{n}" [3.0.0, 3.0.0rc1] ' required: true - default: "" type: string oasislmf_version: description: 'Update the package version' required: false - default: "" + type: string + ods_tools_version: + description: 'Update the package version' + required: false type: string jobs: @@ -92,6 +95,15 @@ jobs: git add $fl.txt done + - name: Update ods-tools Version + if: inputs.ods_tools_version != '' + run: | + requ_list=( 'requirements-server' 'requirements-worker' 'requirements') + for fl in "${requ_list[@]}"; do + pip-compile --upgrade-package ods-tools==${{ inputs.ods_tools_version }} $fl.in + git add $fl.txt + done + - name: Git Commit run: | [[ -z $(git status -s) ]] || git commit -m "Set version ${{ inputs.platform_version }}" diff --git a/scripts/update-changelog.py b/scripts/update-changelog.py index f275b7747..9472e85e8 100755 --- a/scripts/update-changelog.py +++ b/scripts/update-changelog.py @@ -324,12 +324,13 @@ def create_changelog(self, github_data, format_markdown=False): changelog_lines = list(map(lambda l: l + "\n", changelog_lines)) return changelog_lines - def release_plat_header(self, tag_platform=None, tag_oasislmf=None, tag_oasisui=None, tag_ktools=None): + def release_plat_header(self, tag_platform=None, tag_oasislmf=None, tag_ods=None, tag_oasisui=None, tag_ktools=None): """ Create the header for the OasisPlatform release notes """ t_plat = tag_platform if tag_platform else self._get_tag('OasisPlatform') t_lmf = tag_oasislmf if tag_oasislmf else self._get_tag('OasisLMF') + t_ods = tag_ods if tag_ods else self._get_tag('ODS_Tools') t_ktools = tag_ktools if tag_ktools else self._get_tag('ktools') t_ui = tag_oasisui if tag_oasisui else self._get_tag('OasisUI') @@ -344,6 +345,7 @@ def release_plat_header(self, tag_platform=None, tag_oasislmf=None, tag_oasisui= plat_header.append(f'* [coreoasis/oasisui_proxy:{t_ui}](https://hub.docker.com/r/coreoasis/oasisui_proxy/tags?name={t_ui})\n') plat_header.append('## Components\n') plat_header.append(f'* [oasislmf {t_lmf}](https://github.com/OasisLMF/OasisLMF/releases/tag/{t_lmf})\n') + plat_header.append(f'* [ods-tools {t_ods}](https://github.com/OasisLMF/OasisLMF/releases/tag/{t_ods})\n') plat_header.append(f'* [ktools {t_ktools}](https://github.com/OasisLMF/ktools/releases/tag/{t_ktools})\n') plat_header.append(f'* [Oasis UI {t_ui}](https://github.com/OasisLMF/OasisUI/releases/tag/{t_ui})\n') plat_header.append('\n') @@ -504,6 +506,9 @@ def build_release(repo, from_tag, to_tag, github_token, output_path, local_repo_ @click.option('--lmf-repo-path', type=click.Path(exists=False), default=None, help=' Path to local git repository, used to skip clone step (optional) ') @click.option('--lmf-from-tag', default=None, help='Github tag to track changes from') @click.option('--lmf-to-tag', default=None, help='Github tag to track changes to') +@click.option('--ods-repo-path', 
type=click.Path(exists=False), default=None, help=' Path to local git repository, used to skip clone step (optional) ') +@click.option('--ods-from-tag', default=None, help='Github tag to track changes from') +@click.option('--ods-to-tag', default=None, help='Github tag to track changes to') @click.option('--ktools-repo-path', type=click.Path(exists=False), default=None, help=' Path to local git repository, used to skip clone step (optional) ') @click.option('--ktools-from-tag', default=None, help='Github tag to track changes from') @click.option('--ktools-to-tag', default=None, help='Github tag to track changes to') @@ -512,12 +517,19 @@ def build_release(repo, from_tag, to_tag, github_token, output_path, local_repo_ def build_release_platform(platform_repo_path, platform_from_tag, platform_to_tag, + lmf_repo_path, lmf_from_tag, lmf_to_tag, + + ods_repo_path, + ods_from_tag, + ods_to_tag, + ktools_repo_path, ktools_from_tag, ktools_to_tag, + github_token, output_path): """ @@ -525,17 +537,22 @@ def build_release_platform(platform_repo_path, """ logger = logging.getLogger() noteBuilder = ReleaseNotesBuilder(github_token=github_token) + plat_from = platform_from_tag if platform_from_tag else noteBuilder._get_tag(repo_name='OasisPlatform', idx=1) plat_to = platform_to_tag if platform_to_tag else noteBuilder._get_tag(repo_name='OasisPlatform', idx=0) lmf_from = lmf_from_tag if lmf_from_tag else noteBuilder._get_tag(repo_name='OasisLMF', idx=1) lmf_to = lmf_to_tag if lmf_to_tag else noteBuilder._get_tag(repo_name='OasisLMF', idx=0) + ods_from = ods_from_tag if ods_from_tag else noteBuilder._get_tag(repo_name='ODS_Tools', idx=1) + ods_to = ods_to_tag if ods_to_tag else noteBuilder._get_tag(repo_name='ODS_Tools', idx=0) ktools_from = ktools_from_tag if ktools_from_tag else noteBuilder._get_tag(repo_name='ktools', idx=1) ktools_to = ktools_to_tag if ktools_to_tag else noteBuilder._get_tag(repo_name='ktools', idx=0) + ui_to = noteBuilder._get_tag(repo_name='OasisUI', idx=0) # Load github data plat_data = noteBuilder.load_data(repo_name='OasisPlatform', local_path=platform_repo_path, tag_from=plat_from, tag_to=plat_to) lmf_data = noteBuilder.load_data(repo_name='OasisLMF', local_path=lmf_repo_path, tag_from=lmf_from, tag_to=lmf_to) + ods_data = noteBuilder.load_data(repo_name='ODS_Tools', local_path=ods_repo_path, tag_from=ods_from, tag_to=ods_to) ktools_data = noteBuilder.load_data(repo_name='ktools', local_path=ktools_repo_path, tag_from=ktools_from, tag_to=ktools_to) # Add title @@ -547,6 +564,7 @@ def build_release_platform(platform_repo_path, release_notes_data += noteBuilder.release_plat_header( tag_platform=plat_to, tag_oasislmf=lmf_to, + tag_ods=ods_to, tag_ktools=ktools_to, tag_oasisui=ui_to) @@ -554,12 +572,14 @@ def build_release_platform(platform_repo_path, release_notes_data += ["# Changelogs \n", "\n"] release_notes_data += noteBuilder.create_changelog(plat_data, format_markdown=True) release_notes_data += noteBuilder.create_changelog(lmf_data, format_markdown=True) + release_notes_data += noteBuilder.create_changelog(ods_data, format_markdown=True) release_notes_data += noteBuilder.create_changelog(ktools_data, format_markdown=True) # Extract Feature notes from PR's release_notes_data += ["# Release Notes"] release_notes_data += noteBuilder.create_release_notes(plat_data) release_notes_data += noteBuilder.create_release_notes(lmf_data) + release_notes_data += noteBuilder.create_release_notes(ods_data) release_notes_data += noteBuilder.create_release_notes(ktools_data) 
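    # At this point release_notes_data holds, in order: the release title, the
    # platform header (docker image links plus component links, now including
    # ods-tools), one Changelog section per repository (OasisPlatform, OasisLMF,
    # ODS_Tools, ktools) and one Release Notes section per repository built from
    # the merged-PR descriptions. ODS_Tools is handled exactly like the other
    # component repos: its ods_from/ods_to tag range is resolved above and passed
    # through load_data(), create_changelog() and create_release_notes().
    # Note: the ods-tools line in release_plat_header() currently links to the
    # OasisLMF releases page rather than the ODS_Tools repository, which looks
    # like a copy/paste leftover.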
logger.info("RELEASE NOTES OUTPUT: \n" + "".join(release_notes_data)) From 45822145fecf64e0e29cff4270e6cd4b35d69fc6 Mon Sep 17 00:00:00 2001 From: Sam Gamble Date: Wed, 15 Mar 2023 18:04:59 +0000 Subject: [PATCH 14/33] Fix schema build --- .github/workflows/build-schema.yml | 3 ++- .github/workflows/publish.yml | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-schema.yml b/.github/workflows/build-schema.yml index 30cd026e1..4dc59cbd5 100644 --- a/.github/workflows/build-schema.yml +++ b/.github/workflows/build-schema.yml @@ -25,13 +25,14 @@ on: jobs: ods_tools: - #if: inputs.ods_branch != '' + if: inputs.ods_branch != '' uses: OasisLMF/ODS_Tools/.github/workflows/build.yml@develop secrets: inherit with: ods_branch: ${{ github.event_name != 'workflow_dispatch' && 'develop' || inputs.ods_branch }} schema: + if: ${{ ! failure() || ! cancelled() }} needs: ods_tools env: SCHEMA: 'reports/openapi-schema.json' diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index b6500437d..5b9262a27 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -163,7 +163,7 @@ jobs: with: path: ${{ env.dir_platform }} repository: Oasislmf/OasisPlatform - ref: ${{ env.branch_platform }} + ref: ${{ github.ref_name }} fetch-depth: 0 - name: Checkout Oasislmf From e9d3a0813b68dd88f2d6aa07b4b08d9b13d2fe4e Mon Sep 17 00:00:00 2001 From: awsbuild Date: Wed, 15 Mar 2023 18:40:23 +0000 Subject: [PATCH 15/33] Set version 1.27.2 --- VERSION | 2 +- requirements-server.txt | 12 +++++++++--- requirements-worker.txt | 9 ++++++--- requirements.txt | 9 ++++++--- 4 files changed, 22 insertions(+), 10 deletions(-) diff --git a/VERSION b/VERSION index 5db08bf2d..457f03854 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.27.0 +1.27.2 diff --git a/requirements-server.txt b/requirements-server.txt index 131a44510..cbcad5243 100644 --- a/requirements-server.txt +++ b/requirements-server.txt @@ -145,8 +145,12 @@ jmespath==1.0.1 # botocore jsonpickle==3.0.1 # via -r requirements-server.in +jsonref==1.1.0 + # via ods-tools jsonschema==4.17.3 - # via -r requirements-server.in + # via + # -r requirements-server.in + # ods-tools kombu==5.2.4 # via celery markdown==3.4.1 @@ -157,10 +161,12 @@ numpy==1.24.2 # via # pandas # pyarrow -ods-tools==3.0.3 +ods-tools==3.0.4 # via -r requirements-server.in packaging==23.0 - # via drf-yasg + # via + # drf-yasg + # ods-tools pandas==1.5.3 # via # -r requirements-server.in diff --git a/requirements-worker.txt b/requirements-worker.txt index 7a27721b8..e75f9f3b5 100644 --- a/requirements-worker.txt +++ b/requirements-worker.txt @@ -112,8 +112,10 @@ jmespath==1.0.1 # botocore joblib==1.2.0 # via scikit-learn +jsonref==1.1.0 + # via ods-tools jsonschema==4.17.3 - # via oasislmf + # via ods-tools kombu==5.2.4 # via celery llvmlite==0.39.1 @@ -141,14 +143,15 @@ numpy==1.22.4 # scikit-learn # scipy # shapely -oasislmf[extra]==1.27.1 +oasislmf[extra]==1.27.2 # via -r requirements-worker.in -ods-tools==3.0.3 +ods-tools==3.0.4 # via oasislmf packaging==23.0 # via # fastparquet # geopandas + # ods-tools # pytest pandas==1.5.3 # via diff --git a/requirements.txt b/requirements.txt index ba562e2de..dc9059821 100644 --- a/requirements.txt +++ b/requirements.txt @@ -245,10 +245,12 @@ joblib==1.2.0 # via scikit-learn jsonpickle==3.0.1 # via -r ./requirements-server.in +jsonref==1.1.0 + # via ods-tools jsonschema==4.17.3 # via # -r ./requirements-server.in - # oasislmf + # ods-tools kombu==5.2.4 # via celery llvmlite==0.39.1 @@ 
-284,9 +286,9 @@ numpy==1.22.4 # scikit-learn # scipy # shapely -oasislmf[extra]==1.27.1 +oasislmf[extra]==1.27.2 # via -r ./requirements-worker.in -ods-tools==3.0.3 +ods-tools==3.0.4 # via # -r ./requirements-server.in # oasislmf @@ -296,6 +298,7 @@ packaging==23.0 # drf-yasg # fastparquet # geopandas + # ods-tools # pyproject-api # pytest # tox From 55bcd44eab66032038d23f757feeb2374fdb6b22 Mon Sep 17 00:00:00 2001 From: sambles Date: Thu, 16 Mar 2023 17:07:27 +0000 Subject: [PATCH 16/33] Search all repo for tags, but limit the scope by env (#775) * Search all repo for tags, but limit the scope by env * Draft * Update test-images * tidy * Raise S3 clinet errors when uploading model settings * Worker not triggering retry --- .github/workflows/test-images.yml | 12 +++++++++-- scripts/find_latest.sh | 29 +++++++++++++++++++++++++++ src/conf/celeryconf.py | 2 +- src/server/oasisapi/analyses/tasks.py | 4 ++++ 4 files changed, 44 insertions(+), 3 deletions(-) create mode 100755 scripts/find_latest.sh diff --git a/.github/workflows/test-images.yml b/.github/workflows/test-images.yml index d59e0150f..8b8755789 100644 --- a/.github/workflows/test-images.yml +++ b/.github/workflows/test-images.yml @@ -37,6 +37,8 @@ on: env: pre_release: 'true' # look for pre-release when testing last released platform version + semver_major: '1' # Search for published images but limited to {semvar_major}.x.x + semver_minor: '27' # Search for published images but limited to x.{semvar_minor}.x jobs: build_images: @@ -69,12 +71,18 @@ jobs: with: fetch-depth: 0 - - name: Load last released tag + - name: Load latest release tag id: released_images run: | - if [[ -z "${{ inputs.last_release }}" ]]; then + # Find the latest release tag only from current branch + if [[ "${{ github.ref_name }}" == backports/ ]]; then tag=$( ./scripts/find_release.sh -p "${{ env.pre_release }}") echo "prev_release_tag=$tag" >> $GITHUB_OUTPUT + # Find tags release accross all branches, limited to matching semver + elif [[ -z "${{ inputs.last_release }}" ]]; then + tag=$( ./scripts/find_latest.sh -j "${{ env.semver_major }}" -i "${{ env.semver_minor }}" ) + echo "prev_release_tag=$tag" >> $GITHUB_OUTPUT + # Don't search, use the given input else echo "prev_release_tag=${{ inputs.last_release }}" >> $GITHUB_OUTPUT fi diff --git a/scripts/find_latest.sh b/scripts/find_latest.sh new file mode 100755 index 000000000..3d2f88877 --- /dev/null +++ b/scripts/find_latest.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +tag_select='1' +major='\d+' +minor='\d+' + +print_usage() { + echo "find_latest.sh - Find release tags from repo" + echo " " + echo "find_latest.sh [options] application [arguments]" + echo " " + echo "options:" + echo " --help show brief help" + echo " -j= " + echo " -i= " + exit 0 +} + +while getopts 'j:i:' flag; do + case "${flag}" in + j) major=${OPTARG} ;; + i) minor=${OPTARG} ;; + *) print_usage + exit 1 ;; + esac +done + +release_tags=( $(git tag --sort=creatordate | grep -oP "^($major)\.($minor)\.(\d+)$") ) +echo "${release_tags[-$tag_select]}" diff --git a/src/conf/celeryconf.py b/src/conf/celeryconf.py index a8fbc1e53..4c1f410d5 100644 --- a/src/conf/celeryconf.py +++ b/src/conf/celeryconf.py @@ -56,5 +56,5 @@ worker_task_kwargs = { 'autoretry_for': (Exception,), 'max_retries': 2, # The task will be run max_retries + 1 times - 'default_retry_delay': 5, # A small delay to recover from temporary bad states + 'default_retry_delay': 6, # A small delay to recover from temporary bad states } diff --git a/src/server/oasisapi/analyses/tasks.py 
b/src/server/oasisapi/analyses/tasks.py index 264f34337..4accfdcba 100644 --- a/src/server/oasisapi/analyses/tasks.py +++ b/src/server/oasisapi/analyses/tasks.py @@ -313,6 +313,8 @@ def run_register_worker(m_supplier, m_name, m_id, m_settings, m_version): except Exception as e: logger.info('Failed to update model settings:') logger.exception(str(e)) + if isinstance(e, S3_ClientError): + raise e # Update model version info if m_version: @@ -330,6 +332,8 @@ def run_register_worker(m_supplier, m_name, m_id, m_settings, m_version): except Exception as e: logger.exception(str(e)) logger.exception(model) + if isinstance(e, S3_ClientError): + raise e @celery_app.task(name='set_task_status') From cb0751f3fcbf7e328f2baade4ebf7d45614de4f6 Mon Sep 17 00:00:00 2001 From: sambles Date: Thu, 6 Apr 2023 14:05:38 +0100 Subject: [PATCH 17/33] test-ods PR (#783) Remove related_file_to_df and replace with ods-tools More flake8 remove testing breakpoint Fix return parquet and validate portfolio fix return parquet as csv Fix validate portfolio, missing exposure Switch unittest to ods-tool PR Fix unit test comparison and wrap ods exposure load with exception handler pep Revert tests to develop branch --- src/server/oasisapi/files/models.py | 13 ---- src/server/oasisapi/files/serializers.py | 20 +++--- src/server/oasisapi/files/views.py | 18 +++-- src/server/oasisapi/portfolios/models.py | 10 +-- .../portfolios/tests/test_portfolio.py | 69 ++++++++++++++----- 5 files changed, 81 insertions(+), 49 deletions(-) diff --git a/src/server/oasisapi/files/models.py b/src/server/oasisapi/files/models.py index 4f536cfec..bbeed8ec8 100644 --- a/src/server/oasisapi/files/models.py +++ b/src/server/oasisapi/files/models.py @@ -1,8 +1,6 @@ import os -import io from uuid import uuid4 -import pandas as pd from django.conf import settings from django.db import models @@ -10,17 +8,6 @@ from model_utils.models import TimeStampedModel -def related_file_to_df(RelatedFile): - if not RelatedFile: - return None - - RelatedFile.file.seek(0) - if RelatedFile.content_type == 'application/octet-stream': - return pd.read_parquet(io.BytesIO(RelatedFile.read())) - else: - return pd.read_csv(io.BytesIO(RelatedFile.read())) - - def random_file_name(instance, filename): if instance.store_as_filename: return filename diff --git a/src/server/oasisapi/files/serializers.py b/src/server/oasisapi/files/serializers.py index 03f04db66..09c7670d4 100644 --- a/src/server/oasisapi/files/serializers.py +++ b/src/server/oasisapi/files/serializers.py @@ -1,14 +1,16 @@ import logging import hashlib import io + from ods_tools.oed.exposure import OedExposure +from ods_tools.oed.common import OdsException from rest_framework import serializers from rest_framework.exceptions import ValidationError from django.core.files.uploadedfile import UploadedFile from django.conf import settings as django_settings -from .models import RelatedFile, related_file_to_df +from .models import RelatedFile logger = logging.getLogger('root') @@ -58,16 +60,15 @@ def validate(self, attrs): # Create dataframe from file upload if run_validation or convert_to_parquet: try: - uploaded_data_df = related_file_to_df(attrs['file']) - except Exception as e: - raise ValidationError('Failed to read uploaded data [{}]'.format(e)) + uploaded_exposure = OedExposure(**{ + EXPOSURE_ARGS[self.oed_field]: attrs['file'], + 'validation_config': django_settings.PORTFOLIO_VALIDATION_CONFIG + }) + except OdsException as e: + raise ValidationError('Failed to read exposure data, file is corrupted or set with 
incorrect format', e) # Run OED Validation if run_validation: - uploaded_exposure = OedExposure(**{ - EXPOSURE_ARGS[self.oed_field]: uploaded_data_df, - 'validation_config': django_settings.PORTFOLIO_VALIDATION_CONFIG - }) oed_validation_errors = uploaded_exposure.check() if len(oed_validation_errors) > 0: raise ValidationError(detail=[(error['name'], error['msg']) for error in oed_validation_errors]) @@ -76,7 +77,8 @@ def validate(self, attrs): if convert_to_parquet: try: f = io.open(attrs['file'].name + '.parquet', 'wb+') - uploaded_data_df.to_parquet(f) + exposure_file = getattr(uploaded_exposure, EXPOSURE_ARGS[self.oed_field]) + exposure_file.dataframe.to_parquet(f) in_memory_file = UploadedFile( file=f, name=f.name, diff --git a/src/server/oasisapi/files/views.py b/src/server/oasisapi/files/views.py index b6fa727af..e3e5ce623 100644 --- a/src/server/oasisapi/files/views.py +++ b/src/server/oasisapi/files/views.py @@ -5,9 +5,9 @@ from django.http import StreamingHttpResponse, Http404, QueryDict from rest_framework.response import Response -from .serializers import RelatedFileSerializer +from .serializers import RelatedFileSerializer, EXPOSURE_ARGS -from .models import related_file_to_df +from ods_tools.oed.exposure import OedExposure def _delete_related_file(parent, field): @@ -39,9 +39,12 @@ def _handle_get_related_file(parent, field, file_format): # Parquet format requested and data stored as csv if file_format == 'parquet' and f.content_type == 'text/csv': + exposure = OedExposure(**{ + EXPOSURE_ARGS[field]: f.file, + }) output_buffer = io.BytesIO() - df = related_file_to_df(f) - df.to_parquet(output_buffer, index=False) + exposure_data = getattr(exposure, EXPOSURE_ARGS[field]) + exposure_data.dataframe.to_parquet(output_buffer, index=False) output_buffer.seek(0) response = StreamingHttpResponse(output_buffer, content_type='application/octet-stream') @@ -50,9 +53,12 @@ def _handle_get_related_file(parent, field, file_format): # CSV format requested and data stored as Parquet if file_format == 'csv' and f.content_type == 'application/octet-stream': + exposure = OedExposure(**{ + EXPOSURE_ARGS[field]: f.file, + }) output_buffer = io.BytesIO() - df = related_file_to_df(f) - df.to_csv(output_buffer, index=False) + exposure_data = getattr(exposure, EXPOSURE_ARGS[field]) + exposure_data.dataframe.to_csv(output_buffer, index=False) output_buffer.seek(0) response = StreamingHttpResponse(output_buffer, content_type='text/csv') diff --git a/src/server/oasisapi/portfolios/models.py b/src/server/oasisapi/portfolios/models.py index df701ac92..b1a55c6c2 100644 --- a/src/server/oasisapi/portfolios/models.py +++ b/src/server/oasisapi/portfolios/models.py @@ -10,7 +10,7 @@ from rest_framework.reverse import reverse from rest_framework.exceptions import ValidationError -from ..files.models import RelatedFile, related_file_to_df +from ..files.models import RelatedFile from ods_tools.oed.exposure import OedExposure @@ -67,10 +67,10 @@ def set_portolio_valid(self): def run_oed_validation(self): portfolio_exposure = OedExposure( - location=related_file_to_df(self.location_file), - account=related_file_to_df(self.accounts_file), - ri_info=related_file_to_df(self.reinsurance_info_file), - ri_scope=related_file_to_df(self.reinsurance_scope_file), + location=getattr(self.location_file, 'file', None), + account=getattr(self.accounts_file, 'file', None), + ri_info=getattr(self.reinsurance_info_file, 'file', None), + ri_scope=getattr(self.reinsurance_scope_file, 'file', None), 
validation_config=settings.PORTFOLIO_VALIDATION_CONFIG) validation_errors = portfolio_exposure.check() diff --git a/src/server/oasisapi/portfolios/tests/test_portfolio.py b/src/server/oasisapi/portfolios/tests/test_portfolio.py index e0c27d346..2acb69eff 100644 --- a/src/server/oasisapi/portfolios/tests/test_portfolio.py +++ b/src/server/oasisapi/portfolios/tests/test_portfolio.py @@ -13,6 +13,7 @@ from hypothesis.strategies import text, binary, sampled_from from mock import patch from rest_framework_simplejwt.tokens import AccessToken +from ods_tools.oed.exposure import OedExposure from ...files.tests.fakes import fake_related_file from ...analysis_models.tests.fakes import fake_analysis_model @@ -444,11 +445,20 @@ def test_accounts_file_is_uploaded_as_parquet___file_can_be_retrieved(self): }, ) - csv_return_data = pd.read_csv(io.StringIO(csv_response.text)) - pd.testing.assert_frame_equal(csv_return_data, test_data) + csv_obj = io.StringIO(csv_response.text) + prq_obj = io.BytesIO(parquet_response.content) + setattr(prq_obj, 'name', 'account.parquet') - prq_return_data = pd.read_parquet(io.BytesIO(parquet_response.content)) - pd.testing.assert_frame_equal(prq_return_data, test_data) + input_data = OedExposure(account=test_data) + return_csv = OedExposure(account=csv_obj) + return_prq = OedExposure(account=prq_obj) + + pd.testing.assert_frame_equal( + return_csv.account.dataframe, + input_data.account.dataframe) + pd.testing.assert_frame_equal( + return_prq.account.dataframe, + input_data.account.dataframe) class PortfolioLocationFile(WebTestMixin, TestCase): @@ -586,11 +596,20 @@ def test_location_file_is_uploaded_as_parquet___file_can_be_retrieved(self): }, ) - csv_return_data = pd.read_csv(io.StringIO(csv_response.text)) - pd.testing.assert_frame_equal(csv_return_data, test_data) + csv_obj = io.StringIO(csv_response.text) + prq_obj = io.BytesIO(parquet_response.content) + setattr(prq_obj, 'name', 'location.parquet') + + input_data = OedExposure(location=test_data) + return_csv = OedExposure(location=csv_obj) + return_prq = OedExposure(location=prq_obj) - prq_return_data = pd.read_parquet(io.BytesIO(parquet_response.content)) - pd.testing.assert_frame_equal(prq_return_data, test_data) + pd.testing.assert_frame_equal( + return_csv.location.dataframe, + input_data.location.dataframe) + pd.testing.assert_frame_equal( + return_prq.location.dataframe, + input_data.location.dataframe) class PortfolioReinsuranceSourceFile(WebTestMixin, TestCase): @@ -729,11 +748,20 @@ def test_reinsurance_scope_file_is_uploaded_as_parquet___file_can_be_retrieved(s }, ) - csv_return_data = pd.read_csv(io.StringIO(csv_response.text)) - pd.testing.assert_frame_equal(csv_return_data, test_data) + csv_obj = io.StringIO(csv_response.text) + prq_obj = io.BytesIO(parquet_response.content) + setattr(prq_obj, 'name', 'ri_scope.parquet') - prq_return_data = pd.read_parquet(io.BytesIO(parquet_response.content)) - pd.testing.assert_frame_equal(prq_return_data, test_data) + input_data = OedExposure(ri_scope=test_data) + return_csv = OedExposure(ri_scope=csv_obj) + return_prq = OedExposure(ri_scope=prq_obj) + + pd.testing.assert_frame_equal( + return_csv.ri_scope.dataframe, + input_data.ri_scope.dataframe) + pd.testing.assert_frame_equal( + return_prq.ri_scope.dataframe, + input_data.ri_scope.dataframe) class PortfolioReinsuranceInfoFile(WebTestMixin, TestCase): @@ -872,11 +900,20 @@ def test_reinsurance_info_file_is_uploaded_as_parquet___file_can_be_retrieved(se }, ) - csv_return_data = 
pd.read_csv(io.StringIO(csv_response.text)) - pd.testing.assert_frame_equal(csv_return_data, test_data) + csv_obj = io.StringIO(csv_response.text) + prq_obj = io.BytesIO(parquet_response.content) + setattr(prq_obj, 'name', 'ri_info.parquet') + + input_data = OedExposure(ri_info=test_data) + return_csv = OedExposure(ri_info=csv_obj) + return_prq = OedExposure(ri_info=prq_obj) - prq_return_data = pd.read_parquet(io.BytesIO(parquet_response.content)) - pd.testing.assert_frame_equal(prq_return_data, test_data) + pd.testing.assert_frame_equal( + return_csv.ri_info.dataframe, + input_data.ri_info.dataframe) + pd.testing.assert_frame_equal( + return_prq.ri_info.dataframe, + input_data.ri_info.dataframe) LOCATION_DATA_VALID = """PortNumber,AccNumber,LocNumber,IsTenant,BuildingID,CountryCode,Latitude,Longitude,StreetAddress,PostalCode,OccupancyCode,ConstructionCode,LocPerilsCovered,BuildingTIV,OtherTIV,ContentsTIV,BITIV,LocCurrency,OEDVersion From 1272d78e1869c6508aaebf393dbc6012fc307635 Mon Sep 17 00:00:00 2001 From: sambles Date: Thu, 6 Apr 2023 18:13:21 +0100 Subject: [PATCH 18/33] Fix bug in release note build script (#791) --- scripts/update-changelog.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/update-changelog.py b/scripts/update-changelog.py index 9472e85e8..c0100fcea 100755 --- a/scripts/update-changelog.py +++ b/scripts/update-changelog.py @@ -78,7 +78,8 @@ def _get_commit_refs(self, repo_url, local_path, from_tag, to_tag): else: repo = RepositoryMining(repo_url, from_tag=from_tag, to_tag=to_tag) - commit_list = [re.findall(r'#\d+', commit.msg) for commit in repo.traverse_commits()] + commit_titles = [commit.msg.split('\n\n')[0] for commit in repo.traverse_commits()] + commit_list = [re.findall(r'#\d+', title) for title in commit_titles] commit_list = sum(commit_list, []) return set(map(lambda cm: int(cm[1:]), commit_list)) From 6ef220d8d25e3dd8c6abc026df32e07976a856b1 Mon Sep 17 00:00:00 2001 From: sambles Date: Tue, 9 May 2023 11:51:17 +0100 Subject: [PATCH 19/33] Add override for workers to check missing files (#800) --- Dockerfile.model_worker | 1 + Dockerfile.model_worker_debian | 1 + 2 files changed, 2 insertions(+) diff --git a/Dockerfile.model_worker b/Dockerfile.model_worker index ede5b191a..1020a852b 100755 --- a/Dockerfile.model_worker +++ b/Dockerfile.model_worker @@ -68,5 +68,6 @@ ENV PATH=/home/worker/.local/bin:$PATH ENV DEBIAN_FRONTEND=noninteractive ENV OASIS_MEDIA_ROOT=/shared-fs ENV OASIS_ENV_OVERRIDE=true +ENV OASIS_CHECK_MISSING_INPUTS=true ENTRYPOINT ./startup.sh diff --git a/Dockerfile.model_worker_debian b/Dockerfile.model_worker_debian index 7a2e09303..4eaadf42d 100644 --- a/Dockerfile.model_worker_debian +++ b/Dockerfile.model_worker_debian @@ -54,5 +54,6 @@ ENV PATH=/home/worker/.local/bin:$PATH ENV OASIS_MEDIA_ROOT=/shared-fs ENV OASIS_ENV_OVERRIDE=true ENV OASIS_CHECK_MISSING_INPUTS=true +ENV OASIS_CHECK_MISSING_INPUTS=true ENTRYPOINT ./startup.sh From 6c87f695456f9451016f47517447accc527b3072 Mon Sep 17 00:00:00 2001 From: sambles Date: Fri, 12 May 2023 13:51:30 +0100 Subject: [PATCH 20/33] Add workflow to update requirment files (#810) fix names --- .github/workflows/update-python.yml | 49 +++++++++++++++++++++++++++++ scripts/update-packages.sh | 1 + 2 files changed, 50 insertions(+) create mode 100644 .github/workflows/update-python.yml diff --git a/.github/workflows/update-python.yml b/.github/workflows/update-python.yml new file mode 100644 index 000000000..d547a44ef --- /dev/null +++ b/.github/workflows/update-python.yml 
@@ -0,0 +1,49 @@ +name: Update Requirements + +on: + workflow_dispatch: + workflow_call: + +jobs: + version: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + ref: ${{ github.ref_name }} + fetch-depth: 0 # fetch the whole repo for complete history + + - name: Setup github user + run: | + git config --global user.email ${{ env.GIT_EMAIL }} + git config --global user.name ${{ env.GIT_USERNAME }} + git config --global pull.ff only + env: + GIT_EMAIL: ${{ secrets.BUILD_GIT_EMAIL }} + GIT_USERNAME: ${{ secrets.BUILD_GIT_USERNAME }} + + - name: Set up Python 3.10 + uses: actions/setup-python@v4 + with: + python-version: '3.10' + + - name: Install piptools + run: pip install pip-tools + + - name: Update Requirements files + run: | + ./scripts/update-packages.sh + requ_list=( 'requirements-server' 'requirements-worker' 'requirements') + for fl in "${requ_list[@]}"; do + git add $fl.txt + done + + - name: Git Commit + run: | + [[ -z $(git status -s) ]] || git commit -m "Set version ${{ inputs.platform_version }}" + + - name: Push + run: git push + env: + GITHUB_TOKEN: ${{ secrets.BUILD_GIT_TOKEN }} diff --git a/scripts/update-packages.sh b/scripts/update-packages.sh index 53905650d..e8015a4e5 100755 --- a/scripts/update-packages.sh +++ b/scripts/update-packages.sh @@ -34,6 +34,7 @@ for pk in "${pkg_list[@]}"; do PKG_UPDATE=$PKG_UPDATE" --upgrade-package $pk" done +rm requirements-worker.txt requirements-server.txt requirements.txt set -e pip-compile $PKG_UPDATE requirements-worker.in pip-compile $PKG_UPDATE requirements-server.in From e8205c52b107afa5f310ef5e177f747f46872f12 Mon Sep 17 00:00:00 2001 From: Sam Gamble Date: Fri, 12 May 2023 14:08:49 +0100 Subject: [PATCH 21/33] Fix update requ --- .github/workflows/update-python.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/update-python.yml b/.github/workflows/update-python.yml index d547a44ef..0b359cdec 100644 --- a/.github/workflows/update-python.yml +++ b/.github/workflows/update-python.yml @@ -5,7 +5,7 @@ on: workflow_call: jobs: - version: + update: runs-on: ubuntu-latest steps: - name: Checkout @@ -41,7 +41,7 @@ jobs: - name: Git Commit run: | - [[ -z $(git status -s) ]] || git commit -m "Set version ${{ inputs.platform_version }}" + [[ -z $(git status -s) ]] || git commit -m "Updated Package Requirements" - name: Push run: git push From 9fcc43c7ba3c18399e15078c8d65411bfeccd74e Mon Sep 17 00:00:00 2001 From: sambles Date: Tue, 16 May 2023 12:45:17 +0100 Subject: [PATCH 22/33] Update/remote trig python tests (#818) * Add remote trigger option for python tests * Fix branch select * Fix? 
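A minimal sketch of how another repository could invoke this reusable workflow once the remote-trigger inputs are in place; the caller repository, file name, branch names and job id below are illustrative assumptions, not part of this change:

# .github/workflows/call-platform-tests.yml in a hypothetical caller repo (e.g. ODS_Tools)
name: Trigger OasisPlatform python tests
on: [workflow_dispatch]

jobs:
  platform-unittests:
    uses: OasisLMF/OasisPlatform/.github/workflows/test-python.yml@develop
    secrets: inherit
    with:
      platform_branch: develop            # platform ref the tests should check out
      ods_branch: ${{ github.ref_name }}  # ods-tools branch to pip install before testing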
--- .github/workflows/test-python.yml | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index b318e914e..4f12807c4 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -11,27 +11,49 @@ on: - master - develop - backports** + workflow_dispatch: inputs: ods_branch: description: 'If set, pip install ods-tools branch [git ref]' required: false + workflow_call: + inputs: + ods_branch: + description: 'If set, pip install ods-tools branch [git ref]' + required: false + type: string + platform_branch: + description: "Platform branch to test (remote trigger) [git ref]" + required: True + type: string + jobs: ods_tools: #if: inputs.ods_branch != '' uses: OasisLMF/ODS_Tools/.github/workflows/build.yml@develop secrets: inherit with: - ods_branch: ${{ github.event_name != 'workflow_dispatch' && 'develop' || inputs.ods_branch }} + ods_branch: ${{ inputs.ods_branch == '' && 'develop' || inputs.ods_branch }} unittest: needs: ods_tools env: JUNIT_REPORT: pytest_report.xml + PLAT_BRANCH: ${{ github.ref }} runs-on: ubuntu-22.04 + steps: - - uses: actions/checkout@v3 + - name: Branch selection (remote trigger) + if: inputs.platform_branch != '' + run: echo "PLAT_BRANCH=${{ inputs.platform_branch }}" >> $GITHUB_ENV + + - name: Checkout + uses: actions/checkout@v3 + with: + repository: OasisLMF/OasisPlatform + ref: ${{ env.PLAT_BRANCH }} - name: Set up Python 3.10 uses: actions/setup-python@v4 From 6a92546e6f8062046b32c2c5b9155bf46a01e7b5 Mon Sep 17 00:00:00 2001 From: sambles Date: Wed, 17 May 2023 13:44:47 +0100 Subject: [PATCH 23/33] Update publish with checkbox to push images with latest tag (#820) * Update publish with checkbox to push images with latest tag * Fix required fields --- .github/workflows/publish.yml | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 5b9262a27..99d5da85a 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -4,9 +4,14 @@ on: workflow_dispatch: inputs: pre_release: - description: 'Mark GitHub release as pre-release: [true, false]' - required: true - type: string + description: 'Mark GitHub release as pre-release' + required: false + type: boolean + + push_latest: + description: 'Push images with the "latest" tag' + required: false + type: boolean release_tag: description: 'Release tag, for release: [{n}.{n}.{n}] e.g. 
3.0.0, for Pre-Release [{n}.{n}.{n}rc{n}]' @@ -23,7 +28,7 @@ on: oasislmf_release: description: '(OVERRIDE) The oasislmf version in this release [semvar]' - required: true + required: false oasislmf_release_prev: description: '(OVERRIDE) The previous oasislmf version for changelog [semvar]' @@ -95,7 +100,8 @@ jobs: env: - pre_release: ${{ inputs.pre_release }} + pre_release: ${{ inputs.pre_release == '' && 'false' || inputs.pre_release }} + push_latest: ${{ inputs.push_latest == '' && 'false' || inputs.push_latest }} release_tag: ${{ inputs.release_tag }} prev_release_tag: ${{ inputs.prev_release_tag }} @@ -443,7 +449,7 @@ jobs: docker push coreoasis/piwind_worker:${{ env.release_tag }} - name: Push images (Production) - if: ${{ env.pre_release == 'false' && !startsWith(github.ref_name, 'backports/') }} + if: ${{ env.push_latest == 'true' }} run: | docker push coreoasis/api_server:latest docker push coreoasis/model_worker:latest From 5e9878da332fd1499edf141d01a10da3fd5502a6 Mon Sep 17 00:00:00 2001 From: sambles Date: Wed, 31 May 2023 11:06:32 +0100 Subject: [PATCH 24/33] Add option to select packages to update in workflow (#826) --- .github/workflows/update-python.yml | 13 ++++++++++-- scripts/update-packages.sh | 31 ++++++----------------------- 2 files changed, 17 insertions(+), 27 deletions(-) diff --git a/.github/workflows/update-python.yml b/.github/workflows/update-python.yml index 0b359cdec..7e9157391 100644 --- a/.github/workflows/update-python.yml +++ b/.github/workflows/update-python.yml @@ -2,7 +2,16 @@ name: Update Requirements on: workflow_dispatch: + inputs: + package_update_list: + description: 'pass list of packages to update, [django celery "oasislmf=="]' + required: false workflow_call: + inputs: + package_update_list: + description: 'pass list of packages to update, [django celery "oasislmf=="]' + required: false + type: string jobs: update: @@ -33,7 +42,7 @@ jobs: - name: Update Requirements files run: | - ./scripts/update-packages.sh + ./scripts/update-packages.sh ${{ inputs.package_update_list }} requ_list=( 'requirements-server' 'requirements-worker' 'requirements') for fl in "${requ_list[@]}"; do git add $fl.txt @@ -41,7 +50,7 @@ jobs: - name: Git Commit run: | - [[ -z $(git status -s) ]] || git commit -m "Updated Package Requirements" + [[ -z $(git status -s) ]] || git commit -m "Updated Package Requirements: ${{ inputs.package_update_list }}" - name: Push run: git push diff --git a/scripts/update-packages.sh b/scripts/update-packages.sh index e8015a4e5..d67444588 100755 --- a/scripts/update-packages.sh +++ b/scripts/update-packages.sh @@ -1,40 +1,21 @@ +#!/bin/bash + pkg_list=( 'celery==5.*' 'django==3.*' - azure-storage-blob - coverage - cryptography - distlib - django-celery-results - django-request-logging - drf-yasg - filelock - joblib - numpy oasislmf - pytest - oauthlib ods-tools - pandas - parso - pyopenssl - psycopg2-binary - ruamel.yaml - scikit-learn - scipy - sklearn - sqlalchemy - text-unidecode - virtualenv - waitress ) +if [ "$#" -gt 0 ]; then + pkg_list=( "$@" ) +fi + PKG_UPDATE='' for pk in "${pkg_list[@]}"; do PKG_UPDATE=$PKG_UPDATE" --upgrade-package $pk" done -rm requirements-worker.txt requirements-server.txt requirements.txt set -e pip-compile $PKG_UPDATE requirements-worker.in pip-compile $PKG_UPDATE requirements-server.in From b3d1c10246c562500384f7dff4862a16093e9f72 Mon Sep 17 00:00:00 2001 From: sambles Date: Wed, 31 May 2023 11:54:48 +0100 Subject: [PATCH 25/33] Updated Package Requirements: 'django==3.2.19' (#827) Co-authored-by: 
awsbuild --- requirements-server.txt | 2 +- requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements-server.txt b/requirements-server.txt index cbcad5243..c774106bb 100644 --- a/requirements-server.txt +++ b/requirements-server.txt @@ -79,7 +79,7 @@ cryptography==39.0.2 # service-identity daphne==4.0.0 # via -r requirements-server.in -django==3.2.18 +django==3.2.19 # via # -r requirements-server.in # channels diff --git a/requirements.txt b/requirements.txt index dc9059821..4c055557d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -139,7 +139,7 @@ daphne==4.0.0 # via -r ./requirements-server.in distlib==0.3.6 # via virtualenv -django==3.2.18 +django==3.2.19 # via # -r ./requirements-server.in # channels From 1afd945f71823b98a560527a2714ec52adda7633 Mon Sep 17 00:00:00 2001 From: sambles Date: Mon, 12 Jun 2023 11:50:04 +0100 Subject: [PATCH 26/33] Fixes for OpenAPI schema (#835) * Issue 3 - replace uri with url * Fix Stored -> stored * fix portfolio, incorrect parser class when calling schema gen * Add required false for url params * Merge schema POST returns with RelatedFile * Revert "Issue 3 - replace uri with url" This reverts commit 220f0b39768ed8ee4ab0f5d8184bbe1724f563d6. * Update swagger serializers to match portfolio return --- src/server/oasisapi/files/serializers.py | 1 + src/server/oasisapi/portfolios/viewsets.py | 4 +--- src/server/oasisapi/schemas/custom_swagger.py | 2 ++ src/server/oasisapi/schemas/serializers.py | 16 ++++++++-------- 4 files changed, 12 insertions(+), 11 deletions(-) diff --git a/src/server/oasisapi/files/serializers.py b/src/server/oasisapi/files/serializers.py index 09c7670d4..29d6313c9 100644 --- a/src/server/oasisapi/files/serializers.py +++ b/src/server/oasisapi/files/serializers.py @@ -38,6 +38,7 @@ def md5_filehash(in_memory_file, chunk_size=4096): class RelatedFileSerializer(serializers.ModelSerializer): class Meta: + ref_name = None model = RelatedFile fields = ( 'created', diff --git a/src/server/oasisapi/portfolios/viewsets.py b/src/server/oasisapi/portfolios/viewsets.py index 29d0f1008..408125b7b 100644 --- a/src/server/oasisapi/portfolios/viewsets.py +++ b/src/server/oasisapi/portfolios/viewsets.py @@ -117,10 +117,8 @@ def get_serializer_class(self): @property def parser_classes(self): - method = self.request.method.lower() upload_views = ['accounts_file', 'location_file', 'reinsurance_info_file', 'reinsurance_scope_file'] - - if method == 'post' and getattr(self, 'action', None) in upload_views: + if getattr(self, 'action', None) in upload_views: return [MultiPartParser] else: return api_settings.DEFAULT_PARSER_CLASSES diff --git a/src/server/oasisapi/schemas/custom_swagger.py b/src/server/oasisapi/schemas/custom_swagger.py index ac6a1ef03..35fe333ac 100644 --- a/src/server/oasisapi/schemas/custom_swagger.py +++ b/src/server/oasisapi/schemas/custom_swagger.py @@ -64,6 +64,7 @@ FILE_FORMAT_PARAM = openapi.Parameter( 'file_format', openapi.IN_QUERY, + required=False, description="File format returned, default is `csv`", type=openapi.TYPE_STRING, enum=['csv', 'parquet'] @@ -72,6 +73,7 @@ FILE_VALIDATION_PARAM = openapi.Parameter( 'validate', openapi.IN_QUERY, + required=False, description="Validate OED files on upload, default `True`", type=openapi.TYPE_BOOLEAN, ) diff --git a/src/server/oasisapi/schemas/serializers.py b/src/server/oasisapi/schemas/serializers.py index b14bb4791..d4ce6d59d 100644 --- a/src/server/oasisapi/schemas/serializers.py +++ b/src/server/oasisapi/schemas/serializers.py @@ 
-59,9 +59,9 @@ def update(self, instance, validated_data): class LocFileSerializer(serializers.Serializer): - url = serializers.URLField() + uri = serializers.URLField() name = serializers.CharField() - Stored = serializers.CharField() + stored = serializers.CharField() def create(self, validated_data): raise NotImplementedError() @@ -71,9 +71,9 @@ def update(self, instance, validated_data): class AccFileSerializer(serializers.Serializer): - url = serializers.URLField() + uri = serializers.URLField() name = serializers.CharField() - Stored = serializers.CharField() + stored = serializers.CharField() def create(self, validated_data): raise NotImplementedError() @@ -83,9 +83,9 @@ def update(self, instance, validated_data): class ReinsInfoFileSerializer(serializers.Serializer): - url = serializers.URLField() + uri = serializers.URLField() name = serializers.CharField() - Stored = serializers.CharField() + stored = serializers.CharField() def create(self, validated_data): raise NotImplementedError() @@ -95,9 +95,9 @@ def update(self, instance, validated_data): class ReinsScopeFileSerializer(serializers.Serializer): - url = serializers.URLField() + uri = serializers.URLField() name = serializers.CharField() - Stored = serializers.CharField() + stored = serializers.CharField() def create(self, validated_data): raise NotImplementedError() From 19d8d3215781b87491103c1a77f9de1fc4881d63 Mon Sep 17 00:00:00 2001 From: sambles Date: Wed, 5 Jul 2023 10:17:04 +0100 Subject: [PATCH 27/33] Fix CVE-2023-30608 (#842) * Updated Package Requirements: sqlparse==0.4.4 * Updated Package Requirements: pymysql==1.1.0 * retest * Revert "retest" This reverts commit 6e82471271246378fafe8bd990c85ec5403b2d2d. * try pinning mysqlclient * Updated Package Requirements: mysqlclient * Unpin for .in, keep pin in .txt --------- Co-authored-by: awsbuild --- requirements-server.in | 1 + requirements-server.txt | 6 ++++-- requirements-worker.txt | 2 +- requirements.txt | 6 ++++-- 4 files changed, 10 insertions(+), 5 deletions(-) diff --git a/requirements-server.in b/requirements-server.in index 221c41092..bbefee9ec 100755 --- a/requirements-server.in +++ b/requirements-server.in @@ -26,6 +26,7 @@ psycopg2-binary pyarrow pymysql sqlalchemy +mysqlclient whitenoise twisted>=22.1 gunicorn diff --git a/requirements-server.txt b/requirements-server.txt index c774106bb..0d8cb07b9 100644 --- a/requirements-server.txt +++ b/requirements-server.txt @@ -157,6 +157,8 @@ markdown==3.4.1 # via -r requirements-server.in markupsafe==2.1.2 # via jinja2 +mysqlclient==2.1.1 + # via -r requirements-server.in numpy==1.24.2 # via # pandas @@ -189,7 +191,7 @@ pycparser==2.21 # via cffi pyjwt==2.6.0 # via djangorestframework-simplejwt -pymysql==1.0.2 +pymysql==1.1.0 # via -r requirements-server.in pyopenssl==23.0.0 # via twisted @@ -229,7 +231,7 @@ six==1.16.0 # service-identity sqlalchemy==2.0.5.post1 # via -r requirements-server.in -sqlparse==0.4.3 +sqlparse==0.4.4 # via # django # django-debug-toolbar diff --git a/requirements-worker.txt b/requirements-worker.txt index e75f9f3b5..dbab7b4bd 100644 --- a/requirements-worker.txt +++ b/requirements-worker.txt @@ -171,7 +171,7 @@ pyarrow==11.0.0 # via oasislmf pycparser==2.21 # via cffi -pymysql==1.0.2 +pymysql==1.1.0 # via -r requirements-worker.in pyproj==3.4.1 # via geopandas diff --git a/requirements.txt b/requirements.txt index 4c055557d..a73a02780 100644 --- a/requirements.txt +++ b/requirements.txt @@ -269,6 +269,8 @@ msgpack==1.0.5 # via oasislmf munch==2.5.0 # via fiona +mysqlclient==2.1.1 + # 
via -r ./requirements-server.in numba==0.56.4 # via # -r ./requirements-worker.in @@ -347,7 +349,7 @@ pyflakes==3.0.1 # via flake8 pyjwt==2.6.0 # via djangorestframework-simplejwt -pymysql==1.0.2 +pymysql==1.1.0 # via # -r ./requirements-server.in # -r ./requirements-worker.in @@ -444,7 +446,7 @@ sqlalchemy==2.0.5.post1 # via # -r ./requirements-server.in # -r ./requirements-worker.in -sqlparse==0.4.3 +sqlparse==0.4.4 # via # django # django-debug-toolbar From 683b00ec19616ed9563ac10faf94764a8da7fb4d Mon Sep 17 00:00:00 2001 From: awsbuild Date: Wed, 5 Jul 2023 12:34:51 +0000 Subject: [PATCH 28/33] Set version 1.28.0 --- VERSION | 2 +- requirements-server.txt | 2 +- requirements-worker.txt | 2 +- requirements.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/VERSION b/VERSION index 457f03854..cfc730712 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.27.2 +1.28.0 diff --git a/requirements-server.txt b/requirements-server.txt index 0d8cb07b9..0d92536a9 100644 --- a/requirements-server.txt +++ b/requirements-server.txt @@ -163,7 +163,7 @@ numpy==1.24.2 # via # pandas # pyarrow -ods-tools==3.0.4 +ods-tools==3.1.0 # via -r requirements-server.in packaging==23.0 # via diff --git a/requirements-worker.txt b/requirements-worker.txt index dbab7b4bd..8c0c53f34 100644 --- a/requirements-worker.txt +++ b/requirements-worker.txt @@ -145,7 +145,7 @@ numpy==1.22.4 # shapely oasislmf[extra]==1.27.2 # via -r requirements-worker.in -ods-tools==3.0.4 +ods-tools==3.1.0 # via oasislmf packaging==23.0 # via diff --git a/requirements.txt b/requirements.txt index a73a02780..497a6a005 100644 --- a/requirements.txt +++ b/requirements.txt @@ -290,7 +290,7 @@ numpy==1.22.4 # shapely oasislmf[extra]==1.27.2 # via -r ./requirements-worker.in -ods-tools==3.0.4 +ods-tools==3.1.0 # via # -r ./requirements-server.in # oasislmf From 84fc483c5622655cd67e8825fba6f093dc381236 Mon Sep 17 00:00:00 2001 From: awsbuild Date: Fri, 14 Jul 2023 09:06:50 +0000 Subject: [PATCH 29/33] Set version 1.28.0 --- requirements-worker.txt | 28 +--------------------------- requirements.txt | 25 ++----------------------- 2 files changed, 3 insertions(+), 50 deletions(-) diff --git a/requirements-worker.txt b/requirements-worker.txt index 8c0c53f34..be7ddbbb2 100644 --- a/requirements-worker.txt +++ b/requirements-worker.txt @@ -10,8 +10,6 @@ anytree==2.8.0 # via oasislmf argparsetree==0.0.6 # via oasislmf -arrow==1.2.3 - # via jinja2-time attrs==22.2.0 # via # fiona @@ -25,8 +23,6 @@ azure-storage-blob==12.15.0 # via -r requirements-worker.in billiard==3.6.4.0 # via celery -binaryornot==0.4.4 - # via cookiecutter boto3==1.26.89 # via -r requirements-worker.in botocore==1.29.89 @@ -46,7 +42,6 @@ chainmap==1.0.3 # via oasislmf chardet==5.1.0 # via - # binaryornot # oasislmf # ods-tools charset-normalizer==3.1.0 @@ -57,8 +52,6 @@ click==8.1.3 # click-didyoumean # click-plugins # click-repl - # cligj - # cookiecutter # fiona click-didyoumean==0.3.0 # via celery @@ -72,8 +65,6 @@ cligj==0.7.2 # via fiona configparser==5.3.0 # via -r requirements-worker.in -cookiecutter==2.1.1 - # via oasislmf cramjam==2.6.2 # via fastparquet cryptography==39.0.2 @@ -100,12 +91,6 @@ iniconfig==2.0.0 # via pytest isodate==0.6.1 # via azure-storage-blob -jinja2==3.1.2 - # via - # cookiecutter - # jinja2-time -jinja2-time==0.2.0 - # via cookiecutter jmespath==1.0.1 # via # boto3 @@ -120,8 +105,6 @@ kombu==5.2.4 # via celery llvmlite==0.39.1 # via numba -markupsafe==2.1.2 - # via jinja2 msgpack==1.0.5 # via oasislmf munch==2.5.0 @@ -143,7 +126,7 @@ 
     #   scikit-learn
     #   scipy
     #   shapely
-oasislmf[extra]==1.27.2
+oasislmf[extra]==1.28.0
     # via -r requirements-worker.in
 ods-tools==3.1.0
     # via oasislmf
@@ -181,22 +164,16 @@ pytest==7.2.2
     # via -r requirements-worker.in
 python-dateutil==2.8.2
     # via
-    #   arrow
     #   botocore
     #   pandas
-python-slugify==8.0.1
-    # via cookiecutter
 pytz==2022.7.1
     # via
     #   celery
     #   oasislmf
     #   pandas
-pyyaml==6.0
-    # via cookiecutter
 requests==2.28.2
     # via
     #   azure-core
-    #   cookiecutter
     #   forex-python
     #   oasislmf
     #   requests-toolbelt
@@ -226,7 +203,6 @@ six==1.16.0
     #   azure-core
     #   click-repl
     #   isodate
-    #   munch
     #   pathlib2
     #   python-dateutil
 sqlalchemy==2.0.5.post1
@@ -235,8 +211,6 @@ tabulate==0.9.0
     # via oasislmf
 tblib==1.7.0
     # via oasislmf
-text-unidecode==1.3
-    # via python-slugify
 threadpoolctl==3.1.0
     # via scikit-learn
 tomli==2.0.1
diff --git a/requirements.txt b/requirements.txt
index 497a6a005..bebae7b6d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,8 +10,6 @@ anytree==2.8.0
     # via oasislmf
 argparsetree==0.0.6
     # via oasislmf
-arrow==1.2.3
-    # via jinja2-time
 asgiref==3.6.0
     # via
     #   channels
@@ -46,8 +44,6 @@ beautifulsoup4==4.11.2
     # via webtest
 billiard==3.6.4.0
     # via celery
-binaryornot==0.4.4
-    # via cookiecutter
 boto3==1.26.89
     # via
     #   -r ./requirements-server.in
@@ -79,7 +75,6 @@ channels==4.0.0
     # via -r ./requirements-server.in
 chardet==5.1.0
     # via
-    #   binaryornot
     #   oasislmf
     #   ods-tools
     #   tox
@@ -92,7 +87,6 @@ click==8.1.3
     #   click-plugins
     #   click-repl
     #   cligj
-    #   cookiecutter
     #   fiona
     #   pip-tools
 click-didyoumean==0.3.0
@@ -113,8 +107,6 @@ configparser==5.3.0
     #   -r ./requirements-worker.in
 constantly==15.1.0
     # via twisted
-cookiecutter==2.1.1
-    # via oasislmf
 coreapi==2.3.3
     # via
     #   -r ./requirements-server.in
@@ -231,12 +223,7 @@ isodate==0.6.1
 itypes==1.2.0
     # via coreapi
 jinja2==3.1.2
-    # via
-    #   cookiecutter
-    #   coreschema
-    #   jinja2-time
-jinja2-time==0.2.0
-    # via cookiecutter
+    # via coreschema
 jmespath==1.0.1
     # via
     #   boto3
@@ -288,7 +275,7 @@ numpy==1.22.4
     #   scikit-learn
     #   scipy
     #   shapely
-oasislmf[extra]==1.27.2
+oasislmf[extra]==1.28.0
     # via -r ./requirements-worker.in
 ods-tools==3.1.0
     # via
@@ -377,11 +364,8 @@ pytest-django==4.5.2
     # via -r requirements.in
 python-dateutil==2.8.2
     # via
-    #   arrow
     #   botocore
     #   pandas
-python-slugify==8.0.1
-    # via cookiecutter
 pytz==2022.7.1
     # via
     #   celery
@@ -390,13 +374,10 @@ pytz==2022.7.1
     #   drf-yasg
     #   oasislmf
     #   pandas
-pyyaml==6.0
-    # via cookiecutter
 requests==2.28.2
     # via
     #   -r requirements.in
     #   azure-core
-    #   cookiecutter
     #   coreapi
     #   forex-python
     #   oasislmf
@@ -454,8 +435,6 @@ tabulate==0.9.0
     # via oasislmf
 tblib==1.7.0
     # via oasislmf
-text-unidecode==1.3
-    # via python-slugify
 threadpoolctl==3.1.0
     # via scikit-learn
 tomli==2.0.1

From 4555d8a61c79f489cf424951e6361729a85f8ca0 Mon Sep 17 00:00:00 2001
From: awsbuild
Date: Fri, 14 Jul 2023 09:08:42 +0000
Subject: [PATCH 30/33] Updated Package Requirements: django==3.2.20

---
 requirements-server.txt | 2 +-
 requirements.txt        | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/requirements-server.txt b/requirements-server.txt
index 0d92536a9..0c14243c8 100644
--- a/requirements-server.txt
+++ b/requirements-server.txt
@@ -79,7 +79,7 @@ cryptography==39.0.2
     #   service-identity
 daphne==4.0.0
     # via -r requirements-server.in
-django==3.2.19
+django==3.2.20
     # via
     #   -r requirements-server.in
     #   channels
diff --git a/requirements.txt b/requirements.txt
index bebae7b6d..cbaaaf232 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -131,7 +131,7 @@ daphne==4.0.0
     # via -r ./requirements-server.in
 distlib==0.3.6
     # via virtualenv
-django==3.2.19
+django==3.2.20
     # via
     #   -r ./requirements-server.in
     #   channels

From 1895f9969c875ba5ddb97e8960d3f6707a83f34c Mon Sep 17 00:00:00 2001
From: Sam Gamble
Date: Fri, 14 Jul 2023 10:10:06 +0100
Subject: [PATCH 31/33] retest

From 1cdcb859f32f5df2ea2bde455902012791d11dae Mon Sep 17 00:00:00 2001
From: Sam Gamble
Date: Fri, 14 Jul 2023 10:51:14 +0100
Subject: [PATCH 32/33] Fix ci pub

---
 .github/workflows/publish.yml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
index 99d5da85a..bd3fbd8c0 100644
--- a/.github/workflows/publish.yml
+++ b/.github/workflows/publish.yml
@@ -3,10 +3,10 @@ name: Platform Release
 on:
   workflow_dispatch:
     inputs:
-      pre_release:
-        description: 'Mark GitHub release as pre-release'
-        required: false
-        type: boolean
+      #pre_release:
+      #  description: 'Mark GitHub release as pre-release'
+      #  required: false
+      #  type: boolean
 
       push_latest:
         description: 'Push images with the "latest" tag'

From e8f93359a846109dd4508ae09594733c779ce60f Mon Sep 17 00:00:00 2001
From: awsbuild
Date: Fri, 14 Jul 2023 10:08:31 +0000
Subject: [PATCH 33/33] Update changelog

---
 CHANGELOG.rst | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index e33259770..48e5da835 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,6 +1,35 @@
 OasisPlatform Changelog
 =======================
 
+`1.28.0`_
+ ---------
+* [#768](https://github.com/OasisLMF/OasisPlatform/pull/768) - Loosen repository argument in changelog builder
+* [#769](https://github.com/OasisLMF/OasisPlatform/pull/769) - 1.27.2 CVE update
+* [#771](https://github.com/OasisLMF/OasisPlatform/pull/772) - Add ODS_Tools version summary to release notes builder
+* [#775](https://github.com/OasisLMF/OasisPlatform/pull/775) - Search all repo for tags, but limit the scope by env
+* [#783](https://github.com/OasisLMF/OasisPlatform/pull/783) - Feature/ods read from stream Plat1
+* [#791](https://github.com/OasisLMF/OasisPlatform/pull/791) - Fix bug in release note build script
+* [#795](https://github.com/OasisLMF/OasisPlatform/pull/800) - Add missing check for RI files to workers
+* [#810](https://github.com/OasisLMF/OasisPlatform/pull/810) - Add workflow to update requirment files
+* [#818](https://github.com/OasisLMF/OasisPlatform/pull/818) - Update/remote trig python tests
+* [#820](https://github.com/OasisLMF/OasisPlatform/pull/820) - Update publish with checkbox to push images with latest tag
+* [#826](https://github.com/OasisLMF/OasisPlatform/pull/826) - Add option to select packages to update in workflow
+* [#827](https://github.com/OasisLMF/OasisPlatform/pull/827) - Updated Package Requirements: 'django==3.2.19'
+* [#813, #822](https://github.com/OasisLMF/OasisPlatform/pull/835) - Fixes for OpenAPI schema
+* [#842](https://github.com/OasisLMF/OasisPlatform/pull/842) - Fix CVE-2023-30608
+* [#721](https://github.com/OasisLMF/OasisPlatform/pull/721) - Release/1.27.0
+* [#722, #723](https://github.com/OasisLMF/OasisPlatform/pull/724) - Added OED validation on file upload, and updated ods-tools package to 3.0.1
+* [#726](https://github.com/OasisLMF/OasisPlatform/pull/732) - Add Code QL to platform repo
+* [#747](https://github.com/OasisLMF/OasisPlatform/pull/747) - Fix/portfolio validate default
+* [#749](https://github.com/OasisLMF/OasisPlatform/pull/749) - Set Validation on Upload to false
+* [#752](https://github.com/OasisLMF/OasisPlatform/pull/752) - Update packages dev
+* [#753](https://github.com/OasisLMF/OasisPlatform/pull/753) - Set ods-tools 3.0.2
+* [#754](https://github.com/OasisLMF/OasisPlatform/pull/762) - Update the model settings schema to include correlation options
+* [#763](https://github.com/OasisLMF/OasisPlatform/pull/763) - Fix schema build workflow
+* [#765](https://github.com/OasisLMF/OasisPlatform/pull/765) - Move json settings schema to ods-tools
+* [#767](https://github.com/OasisLMF/OasisPlatform/pull/767) - Add retry to model reg task
+.. _`1.28.0`: https://github.com/OasisLMF/OasisPlatform/compare/1.27.0...1.28.0
+
 `1.27.0`_
  ---------
 * [#705](https://github.com/OasisLMF/OasisPlatform/pull/710) - CVE updates - Server images