diff --git a/.github/workflows/linting-and-tests.yml b/.github/workflows/linting-and-tests.yml index bc45b41175..98919bb9c3 100644 --- a/.github/workflows/linting-and-tests.yml +++ b/.github/workflows/linting-and-tests.yml @@ -146,7 +146,7 @@ jobs: unit-test-backend-mysql-rabbitmq: name: "Backend Tests: MySQL + RabbitMQ (RBAC enabled: ${{ matrix.rbac_enabled }})" - runs-on: ubuntu-latest + runs-on: ubuntu-latest-8-cores strategy: matrix: rbac_enabled: ["True", "False"] @@ -189,7 +189,7 @@ jobs: unit-test-backend-postgresql-rabbitmq: name: "Backend Tests: PostgreSQL + RabbitMQ (RBAC enabled: ${{ matrix.rbac_enabled }})" - runs-on: ubuntu-latest + runs-on: ubuntu-latest-8-cores strategy: matrix: rbac_enabled: ["True", "False"] @@ -238,7 +238,7 @@ jobs: unit-test-backend-sqlite-redis: name: "Backend Tests: SQLite + Redis (RBAC enabled: ${{ matrix.rbac_enabled }})" - runs-on: ubuntu-latest + runs-on: ubuntu-latest-8-cores strategy: matrix: rbac_enabled: ["True", "False"] diff --git a/CHANGELOG.md b/CHANGELOG.md index 0bb4a2f9e3..5005f4b15b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,9 +5,13 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
-## Unreleased +## v1.3.102 (2024-02-06) -## v1.3.101 (2024-05-01) +### Fixed + +Maintenance release + +## v1.3.101 (2024-02-05) ### Added diff --git a/docs/sources/oncall-api-reference/alertgroups.md b/docs/sources/oncall-api-reference/alertgroups.md index c41509b998..42931b1890 100644 --- a/docs/sources/oncall-api-reference/alertgroups.md +++ b/docs/sources/oncall-api-reference/alertgroups.md @@ -30,6 +30,8 @@ The above command returns JSON structured in the following way: "created_at": "2020-05-19T12:37:01.430444Z", "resolved_at": "2020-05-19T13:37:01.429805Z", "acknowledged_at": null, + "acknowledged_by": null, + "resolved_by": "UCGEIXI1MR1NZ", "title": "Memory above 90% threshold", "permalinks": { "slack": "https://ghostbusters.slack.com/archives/C1H9RESGA/p135854651500008", @@ -54,6 +56,18 @@ These available filter parameters should be provided as `GET` arguments: `GET {{API_URL}}/api/v1/alert_groups/` +# Alert group details + +```shell +curl "{{API_URL}}/api/v1/alert_groups/I68T24C13IFW1" \ + --request GET \ + --header "Authorization: meowmeowmeow" +``` + +**HTTP request** + +`GET {{API_URL}}/api/v1/alert_groups/<ALERT_GROUP_ID>` + # Acknowledge an alert group ```shell diff --git a/engine/apps/integrations/tests/test_views.py b/engine/apps/integrations/tests/test_views.py index 6c83e24ff2..111ac85023 100644 --- a/engine/apps/integrations/tests/test_views.py +++ b/engine/apps/integrations/tests/test_views.py @@ -13,6 +13,9 @@ from apps.alerts.models import AlertReceiveChannel from apps.integrations.mixins import AlertChannelDefiningMixin +# https://github.com/pytest-dev/pytest-xdist/issues/432#issuecomment-528510433 +INTEGRATION_TYPES = sorted(AlertReceiveChannel.INTEGRATION_TYPES) + class DatabaseBlocker(_DatabaseBlocker): """Customize pytest_django db blocker to raise OperationalError exception.""" @@ -78,7 +81,7 @@ def test_integration_form_data_too_big(settings, make_organization_and_user, mak "integration_type", [ arc_type - for arc_type in 
AlertReceiveChannel.INTEGRATION_TYPES + for arc_type in INTEGRATION_TYPES if arc_type not in ["amazon_sns", "grafana", "alertmanager", "grafana_alerting", "maintenance"] ], ) @@ -230,7 +233,7 @@ def test_integration_old_grafana_endpoint( "integration_type", [ arc_type - for arc_type in AlertReceiveChannel.INTEGRATION_TYPES + for arc_type in INTEGRATION_TYPES if arc_type not in ["amazon_sns", "grafana", "alertmanager", "grafana_alerting", "maintenance"] ], ) @@ -264,7 +267,7 @@ def test_integration_universal_endpoint_not_allow_files( "integration_type", [ arc_type - for arc_type in AlertReceiveChannel.INTEGRATION_TYPES + for arc_type in INTEGRATION_TYPES if arc_type not in ["amazon_sns", "grafana", "alertmanager", "grafana_alerting", "maintenance"] ], ) @@ -367,7 +370,7 @@ def test_integration_grafana_endpoint_without_db_has_alerts( "integration_type", [ arc_type - for arc_type in AlertReceiveChannel.INTEGRATION_TYPES + for arc_type in INTEGRATION_TYPES if arc_type not in ["amazon_sns", "grafana", "alertmanager", "grafana_alerting", "maintenance"] ], ) @@ -467,7 +470,7 @@ def test_integration_grafana_endpoint_without_cache_has_alerts( "integration_type", [ arc_type - for arc_type in AlertReceiveChannel.INTEGRATION_TYPES + for arc_type in INTEGRATION_TYPES if arc_type not in ["amazon_sns", "grafana", "alertmanager", "grafana_alerting", "maintenance"] ], ) diff --git a/engine/apps/public_api/serializers/incidents.py b/engine/apps/public_api/serializers/incidents.py index 1a6b2df532..24ec1c6bd8 100644 --- a/engine/apps/public_api/serializers/incidents.py +++ b/engine/apps/public_api/serializers/incidents.py @@ -3,6 +3,7 @@ from apps.alerts.models import AlertGroup from apps.telegram.models.message import TelegramMessage +from common.api_helpers.custom_fields import UserIdField from common.api_helpers.mixins import EagerLoadingMixin @@ -14,6 +15,8 @@ class IncidentSerializer(EagerLoadingMixin, serializers.ModelSerializer): alerts_count = 
serializers.SerializerMethodField() title = serializers.SerializerMethodField() state = serializers.SerializerMethodField() + acknowledged_by = UserIdField(read_only=True, source="acknowledged_by_user") + resolved_by = UserIdField(read_only=True, source="resolved_by_user") SELECT_RELATED = ["channel", "channel_filter", "slack_message", "channel__organization"] PREFETCH_RELATED = [ @@ -35,7 +38,9 @@ class Meta: "state", "created_at", "resolved_at", + "resolved_by", "acknowledged_at", + "acknowledged_by", "title", "permalinks", ] diff --git a/engine/apps/public_api/tests/test_alert_groups.py b/engine/apps/public_api/tests/test_alert_groups.py index 54c143e067..247239ec30 100644 --- a/engine/apps/public_api/tests/test_alert_groups.py +++ b/engine/apps/public_api/tests/test_alert_groups.py @@ -29,6 +29,11 @@ def construct_expected_response_from_alert_groups(alert_groups): acknowledged_at = alert_group.acknowledged_at.isoformat() acknowledged_at = acknowledged_at[:-6] + "Z" + def user_pk_or_none(alert_group, user_field): + u = getattr(alert_group, user_field) + if u is not None: + return u.public_primary_key + results.append( { "id": alert_group.public_primary_key, @@ -39,6 +44,8 @@ def construct_expected_response_from_alert_groups(alert_groups): "created_at": created_at, "resolved_at": resolved_at, "acknowledged_at": acknowledged_at, + "acknowledged_by": user_pk_or_none(alert_group, "acknowledged_by_user"), + "resolved_by": user_pk_or_none(alert_group, "resolved_by_user"), "title": None, "permalinks": { "slack": None, @@ -95,6 +102,21 @@ def alert_group_public_api_setup( return token, alert_groups, integrations, routes +@pytest.mark.django_db +def test_get_alert_group(alert_group_public_api_setup): + token, _, _, _ = alert_group_public_api_setup + alert_groups = AlertGroup.objects.all().order_by("-started_at") + client = APIClient() + list_response = construct_expected_response_from_alert_groups(alert_groups) + expected_response = list_response["results"][0] + + url = 
reverse("api-public:alert_groups-detail", kwargs={"pk": expected_response["id"]}) + response = client.get(url, format="json", HTTP_AUTHORIZATION=token) + + assert response.status_code == status.HTTP_200_OK + assert response.json() == expected_response + + @pytest.mark.django_db def test_get_alert_groups(alert_group_public_api_setup): token, _, _, _ = alert_group_public_api_setup diff --git a/engine/apps/public_api/views/incidents.py b/engine/apps/public_api/views/incidents.py index 27fc71b4ea..a015604f83 100644 --- a/engine/apps/public_api/views/incidents.py +++ b/engine/apps/public_api/views/incidents.py @@ -34,7 +34,9 @@ class IncidentByTeamFilter(ByTeamModelFieldFilterMixin, filters.FilterSet): id = filters.CharFilter(field_name="public_primary_key") -class IncidentView(RateLimitHeadersMixin, mixins.ListModelMixin, mixins.DestroyModelMixin, GenericViewSet): +class IncidentView( + RateLimitHeadersMixin, mixins.ListModelMixin, mixins.RetrieveModelMixin, mixins.DestroyModelMixin, GenericViewSet +): authentication_classes = (ApiTokenAuthentication,) permission_classes = (IsAuthenticated,) diff --git a/engine/apps/telegram/client.py b/engine/apps/telegram/client.py index e295db16dc..81180c9f7b 100644 --- a/engine/apps/telegram/client.py +++ b/engine/apps/telegram/client.py @@ -8,6 +8,7 @@ from apps.alerts.models import AlertGroup from apps.base.utils import live_settings +from apps.telegram.exceptions import AlertGroupTelegramMessageDoesNotExist from apps.telegram.models import TelegramMessage from apps.telegram.renderers.keyboard import TelegramKeyboardRenderer from apps.telegram.renderers.message import TelegramMessageRenderer @@ -157,7 +158,10 @@ def _get_message_and_keyboard( ).first() if alert_group_message is None: - raise Exception("No alert group message found, probably it is not saved to database yet") + raise AlertGroupTelegramMessageDoesNotExist( + f"No alert group message found, probably it is not saved to database yet, " + f"alert group: 
{alert_group.id}" + ) include_title = message_type == TelegramMessage.LINK_TO_CHANNEL_MESSAGE link = alert_group_message.link diff --git a/engine/apps/telegram/exceptions.py b/engine/apps/telegram/exceptions.py new file mode 100644 index 0000000000..e6439863a5 --- /dev/null +++ b/engine/apps/telegram/exceptions.py @@ -0,0 +1,2 @@ +class AlertGroupTelegramMessageDoesNotExist(Exception): + pass diff --git a/engine/apps/telegram/models/connectors/personal.py b/engine/apps/telegram/models/connectors/personal.py index 3034bf6f76..ae006ed453 100644 --- a/engine/apps/telegram/models/connectors/personal.py +++ b/engine/apps/telegram/models/connectors/personal.py @@ -43,10 +43,15 @@ def notify(self, alert_group: AlertGroup, notification_policy: UserNotificationP telegram_channel = TelegramToOrganizationConnector.get_channel_for_alert_group(alert_group) if telegram_channel is not None: - send_link_to_channel_message_or_fallback_to_full_alert_group.delay( - alert_group_pk=alert_group.pk, - notification_policy_pk=notification_policy.pk, - user_connector_pk=self.pk, + # Call this task with a countdown to avoid unnecessary retry when alert group telegram message hasn't been + # created yet + send_link_to_channel_message_or_fallback_to_full_alert_group.apply_async( + kwargs={ + "alert_group_pk": alert_group.pk, + "notification_policy_pk": notification_policy.pk, + "user_connector_pk": self.pk, + }, + countdown=3, ) else: self.send_full_alert_group(alert_group=alert_group, notification_policy=notification_policy) diff --git a/engine/common/cloud_auth_api/client.py b/engine/common/cloud_auth_api/client.py index 0694944aa0..7939b4ee4d 100644 --- a/engine/common/cloud_auth_api/client.py +++ b/engine/common/cloud_auth_api/client.py @@ -74,9 +74,8 @@ def request_signed_token( headers=headers, json={ "claims": claims, - "extra": { + "accessPolicy": { "scopes": scopes, - "org_id": org_id, }, }, ) diff --git a/engine/common/cloud_auth_api/tests/test_client.py 
b/engine/common/cloud_auth_api/tests/test_client.py index 4ef028f4d4..94785c6840 100644 --- a/engine/common/cloud_auth_api/tests/test_client.py +++ b/engine/common/cloud_auth_api/tests/test_client.py @@ -1,6 +1,6 @@ import json -from unittest.mock import patch +import httpretty import pytest from rest_framework import status @@ -16,10 +16,10 @@ def configure_cloud_auth_api_client(settings): settings.GRAFANA_CLOUD_AUTH_API_SYSTEM_TOKEN = GRAFANA_CLOUD_AUTH_API_SYSTEM_TOKEN -@patch("common.cloud_auth_api.client.requests") @pytest.mark.django_db @pytest.mark.parametrize("response_status_code", [status.HTTP_200_OK, status.HTTP_401_UNAUTHORIZED]) -def test_request_signed_token(mock_requests, make_organization, response_status_code): +@httpretty.activate(verbose=True, allow_net_connect=False) +def test_request_signed_token(make_organization, response_status_code): mock_auth_token = ",mnasdlkjlakjoqwejroiqwejr" mock_response_text = "error message" @@ -31,25 +31,12 @@ def test_request_signed_token(mock_requests, make_organization, response_status_ scopes = ["incident:write", "foo:bar"] claims = {"vegetable": "carrot", "fruit": "apple"} - class MockResponse: - text = mock_response_text - - def __init__(self, status_code): - self.status_code = status_code - - def json(self): - return { - "data": { - "token": mock_auth_token, - }, - } - - mock_requests.post.return_value = MockResponse(response_status_code) - def _make_request(): return CloudAuthApiClient().request_signed_token(organization, scopes, claims) url = f"{GRAFANA_CLOUD_AUTH_API_URL}/v1/sign" + mock_response = httpretty.Response(json.dumps({"data": {"token": mock_auth_token}}), status=response_status_code) + httpretty.register_uri(httpretty.POST, url, responses=[mock_response]) if response_status_code != status.HTTP_200_OK: with pytest.raises(CloudAuthApiException) as excinfo: @@ -62,25 +49,26 @@ def _make_request(): else: assert _make_request() == mock_auth_token - mock_requests.post.assert_called_once_with( - url, 
- headers={ - "Authorization": f"Bearer {GRAFANA_CLOUD_AUTH_API_SYSTEM_TOKEN}", - "X-Org-ID": str(org_id), - "X-Realms": json.dumps( - [ - { - "type": "stack", - "identifier": str(stack_id), - }, - ] - ), + last_request = httpretty.last_request() + assert last_request.method == "POST" + assert last_request.url == url + + # assert we're sending the right body + assert json.loads(last_request.body) == { + "claims": claims, + "accessPolicy": { + "scopes": scopes, }, - json={ - "claims": claims, - "extra": { - "scopes": scopes, - "org_id": str(org_id), + } + + # assert we're sending the right headers + assert last_request.headers["Authorization"] == f"Bearer {GRAFANA_CLOUD_AUTH_API_SYSTEM_TOKEN}" + assert last_request.headers["X-Org-ID"] == str(org_id) + assert last_request.headers["X-Realms"] == json.dumps( + [ + { + "type": "stack", + "identifier": str(stack_id), }, - }, + ] ) diff --git a/engine/conftest.py b/engine/conftest.py index 0d32842991..92f1a7c9a7 100644 --- a/engine/conftest.py +++ b/engine/conftest.py @@ -142,6 +142,21 @@ IS_RBAC_ENABLED = os.getenv("ONCALL_TESTING_RBAC_ENABLED", "True") == "True" +@pytest.fixture(autouse=True) +def isolated_cache(settings): + """ + https://github.com/pytest-dev/pytest-django/issues/527#issuecomment-1115887487 + """ + cache_version = uuid.uuid4().hex + + for name in settings.CACHES.keys(): + settings.CACHES[name]["VERSION"] = cache_version + + from django.test.signals import clear_cache_handlers + + clear_cache_handlers(setting="CACHES") + + @pytest.fixture(autouse=True) def mock_slack_api_call(monkeypatch): def mock_api_call(*args, **kwargs): diff --git a/engine/requirements-dev.txt b/engine/requirements-dev.txt index 810e648037..7389501da9 100644 --- a/engine/requirements-dev.txt +++ b/engine/requirements-dev.txt @@ -12,3 +12,4 @@ types-PyMySQL==1.0.19.7 types-python-dateutil==2.8.19.13 types-requests==2.31.0.1 httpretty==1.1.4 +pytest-xdist[psutil]==3.5.0 diff --git a/engine/settings/ci-test.py 
b/engine/settings/ci-test.py index eba6152511..a580f420f0 100644 --- a/engine/settings/ci-test.py +++ b/engine/settings/ci-test.py @@ -25,13 +25,12 @@ elif BROKER_TYPE == BrokerTypes.REDIS: CELERY_BROKER_URL = REDIS_URI -# use redis as cache and celery broker on CI tests -if BROKER_TYPE != BrokerTypes.REDIS: - CACHES = { - "default": { - "BACKEND": "django.core.cache.backends.locmem.LocMemCache", - } +# always use in-memory cache for testing. This makes things a lot easier wrt pytest-xdist (parallel test execution) +CACHES = { + "default": { + "BACKEND": "django.core.cache.backends.locmem.LocMemCache", + } +} # Dummy Telegram token (fake one) TELEGRAM_TOKEN = "0000000000:XXXXXXXXXXXXXXXXXXXXXXXXXXXX-XXXXXX" diff --git a/engine/tox.ini b/engine/tox.ini index 7f458ba2eb..a9e9863ee8 100644 --- a/engine/tox.ini +++ b/engine/tox.ini @@ -10,6 +10,7 @@ banned-modules = [pytest] # https://pytest-django.readthedocs.io/en/latest/configuring_django.html#order-of-choosing-settings # https://pytest-django.readthedocs.io/en/latest/database.html -addopts = --no-migrations --color=yes --showlocals +# dist=load = "load balance by sending any pending test to any available environment" +addopts = -n auto --dist=load --no-migrations --color=yes --showlocals # https://pytest-django.readthedocs.io/en/latest/faq.html#my-tests-are-not-being-found-why python_files = tests.py test_*.py *_tests.py