From 8dd5758f3cc4e7f6f85c3ee175a8caab6a84f2b8 Mon Sep 17 00:00:00 2001 From: harryob Date: Fri, 13 Jan 2023 09:10:24 +0000 Subject: [PATCH] modernizes unit tests from tg (#2163) --- .github/workflows/ci_suite.yml | 32 +++ .github/workflows/run_unit_tests.yml | 45 ++++ .vscode/extensions.json | 3 +- ColonialMarinesALPHA.dme | 19 +- code/__DEFINES/tests.dm | 15 -- code/__DEFINES/typecheck/generic_types.dm | 1 + code/__DEFINES/unit_tests.dm | 20 ++ code/__HELPERS/logging.dm | 14 +- code/_compile_options.dm | 6 + code/_globalvars/lists/mapping_globals.dm | 2 + code/controllers/subsystem/atoms.dm | 4 + code/game/area/Sulaco.dm | 1 - code/game/area/admin_level.dm | 4 + code/game/area/almayer.dm | 1 - code/game/area/space_station_13_areas.dm | 1 - .../objects/effects/landmarks/landmarks.dm | 15 ++ code/game/objects/effects/spawners/random.dm | 4 +- code/game/runtimes.dm | 5 + code/game/world.dm | 67 ++++-- code/modules/admin/admin_verbs.dm | 3 - code/modules/admin/verbs/construct_env.dm | 141 ----------- code/modules/cm_tech/implements/railgun.dm | 4 + .../mob/living/carbon/xenomorph/Xenomorph.dm | 4 +- code/modules/mob/living/living_defines.dm | 5 + code/modules/test/README.md | 145 ------------ .../test/maps/test_no_blocked_doors.dm | 28 --- .../test/maps/test_no_blocked_windows.dm | 28 --- .../modules/test/maps/test_no_space_inside.dm | 28 --- .../modules/test/maps/test_pipes_connected.dm | 57 ----- code/modules/test/maps/test_set_maps.dm | 2 - code/modules/test/run_all_tests.bat | 2 - code/modules/test/run_map_tests.bat | 2 - .../modules/test/sprites/test_hair_sprites.dm | 16 -- .../test/sprites/test_plating_sprites.dm | 21 -- code/modules/test/sprites/test_set_sprites.dm | 2 - code/modules/test/test_case.dm | 73 ------ code/modules/test/test_manager.dm | 156 ------------ code/modules/test/test_verbs.dm | 58 ----- .../testenv/environments/example/envstring.dm | 44 ---- .../test/testenv/environments/tank_gallery.dm | 78 ------ code/modules/test/testenv/test_area.dm | 5 
- code/modules/test/testenv/test_environment.dm | 144 ------------ code/modules/unit_tests/README.md | 76 ++++++ code/modules/unit_tests/_unit_tests.dm | 90 +++++++ code/modules/unit_tests/focus_only_tests.dm | 22 ++ code/modules/unit_tests/resist.dm | 15 ++ code/modules/unit_tests/spawn_humans.dm | 7 + code/modules/unit_tests/spritesheets.dm | 11 + code/modules/unit_tests/subsystem_init.dm | 14 ++ .../modules/unit_tests/tgui_create_message.dm | 28 +++ code/modules/unit_tests/timer_sanity.dm | 3 + code/modules/unit_tests/unit_test.dm | 222 ++++++++++++++++++ dependencies.sh | 2 +- maps/templates/space.dmm | 8 + maps/templates/space.json | 6 + maps/templates/unit_tests.dmm | 79 +++++++ tools/ci/check_required_commits.sh | 7 + tools/ci/run_server.sh | 13 +- tools/deploy.sh | 8 +- 59 files changed, 817 insertions(+), 1099 deletions(-) create mode 100644 .github/workflows/run_unit_tests.yml delete mode 100644 code/__DEFINES/tests.dm create mode 100644 code/__DEFINES/unit_tests.dm delete mode 100644 code/modules/test/README.md delete mode 100644 code/modules/test/maps/test_no_blocked_doors.dm delete mode 100644 code/modules/test/maps/test_no_blocked_windows.dm delete mode 100644 code/modules/test/maps/test_no_space_inside.dm delete mode 100644 code/modules/test/maps/test_pipes_connected.dm delete mode 100644 code/modules/test/maps/test_set_maps.dm delete mode 100644 code/modules/test/run_all_tests.bat delete mode 100644 code/modules/test/run_map_tests.bat delete mode 100644 code/modules/test/sprites/test_hair_sprites.dm delete mode 100644 code/modules/test/sprites/test_plating_sprites.dm delete mode 100644 code/modules/test/sprites/test_set_sprites.dm delete mode 100644 code/modules/test/test_case.dm delete mode 100644 code/modules/test/test_manager.dm delete mode 100644 code/modules/test/test_verbs.dm delete mode 100644 code/modules/test/testenv/environments/example/envstring.dm delete mode 100644 code/modules/test/testenv/environments/tank_gallery.dm delete mode 
100644 code/modules/test/testenv/test_area.dm delete mode 100644 code/modules/test/testenv/test_environment.dm create mode 100644 code/modules/unit_tests/README.md create mode 100644 code/modules/unit_tests/_unit_tests.dm create mode 100644 code/modules/unit_tests/focus_only_tests.dm create mode 100644 code/modules/unit_tests/resist.dm create mode 100644 code/modules/unit_tests/spawn_humans.dm create mode 100644 code/modules/unit_tests/spritesheets.dm create mode 100644 code/modules/unit_tests/subsystem_init.dm create mode 100644 code/modules/unit_tests/tgui_create_message.dm create mode 100644 code/modules/unit_tests/timer_sanity.dm create mode 100644 code/modules/unit_tests/unit_test.dm create mode 100644 maps/templates/space.dmm create mode 100644 maps/templates/space.json create mode 100644 maps/templates/unit_tests.dmm create mode 100644 tools/ci/check_required_commits.sh diff --git a/.github/workflows/ci_suite.yml b/.github/workflows/ci_suite.yml index fdfa90c85e1f..4a4901b26b7f 100644 --- a/.github/workflows/ci_suite.yml +++ b/.github/workflows/ci_suite.yml @@ -67,3 +67,35 @@ jobs: source $HOME/BYOND/byond/bin/byondsetup tools/build/build --ci dm -DCIBUILDING -DCITESTING -DALL_MAPS + find_all_maps: + if: "!contains(github.event.head_commit.message, '[ci skip]')" + name: Find Maps to Test + runs-on: ubuntu-20.04 + outputs: + maps: ${{ steps.map_finder.outputs.maps }} + concurrency: + group: find_all_maps-${{ github.ref }} + cancel-in-progress: true + steps: + - uses: actions/checkout@v3 + - name: Find Maps + id: map_finder + run: | + echo "$(ls -mw0 maps/*.json)" > maps_output.txt + sed -i -e s+maps/+\"+g -e s+.json+\"+g maps_output.txt + echo "Maps: $(cat maps_output.txt)" + echo "maps={\"paths\":[$(cat maps_output.txt)]}" >> $GITHUB_OUTPUT + run_all_tests: + if: "!contains(github.event.head_commit.message, '[ci skip]')" + name: Unit Tests + needs: [find_all_maps] + strategy: + fail-fast: false + matrix: + map: ${{ 
fromJSON(needs.find_all_maps.outputs.maps).paths }} + concurrency: + group: run_all_tests-${{ github.ref }}-${{ matrix.map }} + cancel-in-progress: true + uses: ./.github/workflows/run_unit_tests.yml + with: + map: ${{ matrix.map }} diff --git a/.github/workflows/run_unit_tests.yml b/.github/workflows/run_unit_tests.yml new file mode 100644 index 000000000000..6e2b2188eb52 --- /dev/null +++ b/.github/workflows/run_unit_tests.yml @@ -0,0 +1,45 @@ +# This is a reusable workflow to run unit tests on a single map. +# This is run for every single map in ci_suite.yml. You might want to edit that instead. +name: Run Unit Tests +on: + workflow_call: + inputs: + map: + required: true + type: string + major: + required: false + type: string + minor: + required: false + type: string +jobs: + run_unit_tests: + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v3 + - name: Restore BYOND cache + uses: actions/cache@v3 + with: + path: ~/BYOND + key: ${{ runner.os }}-byond-${{ secrets.CACHE_PURGE_KEY }} + - name: Install rust-g + run: | + sudo dpkg --add-architecture i386 + sudo apt update || true + sudo apt install -o APT::Immediate-Configure=false libssl1.1:i386 + bash tools/ci/install_rust_g.sh + - name: Configure version + run: | + echo "BYOND_MAJOR=${{ inputs.major }}" >> $GITHUB_ENV + echo "BYOND_MINOR=${{ inputs.minor }}" >> $GITHUB_ENV + if: ${{ inputs.major }} + - name: Compile Tests + run: | + bash tools/ci/install_byond.sh + source $HOME/BYOND/byond/bin/byondsetup + tools/build/build --ci dm -DCIBUILDING -DANSICOLORS + - name: Run Tests + run: | + source $HOME/BYOND/byond/bin/byondsetup + bash tools/ci/run_server.sh ${{ inputs.map }} diff --git a/.vscode/extensions.json b/.vscode/extensions.json index bb1b817c2971..f836f1244940 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -6,6 +6,7 @@ "EditorConfig.EditorConfig", "anturk.dmi-editor", "dbaeumer.vscode-eslint", - "esbenp.prettier-vscode" + "esbenp.prettier-vscode", + 
"donkie.vscode-tgstation-test-adapter" ] } diff --git a/ColonialMarinesALPHA.dme b/ColonialMarinesALPHA.dme index 9093b576fc59..cb11ff63d875 100644 --- a/ColonialMarinesALPHA.dme +++ b/ColonialMarinesALPHA.dme @@ -6,6 +6,7 @@ // BEGIN_FILE_DIR #define FILE_DIR . // END_FILE_DIR + // BEGIN_PREFERENCES #define DEBUG // END_PREFERENCES @@ -89,13 +90,13 @@ #include "code\__DEFINES\subsystems.dm" #include "code\__DEFINES\surgery.dm" #include "code\__DEFINES\techtree.dm" -#include "code\__DEFINES\tests.dm" #include "code\__DEFINES\text.dm" #include "code\__DEFINES\tgs.config.dm" #include "code\__DEFINES\tgs.dm" #include "code\__DEFINES\tgui.dm" #include "code\__DEFINES\traits.dm" #include "code\__DEFINES\turf_flags.dm" +#include "code\__DEFINES\unit_tests.dm" #include "code\__DEFINES\urls.dm" #include "code\__DEFINES\vehicle.dm" #include "code\__DEFINES\vendors.dm" @@ -2071,21 +2072,6 @@ #include "code\modules\teleporters\teleporter_admin_verbs.dm" #include "code\modules\teleporters\teleporter_console.dm" #include "code\modules\teleporters\teleporter_landmarks.dm" -#include "code\modules\test\test_case.dm" -#include "code\modules\test\test_manager.dm" -#include "code\modules\test\test_verbs.dm" -#include "code\modules\test\maps\test_no_blocked_doors.dm" -#include "code\modules\test\maps\test_no_blocked_windows.dm" -#include "code\modules\test\maps\test_no_space_inside.dm" -#include "code\modules\test\maps\test_pipes_connected.dm" -#include "code\modules\test\maps\test_set_maps.dm" -#include "code\modules\test\sprites\test_hair_sprites.dm" -#include "code\modules\test\sprites\test_plating_sprites.dm" -#include "code\modules\test\sprites\test_set_sprites.dm" -#include "code\modules\test\testenv\test_area.dm" -#include "code\modules\test\testenv\test_environment.dm" -#include "code\modules\test\testenv\environments\tank_gallery.dm" -#include "code\modules\test\testenv\environments\example\envstring.dm" #include "code\modules\tgchat\cm_shims.dm" #include 
"code\modules\tgchat\message.dm" #include "code\modules\tgchat\to_chat.dm" @@ -2129,6 +2115,7 @@ #include "code\modules\tgui_panel\external.dm" #include "code\modules\tgui_panel\telemetry.dm" #include "code\modules\tgui_panel\tgui_panel.dm" +#include "code\modules\unit_tests\_unit_tests.dm" #include "code\modules\vehicles\cargo_train.dm" #include "code\modules\vehicles\powerloader.dm" #include "code\modules\vehicles\train.dm" diff --git a/code/__DEFINES/tests.dm b/code/__DEFINES/tests.dm deleted file mode 100644 index fa8ea9dc12e6..000000000000 --- a/code/__DEFINES/tests.dm +++ /dev/null @@ -1,15 +0,0 @@ -#define LOG_TEST(X) world.log << "TEST: " + X - -#define TEST_PASS 1 -#define TEST_FAIL 2 -#define TEST_ERROR 3 - -#define TEST_SET_NAME "setdatum" -#define TEST_ASSERTION_FAIL "testcasefail" - -// Be very careful editing this. The CI test script will grep for this specific string -#define TEST_HOST_SUCCESS "ALL TESTS SUCCEEDED" - -// Bitflags for making certain datums exempt from certain tests -#define MAP_TEST_EXEMPTION_SPACE 1 -#define MAP_TEST_EXEMPTION_APC 2 diff --git a/code/__DEFINES/typecheck/generic_types.dm b/code/__DEFINES/typecheck/generic_types.dm index 423b8a370614..65275abefb4c 100644 --- a/code/__DEFINES/typecheck/generic_types.dm +++ b/code/__DEFINES/typecheck/generic_types.dm @@ -10,3 +10,4 @@ #define issurface(X) (istype(X, /obj/structure/surface)) #define ismovableatom(A) (ismovable(A)) #define isatom(A) (isloc(A)) +#define isfloorturf(A) (istype(A, /turf/open/floor)) diff --git a/code/__DEFINES/unit_tests.dm b/code/__DEFINES/unit_tests.dm new file mode 100644 index 000000000000..92aee0ee8f40 --- /dev/null +++ b/code/__DEFINES/unit_tests.dm @@ -0,0 +1,20 @@ +/// Are tests enabled with no focus? +/// Use this when performing test assertions outside of a unit test, +/// since a focused test means that you're trying to run a test quickly. +/// If a parameter is provided, will check if the focus is on that test name. 
+/// For example, PERFORM_ALL_TESTS(log_mapping) will only run if either +/// no test is focused, or the focus is log_mapping. +#ifdef UNIT_TESTS +// Bit of a trick here, if focus isn't passed in then it'll check for /datum/unit_test/, which is never the case. +#define PERFORM_ALL_TESTS(focus...) (isnull(GLOB.focused_test) || GLOB.focused_test == /datum/unit_test/##focus) +#else +// UNLINT necessary here so that if (PERFORM_ALL_TESTS()) works +#define PERFORM_ALL_TESTS(...) UNLINT(FALSE) +#endif + +/// ASSERT(), but it only actually does anything during unit tests +#ifdef UNIT_TESTS +#define TEST_ONLY_ASSERT(test, explanation) if(!(test)) {CRASH(explanation)} +#else +#define TEST_ONLY_ASSERT(test, explanation) +#endif diff --git a/code/__HELPERS/logging.dm b/code/__HELPERS/logging.dm index ffc12f6150a0..9df3a32999ba 100644 --- a/code/__HELPERS/logging.dm +++ b/code/__HELPERS/logging.dm @@ -3,6 +3,10 @@ #define SEND_SOUND(target, sound) DIRECT_OUTPUT(target, sound) #define WRITE_FILE(file, text) DIRECT_OUTPUT(file, text) +//This is an external call, "true" and "false" are how rust parses out booleans +#define WRITE_LOG(log, text) rustg_log_write(log, text, "true") +#define WRITE_LOG_NO_FORMAT(log, text) rustg_log_write(log, text, "false") + //print an error message to world.log @@ -12,7 +16,6 @@ /var/global/log_end= world.system_type == UNIX ? 
ascii2text(13) : "" - /proc/error(msg) world.log << "## ERROR: [msg][log_end]" GLOB.STUI.debug.Add("\[[time_stamp()]]DEBUG: [msg]") @@ -201,9 +204,6 @@ GLOB.STUI.tgui.Add("\[[time_stamp()]]TGUI: [entry]") GLOB.STUI.processing |= STUI_LOG_TGUI -//wrapper macros for easier grepping -#define WRITE_LOG(log, text) rustg_log_write(log, text, "true") - GLOBAL_VAR(config_error_log) GLOBAL_PROTECT(config_error_log) @@ -214,3 +214,9 @@ GLOBAL_PROTECT(config_error_log) /proc/log_admin_private(text) log_admin(text) + +#if defined(UNIT_TESTS) || defined(SPACEMAN_DMM) +/proc/log_test(text) + WRITE_LOG(GLOB.test_log, text) + SEND_TEXT(world.log, text) +#endif diff --git a/code/_compile_options.dm b/code/_compile_options.dm index 38d293874119..23aacf1f5f5e 100644 --- a/code/_compile_options.dm +++ b/code/_compile_options.dm @@ -6,3 +6,9 @@ // If this is uncommented, will attempt to load prof.dll (windows) or libprof.so (unix) // byond-tracy is not shipped with CM code. Build it yourself here: https://github.com/mafemergency/byond-tracy/ //#define BYOND_TRACY + +#ifdef CIBUILDING +#define UNIT_TESTS +#endif + +//#define UNIT_TESTS //If this is uncommented, we do a single run though of the game setup and tear down process with unit tests in between diff --git a/code/_globalvars/lists/mapping_globals.dm b/code/_globalvars/lists/mapping_globals.dm index 095720b8e9bc..ca12912dce0b 100644 --- a/code/_globalvars/lists/mapping_globals.dm +++ b/code/_globalvars/lists/mapping_globals.dm @@ -63,3 +63,5 @@ GLOBAL_LIST_EMPTY_TYPED(objective_landmarks_science, /obj/effect/landmark/object GLOBAL_LIST_EMPTY(comm_tower_landmarks_net_one) GLOBAL_LIST_EMPTY(comm_tower_landmarks_net_two) + +GLOBAL_LIST_EMPTY(landmarks_list) //list of all landmarks created diff --git a/code/controllers/subsystem/atoms.dm b/code/controllers/subsystem/atoms.dm index d8870c3d8cee..86fc43537712 100644 --- a/code/controllers/subsystem/atoms.dm +++ b/code/controllers/subsystem/atoms.dm @@ -69,12 +69,16 @@ 
SUBSYSTEM_DEF(atoms) BadInitializeCalls[the_type] |= BAD_INIT_QDEL_BEFORE return TRUE + #ifdef UNIT_TESTS var/start_tick = world.time + #endif var/result = A.Initialize(arglist(arguments)) + #ifdef UNIT_TESTS if(start_tick != world.time) BadInitializeCalls[the_type] |= BAD_INIT_SLEPT + #endif var/qdeleted = FALSE diff --git a/code/game/area/Sulaco.dm b/code/game/area/Sulaco.dm index 3ecba8a7e43e..33b8511d7b87 100644 --- a/code/game/area/Sulaco.dm +++ b/code/game/area/Sulaco.dm @@ -2,7 +2,6 @@ /area/shuttle ceiling = CEILING_METAL requires_power = 0 - test_exemptions = MAP_TEST_EXEMPTION_SPACE ambience_exterior = AMBIENCE_ALMAYER ceiling_muffle = FALSE diff --git a/code/game/area/admin_level.dm b/code/game/area/admin_level.dm index 890575bf9995..e2128223d320 100644 --- a/code/game/area/admin_level.dm +++ b/code/game/area/admin_level.dm @@ -124,3 +124,7 @@ icon_state = "green" requires_power = 0 flags_area = AREA_NOTUNNEL + +/area/misc/testroom + requires_power = FALSE + name = "Test Room" diff --git a/code/game/area/almayer.dm b/code/game/area/almayer.dm index 151c1fbdcef7..a5261bcd93fb 100644 --- a/code/game/area/almayer.dm +++ b/code/game/area/almayer.dm @@ -729,7 +729,6 @@ /area/almayer/evacuation/pod18 /area/almayer/evacuation/stranded - test_exemptions = MAP_TEST_EXEMPTION_SPACE //Placeholder. 
/area/almayer/evacuation/stranded/pod1 diff --git a/code/game/area/space_station_13_areas.dm b/code/game/area/space_station_13_areas.dm index ef1d80ae4e9f..1d49735c1c2f 100644 --- a/code/game/area/space_station_13_areas.dm +++ b/code/game/area/space_station_13_areas.dm @@ -25,7 +25,6 @@ NOTE: there are two lists of areas in the end of this file: centcom and station temperature = TCMB pressure = 0 flags_area = AREA_NOTUNNEL - test_exemptions = MAP_TEST_EXEMPTION_SPACE weather_enabled = FALSE /area/engine diff --git a/code/game/objects/effects/landmarks/landmarks.dm b/code/game/objects/effects/landmarks/landmarks.dm index e37ef24cd2e1..65f64e7cb066 100644 --- a/code/game/objects/effects/landmarks/landmarks.dm +++ b/code/game/objects/effects/landmarks/landmarks.dm @@ -15,8 +15,13 @@ /obj/effect/landmark/Initialize(mapload, ...) . = ..() + GLOB.landmarks_list += src invisibility = invisibility_value +/obj/effect/landmark/Destroy() + GLOB.landmarks_list -= src + return ..() + /obj/effect/landmark/newplayer_start name = "New player start" @@ -451,3 +456,13 @@ /obj/effect/landmark/zombie/infinite infinite_spawns = TRUE + +/// Marks the bottom left of the testing zone. +/// In landmarks.dm and not unit_test.dm so it is always active in the mapping tools. +/obj/effect/landmark/unit_test_bottom_left + name = "unit test zone bottom left" + +/// Marks the top right of the testing zone. +/// In landmarks.dm and not unit_test.dm so it is always active in the mapping tools. 
+/obj/effect/landmark/unit_test_top_right + name = "unit test zone top right" diff --git a/code/game/objects/effects/spawners/random.dm b/code/game/objects/effects/spawners/random.dm index 46b6363054c5..1306a4669efe 100644 --- a/code/game/objects/effects/spawners/random.dm +++ b/code/game/objects/effects/spawners/random.dm @@ -31,6 +31,8 @@ // creates the random item /obj/effect/spawner/random/proc/spawn_item() var/build_path = item_to_spawn() + if(isnull(build_path)) + return return (new build_path(src.loc)) @@ -142,7 +144,7 @@ desc = "This is a random kit." icon_state = "random_kit" -/obj/effect/spawner/random/toy/item_to_spawn() +/obj/effect/spawner/random/supply_kit/item_to_spawn() return pick(prob(3);/obj/item/storage/box/kit/pursuit,\ prob(3);/obj/item/storage/box/kit/mini_intel,\ prob(3);/obj/item/storage/box/kit/mini_jtac,\ diff --git a/code/game/runtimes.dm b/code/game/runtimes.dm index fd5966cac023..ab268c3bf943 100644 --- a/code/game/runtimes.dm +++ b/code/game/runtimes.dm @@ -12,7 +12,12 @@ GLOBAL_REAL(runtime_hashes, /list) GLOBAL_REAL(early_init_runtimes, /list) GLOBAL_REAL_VAR(early_init_runtimes_count) +GLOBAL_VAR_INIT(total_runtimes, GLOB.total_runtimes || 0) +GLOBAL_VAR_INIT(total_runtimes_skipped, 0) + /world/Error(var/exception/E) + GLOB.total_runtimes++ + ..() if(!runtime_hashes) runtime_hashes = list() diff --git a/code/game/world.dm b/code/game/world.dm index 2b8ab8221f71..c0ce80db1579 100644 --- a/code/game/world.dm +++ b/code/game/world.dm @@ -50,6 +50,10 @@ var/list/reboot_sfx = file2list("config/reboot_sfx.txt") if(CONFIG_GET(flag/log_runtime)) log = file("data/logs/runtime/[time2text(world.realtime,"YYYY-MM-DD-(hh-mm-ss)")]-runtime.log") + #ifdef UNIT_TESTS + GLOB.test_log = "data/logs/tests.log" + #endif + load_admins() jobban_loadbanfile() LoadBans() @@ -67,6 +71,9 @@ var/list/reboot_sfx = file2list("config/reboot_sfx.txt") var/testing_locally = (world.params && world.params["local_test"]) var/running_tests = (world.params && 
world.params["run_tests"]) + #ifdef UNIT_TESTS + running_tests = TRUE + #endif // Only do offline sleeping when the server isn't running unit tests or hosting a local dev test sleep_offline = (!running_tests && !testing_locally) @@ -80,6 +87,11 @@ var/list/reboot_sfx = file2list("config/reboot_sfx.txt") GLOB.timezoneOffset = text2num(time2text(0,"hh")) * 36000 Master.Initialize(10, FALSE, TRUE) + + #ifdef UNIT_TESTS + HandleTestRun() + #endif + update_status() //Scramble the coords obsfucator @@ -90,29 +102,11 @@ var/list/reboot_sfx = file2list("config/reboot_sfx.txt") if(CONFIG_GET(flag/ToRban)) ToRban_autoupdate() - // Allow the test manager to run all unit tests if this is being hosted just to run unit tests - if(running_tests) - test_executor.host_tests() - // If the server's configured for local testing, get everything set up ASAP. // Shamelessly stolen from the test manager's host_tests() proc if(testing_locally) master_mode = "extended" - // If a test environment was specified, initialize it - if(fexists("test_environment.txt")) - var/test_environment = file2text("test_environment.txt") - - var/env_type = null - for(var/type in subtypesof(/datum/test_environment)) - if("[type]" == test_environment) - env_type = type - break - - if(env_type) - var/datum/test_environment/env = new env_type() - env.initialize() - // Wait for the game ticker to initialize while(!SSticker.initialized) sleep(10) @@ -211,6 +205,11 @@ var/world_topic_spam_protect_time = world.timeofday if(server) //if you set a server location in config.txt, it sends you there instead of trying to reconnect to the same world address. 
-- NeoFite C << link("byond://[server]") + #ifdef UNIT_TESTS + FinishTestRun() + return + #endif + if(TgsAvailable()) send_tgs_restart() @@ -352,3 +351,35 @@ var/datum/BSQL_Connection/connection var/init = LIBCALL(lib, "init")() if("0" != init) CRASH("[lib] init error: [init]") + +/world/proc/HandleTestRun() + //trigger things to run the whole process + Master.sleep_offline_after_initializations = FALSE + SSticker.request_start() + CONFIG_SET(number/round_end_countdown, 0) + var/datum/callback/cb +#ifdef UNIT_TESTS + cb = CALLBACK(GLOBAL_PROC, GLOBAL_PROC_REF(RunUnitTests)) +#else + cb = VARSET_CALLBACK(SSticker, force_ending, TRUE) +#endif + SSticker.OnRoundstart(CALLBACK(GLOBAL_PROC, GLOBAL_PROC_REF(_addtimer), cb, 10 SECONDS)) + +/world/proc/FinishTestRun() + set waitfor = FALSE + var/list/fail_reasons + if(GLOB) + if(GLOB.total_runtimes != 0) + fail_reasons = list("Total runtimes: [GLOB.total_runtimes]") +#ifdef UNIT_TESTS + if(GLOB.failed_any_test) + LAZYADD(fail_reasons, "Unit Tests failed!") +#endif + else + fail_reasons = list("Missing GLOB!") + if(!fail_reasons) + text2file("Success!", "data/logs/ci/clean_run.lk") + else + log_world("Test run failed!\n[fail_reasons.Join("\n")]") + sleep(0) //yes, 0, this'll let Reboot finish and prevent byond memes + qdel(src) //shut it down diff --git a/code/modules/admin/admin_verbs.dm b/code/modules/admin/admin_verbs.dm index 34f121e877de..608248d6f6e0 100644 --- a/code/modules/admin/admin_verbs.dm +++ b/code/modules/admin/admin_verbs.dm @@ -178,9 +178,6 @@ var/list/admin_verbs_debug = list( /client/proc/togglenichelogs, /client/proc/cmd_admin_change_hivenumber, /client/proc/spawn_wave, - /client/proc/run_all_tests, - /client/proc/run_test_set, - /client/proc/run_individual_test, /client/proc/toggle_log_hrefs, /client/proc/matrix_editor, /client/proc/generate_sound_queues, diff --git a/code/modules/admin/verbs/construct_env.dm b/code/modules/admin/verbs/construct_env.dm index 7f579f1fc428..8c14a76428fa 100644 --- 
a/code/modules/admin/verbs/construct_env.dm +++ b/code/modules/admin/verbs/construct_env.dm @@ -106,144 +106,3 @@ catch (var/ex) to_chat(src, SPAN_NOTICE("Encountered an error whilst constructing the map! [ex]")) message_staff("[key_name_admin(usr)] failed to construct the DMM file.") - - - -/client/proc/construct_env() - set category = "Admin.Events" - set name = "Construct Environment" - - if(!admin_holder || !(admin_holder.rights & R_ADMIN)) - to_chat(usr, "Only administrators may use this command.") - return - - var/input = input(usr, "Enter an environment script.", "Construct Environment") as message|null - if(!input) - return - - var/list/env_script = splittext(input, "===") - if(length(env_script) != 2) - to_chat(usr, SPAN_WARNING("Invalid environment script! The script should contain an environment string and assignments, split by ===")) - return - - // A list of errors that were encountered during script parsing - var/list/errors = null - - var/env_string = trim(env_script[1]) - var/list/assignment_lines = splittext(env_script[2], "\n") - var/list/env_assignments = null - - var/regex/dir_regex = new("\\{(N|S|W|E)\\}$") - - for(var/line in assignment_lines) - // Ignore empty lines - if(!length(line)) - continue - - // Check that the line contains exactly two parts, split by a = - var/list/assignment = splittext(line, "=") - if(length(assignment) != 2) - LAZYADD(errors, "[line]: invalid assignment") - continue - - // Check that the key/character is just 1 character long - var/assignment_key = assignment[1] - if(length(assignment_key) != 1) - LAZYADD(errors, "[line]: assignment key must be one character") - continue - - // Check that the key isn't a prefab/reserved - if(assignment_key in test_env_prefab_types) - LAZYADD(errors, "[line]: assignment key '[assignment_key]' is reserved") - continue - - // Check that the assignment value(s) can be converted to a typepath (and dir) - var/assignments = null - var/list/types_to_spawn = splittext(assignment[2], ",") - 
if(!types_to_spawn) - LAZYADD(errors, "[line]: found no types to assign key to") - continue - - for(var/path in types_to_spawn) - var/text = trim(path) - - var/dir_to_use = null - if(dir_regex.Find(text)) - switch(dir_regex.group[1]) - if("N") - dir_to_use = NORTH - if("S") - dir_to_use = SOUTH - if("W") - dir_to_use = WEST - if("E") - dir_to_use = EAST - - if(dir_to_use) - text = copytext(text, 1, length(text)-2) - - var/type = text2path(text) - if(!type) - LAZYADD(errors, "[line]: couldn't convert all assignment values to typepaths") - continue - - LAZYSET(assignments, type, dir_to_use) - - // Check that the key hasn't been assigned a value already - var/key_assignment = LAZYACCESS(env_assignments, assignment_key) - if(key_assignment) - LAZYADD(errors, "[line]: assignment key '[assignment_key]' has already been assigned to [key_assignment]") - continue - - // Assign the key/character to a typepath - LAZYSET(env_assignments, assignment_key, assignments) - - // Check that all custom characters in the env string were assigned to a type - for(var/line in splittext(env_string, "\n")) - for(var/char in splittext(line, regex("."), 1, 0, TRUE)) - if(!char) - continue - - // Prefab characters don't count - if(char in test_env_prefab_types || char == "\n") - continue - - // If the key was assigned to a - if(LAZYACCESS(env_assignments, char)) - continue - - LAZYADD(errors, "custom symbol '[char]' was not assigned a typepath") - - // Give up if there were any errors - if(errors) - to_chat(usr, SPAN_WARNING("The environment script could not be executed due to the following errors:")) - for(var/message in errors) - to_chat(usr, message) - return - - var/datum/test_environment/env = new() - - switch(alert("Where would you like to construct the environment?", "Choose Position", "Here", "Custom")) - if("Here") - env.x = usr.x - env.y = usr.y - env.z = usr.z - if("Custom") - var/x = tgui_input_number(usr, "Enter X position", "Environment Position", 0, world.maxx, 0) - if(!x) - 
return - var/y = tgui_input_number(usr, "Enter Y position", "Environment Position", 0, world.maxy, 0) - if(!y) - return - var/z = tgui_input_number(usr, "Enter Z position", "Environment Position", 0, world.maxz, 0) - if(!z) - return - env.x = x - env.y = y - env.z = z - - env.environment_string = env_string - env.environment_assignments = env_assignments - - to_chat(src, SPAN_NOTICE("Constructing environment...")) - env.initialize() diff --git a/code/modules/cm_tech/implements/railgun.dm b/code/modules/cm_tech/implements/railgun.dm index 31e51c6f4352..8b8faf81908a 100644 --- a/code/modules/cm_tech/implements/railgun.dm +++ b/code/modules/cm_tech/implements/railgun.dm @@ -48,6 +48,10 @@ GLOBAL_DATUM(railgun_eye_location, /datum/coords) /obj/structure/machinery/computer/railgun/Initialize() . = ..() + + if(is_admin_level(SSmapping.ground_start) || is_mainship_level(SSmapping.ground_start)) + return + if(!GLOB.railgun_eye_location) stack_trace("Railgun eye location is not initialised! There is no landmark for it on [SSmapping.configs[GROUND_MAP].map_name]") return INITIALIZE_HINT_QDEL diff --git a/code/modules/mob/living/carbon/xenomorph/Xenomorph.dm b/code/modules/mob/living/carbon/xenomorph/Xenomorph.dm index ff8e47a97947..ef9a7809deb1 100644 --- a/code/modules/mob/living/carbon/xenomorph/Xenomorph.dm +++ b/code/modules/mob/living/carbon/xenomorph/Xenomorph.dm @@ -70,8 +70,8 @@ var/datum/caste_datum/caste // Used to extract determine ALL Xeno stats. 
var/speaking_key = "x" var/speaking_noise = "alien_talk" - var/slash_verb = "slash" - var/slashes_verb = "slashes" + slash_verb = "slash" + slashes_verb = "slashes" var/slash_sound = "alien_claw_flesh" health = 5 maxHealth = 5 diff --git a/code/modules/mob/living/living_defines.dm b/code/modules/mob/living/living_defines.dm index f9bd16788011..51976509e138 100644 --- a/code/modules/mob/living/living_defines.dm +++ b/code/modules/mob/living/living_defines.dm @@ -93,7 +93,12 @@ var/current_weather_effect_type + + var/slash_verb = "attack" + var/slashes_verb = "attacks" + ///what icon the mob uses for speechbubbles var/bubble_icon = "default" var/bubble_icon_x_offset = 0 var/bubble_icon_y_offset = 0 + diff --git a/code/modules/test/README.md b/code/modules/test/README.md deleted file mode 100644 index 4f340b72a910..000000000000 --- a/code/modules/test/README.md +++ /dev/null @@ -1,145 +0,0 @@ -# Organizing tests -Test cases should be organized in test sets, which are named collections of individual tests. Test results belonging to the same set are grouped together during test execution. Test sets may also be run individually from the debug menu. - -To define a test set: -1. Make a new folder for your test set in `test/`, e.g. `test/mob_death/` -2. Make a new file for your test set definition, e.g. `test/mob_death/test_set_mob_death.dm` -4. Define the test set: -``` -/datum/test_case/mob_death - test_set = "Mob death" -``` -**NOTE**: You should not set the `name` variable for the datum defining a test set. - -# Writing test cases -Define test cases like this, preferably in their own files, e.g. `test/TESTSETNAME/TESTCASENAME.dm`: -``` -/datum/test_case/TESTSETNAME/TESTCASENAME - name = "description of what the test case is testing" - -// Run the test in here -// A *falsy return* from this is interpreted as the test *passing* -// Remember: not returning anything is a falsy return by default -/datum/test_case/TESTSETNAME/TESTCASENAME/test() - ... 
- -// This is run before test() -// Set up stuff you need in the test here -// This could be spawning mobs, items, etc. -/datum/test_case/TESTSETNAME/TESTCASENAME/setUp() - ... -``` - -`test()` should be a short proc that checks if the program behaved correctly, which can be done by using one or more of the following assertions: - -``` -// Assert that a statement evaluates to true -/datum/test_case/proc/assertTrue(var/statement, var/fail_msg) - -// Assert that a statement evaluates to false -/datum/test_case/proc/assertFalse(var/statement, var/fail_msg) - -// Assert that a equals b -/datum/test_case/proc/assertEquals(var/a, var/b, var/fail_msg) - -// Assert that a > b -/datum/test_case/proc/assertGt(var/a, var/b, var/fail_msg) - -// Assert that a >= b -/datum/test_case/proc/assertGtEq(var/a, var/b, var/fail_msg) - -// Assert that a < b -/datum/test_case/proc/assertLt(var/a, var/b, var/fail_msg) - -// Assert that a <= b -/datum/test_case/proc/assertLtEq(var/a, var/b, var/fail_msg) - -// Assert that a is in the list L -/datum/test_case/proc/assertInList(var/a, var/list/L, var/fail_msg) -``` -The `fail_msg` variable is *technically* optional, but should *always* be used. If the test fails on any assertion, `fail_msg` is output by the test framework as an explanation for why the test failed. - -If you want finer control over when the test should fail, you can use the `fail()` proc to manually fail the test at any time. -``` -// Fails the test with the given message -/datum/test_case/proc/fail(var/fail_msg) -``` - - If the test case requires setup before it runs, for example building a room, spawning a mob, etc. you can do it in the `setUp()` proc of your test case. - -# Running tests -You have a couple of options for running tests. 
- -There are three in-game debug verbs: -- Run All Tests - Runs *all* test sets, which in turn runs *all* test cases -- Run Test Set - Runs a specific test set, meaning all test cases belonging to the chosen test set -- Run Test Case - Runs a specific test case - -The test manager is also available through the global variable `test_executor`. Use the verbose argument for detailed logs: -- To run all test cases, call: -`test_executor.run_all_tests(var/verbose=FALSE)` - -- To run a specific test set, call: -`test_executor.run_test_set(var/test_set, var/verbose=FALSE)` -**NOTE**: You should be passing the name of the test set into this proc, not the datum defining the test set. - -- To run an individual test, call: -`test_executor.run_individual_test(var/datum/test_case/case, var/verbose=FALSE)` -**NOTE**: You should be passing a test case datum here. The test manager will already have one, but it’s perfectly safe and far easier to just create a new one. - -You can also run all test cases when the server boots up by passing the `run_tests` world parameter to Dream Daemon when starting the server. If this is done, the server will begin the round as soon as possible, run all test cases, then shut down. You can also pass the `verbose_tests` parameter for more detailed test logs. - -# Interpreting test results -The output from the test depends on whether you’re running all tests, a test set or just a single test case. Whether or not the test ran in verbose mode also affects the log output. - -When running individual test cases, `run_individual_test()` will only return a value indicating the result of the test. That is, one of `TEST_PASS`, `TEST_FAIL` or `TEST_ERROR`. These are pretty straightforward, but it should be noted that there is a distinction between a test failing and a test erroring. - -The test can only fail if an assertion in the test case fails, meaning that a test fail indicates you have syntactically valid code, but it produces the wrong behavior. 
If the test *errored*, it means a runtime occured while the test case was running, either in the feature code or the test case code itself. If a test fails or errors, it will always log details. In the case of a fail, the log will include basic information about what kind of assertion failed, while errors also log the full details of the runtime that occured. - -Output from a single test case looks like this (`verbose` = true): - -`Test case: Undefibbables should not process - pass` - -If you are running a test set, the test manager sums the test results and returns them in a list. The amount of any test result type can be retrieved by indexing `TEST_PASS`, `TEST_FAIL` or `TEST_ERROR` respectively. If the test set is run in verbose mode, it will log how many passes, fails and errors there were in the test set. - -Output from a test set looks something like this (`verbose` = true): - -``` -Test case: Undefibbables should not process - pass -Test case Dead mobs process reagents failed with the following message: -expected 0 >= 1 -Test case: Dead mobs process reagents - fail -... - -Finished test set: Mob death -Pass: 4 -Fail: 1 -Error: 1 -``` - -Running all tests will run every test set, sum the results from each test set, then return them in a list. The amount of any test result type can be retrieved by indexing `TEST_PASS`, `TEST_FAIL` or `TEST_ERROR` respectively. If the tests are run in verbose mode, the only additional log output is which test sets are being run. - -Output from running all tests looks something like this (`verbose` = true): -``` -Running test set: Mob death -Test case: Undefibbables should not process - pass -Test case Dead mobs process reagents failed with the following message: -expected 0 >= 1 -Test case: Dead mobs process reagents - fail -... - -Finished test set: Mob death -Pass: 4 -Fail: 1 -Error: 1 - -Running test set: Something else -... 
- -Finished 23 test(s) in 381.8 seconds ----------------------------------------- -Pass: 16 -Fail: 6 -Error: 1 ----------------------------------------- -``` diff --git a/code/modules/test/maps/test_no_blocked_doors.dm b/code/modules/test/maps/test_no_blocked_doors.dm deleted file mode 100644 index 24d5a39d0d05..000000000000 --- a/code/modules/test/maps/test_no_blocked_doors.dm +++ /dev/null @@ -1,28 +0,0 @@ -/datum/test_case/map/turf_blocks_doors - name = "Dense turfs shouldn't be inside doors" - -/datum/test_case/map/turf_blocks_doors/test() - // Store all the blocked doors we found - var/list/blocked_doors = list() - - for(var/area/A in all_areas) - if(!A.z) - continue - - for(var/turf/T in A) - if(!T.density) - continue - - var/obj/structure/machinery/door/D = locate() in T - if(!D) - continue - - blocked_doors += "[D] at ([T.x], [T.y], [T.z]) - in [A.name]" - - // Check that no blocked doors were found - if(blocked_doors.len) - var/fail_msg = "found [blocked_doors.len] doors containing dense turfs:\n" - for(var/location in blocked_doors) - fail_msg += "[location]\n" - - fail(fail_msg) diff --git a/code/modules/test/maps/test_no_blocked_windows.dm b/code/modules/test/maps/test_no_blocked_windows.dm deleted file mode 100644 index 266f0de60f43..000000000000 --- a/code/modules/test/maps/test_no_blocked_windows.dm +++ /dev/null @@ -1,28 +0,0 @@ -/datum/test_case/map/turf_blocks_windows - name = "Dense turfs shouldn't be inside windows" - -/datum/test_case/map/turf_blocks_doors/test() - // Store all the blocked windows we found - var/list/blocked_windows = list() - - for(var/area/A in all_areas) - if(!A.z) - continue - - for(var/turf/T in A) - if(!T.density) - continue - - var/obj/structure/window/W = locate() in T - if(!W) - continue - - blocked_windows += "[W] at ([T.x], [T.y], [T.z]) - in [A.name]" - - // Check that no blocked windows were found - if(blocked_windows.len) - var/fail_msg = "found [blocked_windows.len] windows containing dense turfs:\n" - 
for(var/location in blocked_windows) - fail_msg += "[location]\n" - - fail(fail_msg) diff --git a/code/modules/test/maps/test_no_space_inside.dm b/code/modules/test/maps/test_no_space_inside.dm deleted file mode 100644 index 610b8d666a70..000000000000 --- a/code/modules/test/maps/test_no_space_inside.dm +++ /dev/null @@ -1,28 +0,0 @@ -/datum/test_case/map/space_in_areas - name = "There should be no space turfs in non-space areas" - -/datum/test_case/map/space_in_areas/test() - // Store all the space turfs we found here - var/list/space_turfs = list() - - for(var/area/A in all_areas) - if(!A.z) - continue - - // Don't check areas that are exempt from the test - if(A.test_exemptions & MAP_TEST_EXEMPTION_SPACE) - continue - - for(var/turf/open/space/S in A) - // Only base space turfs - if(!(S.type == /turf/open/space)) - continue - space_turfs += "([S.x], [S.y], [S.z]) - in [A.name]" - - // Check that no space turfs were found - if(space_turfs.len) - var/fail_msg = "found [space_turfs.len] space turfs in non-space areas:\n" - for(var/location in space_turfs) - fail_msg += "[location]\n" - - fail(fail_msg) diff --git a/code/modules/test/maps/test_pipes_connected.dm b/code/modules/test/maps/test_pipes_connected.dm deleted file mode 100644 index 38920bc06527..000000000000 --- a/code/modules/test/maps/test_pipes_connected.dm +++ /dev/null @@ -1,57 +0,0 @@ -/datum/test_case/map/pipes_connected - name = "All pipes should be properly connected" - -/datum/test_case/map/pipes_connected/test() - // Store all unconnected pipes we found - var/list/improper_pipes = list() - - for(var/area/A in all_areas) - if(!A.z) - continue - - for(var/turf/T in A) - var/obj/structure/pipes/P = locate() in T - if(!P) - continue - - // Check if the pipe is intentionally a broken segment - if(findtext(P.icon_state, "exposed")) - continue - - var/fail = FALSE - var/is_special = FALSE - - if(!length(P.connected_to)) - fail = TRUE - - var/check_connections = 0 - for(var/direction in 
P.valid_directions) - for(var/obj/structure/pipes/target in get_step(P, direction)) - check_connections++ - break - - if(istype(P, /obj/structure/pipes/vents)) - is_special = TRUE - - if(istype(P, /obj/structure/pipes/unary)) - is_special = TRUE - - if(!is_special && check_connections != length(P.valid_directions)) - to_world("failed [check_connections] and [length(P.valid_directions)]") - fail = TRUE - - if(is_special && (check_connections < 1 || 4 < check_connections)) - fail = TRUE - - if(!fail) - continue - - improper_pipes += "([T.x], [T.y], [T.z]) - in [A.name]" - - // Check that there were no improperly connected pipes - if(improper_pipes.len) - var/fail_msg = "found [improper_pipes.len] improperly connected pipe segments:\n" - for(var/location in improper_pipes) - fail_msg += "[location]\n" - - fail(fail_msg) diff --git a/code/modules/test/maps/test_set_maps.dm b/code/modules/test/maps/test_set_maps.dm deleted file mode 100644 index be505ad009b0..000000000000 --- a/code/modules/test/maps/test_set_maps.dm +++ /dev/null @@ -1,2 +0,0 @@ -/datum/test_case/map - test_set = "Map tests" diff --git a/code/modules/test/run_all_tests.bat b/code/modules/test/run_all_tests.bat deleted file mode 100644 index 79e7fb19e64b..000000000000 --- a/code/modules/test/run_all_tests.bat +++ /dev/null @@ -1,2 +0,0 @@ -@echo off -call DreamDaemon ../ColonialMarinesALPHA.dmb 58140 -trusted -params "run_tests=1&verbose_tests=1" diff --git a/code/modules/test/run_map_tests.bat b/code/modules/test/run_map_tests.bat deleted file mode 100644 index c319358e687a..000000000000 --- a/code/modules/test/run_map_tests.bat +++ /dev/null @@ -1,2 +0,0 @@ -@echo off -call DreamDaemon ../ColonialMarinesALPHA.dmb 58140 -trusted -params "run_tests=1&test_set=Map tests&verbose_tests=1" diff --git a/code/modules/test/sprites/test_hair_sprites.dm b/code/modules/test/sprites/test_hair_sprites.dm deleted file mode 100644 index af20ea0eda78..000000000000 --- a/code/modules/test/sprites/test_hair_sprites.dm 
+++ /dev/null @@ -1,16 +0,0 @@ -/datum/test_case/sprite/hair_sprites_exist - name = "All hair and facial hair styles should have sprites" - -/datum/test_case/sprite/hair_sprites_exist/test() - var/list/failed_styles = list() - var/list/icon_state_per_file_cache = list() - for(var/datum/sprite_accessory/testing_style as anything in subtypesof(/datum/sprite_accessory/hair) + subtypesof(/datum/sprite_accessory/facial_hair)) - var/icon_file = initial(testing_style.icon) - if(!icon_state_per_file_cache["[icon_file]"]) - icon_state_per_file_cache["[icon_file]"] = icon_states(icon_file) - if(!("[initial(testing_style.icon_state)]_s" in icon_state_per_file_cache["[icon_file]"])) - failed_styles += initial(testing_style.name) - - // Check that no space turfs were found - if(length(failed_styles)) - fail("These hairstyles lack sprites: [english_list(failed_styles)]") diff --git a/code/modules/test/sprites/test_plating_sprites.dm b/code/modules/test/sprites/test_plating_sprites.dm deleted file mode 100644 index aacea7ce18b5..000000000000 --- a/code/modules/test/sprites/test_plating_sprites.dm +++ /dev/null @@ -1,21 +0,0 @@ -/datum/test_case/sprite/plating_sprites_exist - name = "All destructable open floor turfs should have plating sprites" - -/datum/test_case/sprite/plating_sprites_exist/test() - var/list/failed_icons = list() - var/list/icon_state_per_file_cache = list() - for(var/turf_type in typesof(/turf/open/floor)) - var/turf/open/floor/F = new turf_type(usr.loc) - if(F.is_grass_floor()) - continue - - var/icon_file = F.icon - if(!icon_state_per_file_cache["[icon_file]"]) - icon_state_per_file_cache["[icon_file]"] = icon_states(icon_file) - if(!("plating" in icon_state_per_file_cache["[icon_file]"])) - failed_icons += icon_file - - message_admins("These icons lack plating sprites: [english_list(failed_icons)]") - - if(length(failed_icons)) - fail("These icons lack plating sprites: [english_list(failed_icons)]") diff --git a/code/modules/test/sprites/test_set_sprites.dm 
b/code/modules/test/sprites/test_set_sprites.dm deleted file mode 100644 index 8c15f404513f..000000000000 --- a/code/modules/test/sprites/test_set_sprites.dm +++ /dev/null @@ -1,2 +0,0 @@ -/datum/test_case/sprite - test_set = "Sprite Tests" diff --git a/code/modules/test/test_case.dm b/code/modules/test/test_case.dm deleted file mode 100644 index ba62289bd6bd..000000000000 --- a/code/modules/test/test_case.dm +++ /dev/null @@ -1,73 +0,0 @@ -/datum/test_case - // Name of the test - var/name = TEST_SET_NAME - - // A name for the category or set this test case belongs in - var/test_set = "Tests" - - // Whether or not the test should be run - var/should_run = TRUE - -// Fails the test by throwing a special exception, ending it immediately -// @param fail_message - A message to display about why the test failed -/datum/test_case/proc/fail(var/fail_message=null) - if(!fail_message) - fail_message = "no fail message given (write fail messages for your tests!!!)" - - var/exception/fail_exception = EXCEPTION(TEST_ASSERTION_FAIL) - fail_exception.desc = fail_message - - throw fail_exception - -// Assertions -// ---------- -// -// These procs check if a given condition is satisfied. -// If it isn't, it will cancel the test and fail it. - -// Assert that a statement evaluates to true -// @param statement - The statement to evaluate -// @param fail_description - A short message describing why the test failed if it turns out to fail -/datum/test_case/proc/assertTrue(var/statement, var/fail_msg=null) - if(!statement) - if(!fail_msg) - fail_msg = "expected [statement] to be truthy (1)" - - fail(fail_msg) - -// Assert that a statement evaluates to false -/datum/test_case/proc/assertFalse(var/statement, var/fail_msg=null) - assertTrue(!statement, (fail_msg ? fail_msg : "expected [statement] to be falsy (0)")) - -// Assert that a equals b -/datum/test_case/proc/assertEquals(var/a, var/b, var/fail_msg=null) - assertTrue(a == b, (fail_msg ? 
fail_msg : "expected [a] == [b]")) - -// Assert that a > b -/datum/test_case/proc/assertGt(var/a, var/b, var/fail_msg=null) - assertTrue(a > b, (fail_msg ? fail_msg : "expected [a] > [b]")) - -// Assert that a >= b -/datum/test_case/proc/assertGtEq(var/a, var/b, var/fail_msg=null) - assertTrue(a >= b, (fail_msg ? fail_msg : "expected [a] >= [b]")) - -// Assert that a < b -/datum/test_case/proc/assertLt(var/a, var/b, var/fail_msg=null) - assertTrue(a < b, (fail_msg ? fail_msg : "expected [a] < [b]")) - -// Assert that a <= b -/datum/test_case/proc/assertLtEq(var/a, var/b, var/fail_msg=null) - assertTrue(a <= b, (fail_msg ? fail_msg : "expected [a] <= [b]")) - -// Assert that a is in the list L -/datum/test_case/proc/assertInList(var/a, var/list/L, var/fail_msg=null) - assertTrue(a in L, (fail_msg ? fail_msg : "expected [a] to be in list (L.len=[L.len])")) - -// Set up everything you need for your test here -/datum/test_case/proc/setUp() - return - -// Run the test in here -// A falsy return from this is interpreted as the test passing -/datum/test_case/proc/test() - return 0 diff --git a/code/modules/test/test_manager.dm b/code/modules/test/test_manager.dm deleted file mode 100644 index 7fa91ccdf7f3..000000000000 --- a/code/modules/test/test_manager.dm +++ /dev/null @@ -1,156 +0,0 @@ -var/datum/test_manager/test_executor = new - -/datum/test_manager - // A list of lists containing test sets to run and the individual test cases in each set - var/list/test_sets = null - - var/list/result_descriptions = list("pass", "fail", "error") - -/datum/test_manager/New() - ..() - - test_sets = list() - - for(var/test_type in subtypesof(/datum/test_case)) - var/datum/test_case/case = new test_type() - - // Don't try to run datums defining a set of test cases - // Oh, and those marked for not being run - if(!case.should_run || case.name == TEST_SET_NAME) - qdel(case) - continue - - if(!test_sets[case.test_set]) - test_sets[case.test_set] = list() - - test_sets[case.test_set] += 
case - -// Runs an individual test case -// @param case - The test case to run -// @param verbose - Whether or not to log the result of the test -// @return success - TEST_PASS if the test succeeded -// TEST_FAIL if the test failed -// TEST_ERROR if the test threw an error -/datum/test_manager/proc/run_individual_test(var/datum/test_case/case, var/verbose=FALSE) - if(!case) - return TEST_FAIL - - // Set the test up, but throw an exception if it fails during setup - try - case.setUp() - catch(var/exception/E) - E.name = "[case.name] threw an exception during the test setup!\n[E.name]" - throw E - - var/result = TEST_FAIL - try - result = case.test() - - // Falsey return is considered a pass because then we don't - // have to explicitly return anything for the test to pass - if(!result) - result = TEST_PASS - else - result = TEST_FAIL - catch(var/exception/TE) - // Exception thrown by an assertion to fail the test - if(TE.name == TEST_ASSERTION_FAIL) - LOG_TEST("[case.name] failed with the following message:") - LOG_TEST("[TE.desc]") - - result = TEST_FAIL - else - LOG_TEST("[case.name] threw an exception during the test routine:") - LOG_TEST(TE.name) - LOG_TEST(TE.desc) - - result = TEST_ERROR - - if(verbose) - LOG_TEST("[case.name] - [result_descriptions[result]]") - - return result - -// Run a set of test cases -// @param test_set - The test set to run -// @param verbose - Whether or not to log the result of each individual test -// @return stats - A list of how many tests passed, failed and errored -/datum/test_manager/proc/run_test_set(var/test_set, var/verbose=FALSE) - if(!test_sets[test_set]) - return null - - var/list/result_counts = list(0, 0, 0) - for(var/datum/test_case/case in test_sets[test_set]) - var/result = run_individual_test(case, verbose) - result_counts[result]++ - - if(verbose) - LOG_TEST("Finished test set: [test_set]") - LOG_TEST("Pass: [result_counts[TEST_PASS]]") - LOG_TEST("Fail: [result_counts[TEST_FAIL]]") - LOG_TEST("Error: 
[result_counts[TEST_ERROR]]") - - return result_counts - -// Run all sets of test cases. Regardless of the verbose argument, exceptions will be logged! -// @param verbose - Whether or not to log additional information related to the tests -/datum/test_manager/proc/run_all_tests(var/verbose=FALSE) - LOG_TEST("Running tests...") - - var/start = world.time - var/tests_run = 0 - - var/list/total_results = list(0, 0, 0) - for(var/test_set in test_sets) - if(verbose) LOG_TEST("Running test set: [test_set]") - var/list/results = run_test_set(test_set, verbose) - - if(isnull(results)) - throw EXCEPTION("Test set [test_set] failed to return its results!") - - total_results[TEST_PASS] += results[TEST_PASS] - total_results[TEST_FAIL] += results[TEST_FAIL] - total_results[TEST_ERROR] += results[TEST_ERROR] - - tests_run += (results[TEST_PASS] + results[TEST_FAIL] + results[TEST_ERROR]) - - var/test_duration = (world.time - start)/10 - LOG_TEST("Finished [tests_run] test(s) in [test_duration] seconds") - LOG_TEST("----------------------------------------") - LOG_TEST("Pass: [total_results[TEST_PASS]]") - LOG_TEST("Fail: [total_results[TEST_FAIL]]") - LOG_TEST("Error: [total_results[TEST_ERROR]]") - LOG_TEST("----------------------------------------") - - return total_results - -// The server is being run to do tests only, so start the game, run them and shut down -/datum/test_manager/proc/host_tests() - master_mode = "extended" - - // Wait for the game ticker to initialize - while(!SSticker.initialized) - sleep(10) - - // Start the game - SSticker.request_start() - - // Wait for the game to start - while(SSticker.current_state != GAME_STATE_PLAYING) - sleep(10) - - // Run the tests - var/verbose = world.params["verbose_tests"] - var/test_set = world.params["test_set"] - var/list/results = null - if(test_set) - results = run_test_set(test_set, verbose) - else - results = run_all_tests(verbose) - - if(results) - var/all_tests_passed = results[TEST_FAIL] == 0 && 
results[TEST_ERROR] == 0 - if(all_tests_passed) - world.log << TEST_HOST_SUCCESS - - shutdown() diff --git a/code/modules/test/test_verbs.dm b/code/modules/test/test_verbs.dm deleted file mode 100644 index 6f2d629169f4..000000000000 --- a/code/modules/test/test_verbs.dm +++ /dev/null @@ -1,58 +0,0 @@ -/client/proc/run_all_tests() - set name = "Run All Tests" - set category = "Debug.Tests" - - if(!check_rights(R_DEBUG)) - return - - var/verbose = (alert("Verbose?",,"Yes","No") == "Yes") - - if(alert("Are you sure?",,"Yes","No") != "Yes") - return - - log_admin("[key_name(src)] ran ALL test cases! verbose=[verbose]") - test_executor.run_all_tests(verbose) - -/client/proc/run_test_set() - set name = "Run Test Set" - set category = "Debug.Tests" - - if(!check_rights(R_DEBUG)) - return - - var/set_to_run = tgui_input_list(usr, "Select test set","Test", test_executor.test_sets) - if(!set_to_run) - return - var/verbose = (alert("Verbose?",,"Yes","No") == "Yes") - - log_admin("[key_name(src)] ran the [set_to_run] test set. 
verbose=[verbose]") - var/list/results = test_executor.run_test_set(set_to_run, verbose) - - to_chat(src, "Test results:") - to_chat(src, "Pass: [results[TEST_PASS]]") - to_chat(src, "Fail: [results[TEST_FAIL]]") - to_chat(src, "Error: [results[TEST_ERROR]]") - -/client/proc/run_individual_test() - set name = "Run Test Case" - set category = "Debug.Tests" - - if(!check_rights(R_DEBUG)) - return - - var/list/all_tests = list() - - for(var/test_set in test_executor.test_sets) - for(var/datum/test_case/case in test_executor.test_sets[test_set]) - all_tests[case.name] = case - - var/test_name = tgui_input_list(usr, "Select test case","Test", all_tests) - if(!test_name) - return - var/verbose = (alert("Verbose?",,"Yes","No") == "Yes") - - log_admin("[key_name(src)] ran the [test_name] test case.") - - var/datum/test_case/case = all_tests[test_name] - var/result = test_executor.run_individual_test(case, verbose) - to_chat(src, "Test result for [test_name]: [test_executor.result_descriptions[result]]") diff --git a/code/modules/test/testenv/environments/example/envstring.dm b/code/modules/test/testenv/environments/example/envstring.dm deleted file mode 100644 index 1ce4990c33ec..000000000000 --- a/code/modules/test/testenv/environments/example/envstring.dm +++ /dev/null @@ -1,44 +0,0 @@ -// Simple test environment that demonstrates usage of environment strings -// This makes a 11x6 room with a spec, a drone and a gun in it -/datum/test_environment/example/envstring - environment_string = {" - HHHHHHHHHHH - HFFFFFFFFFH - HFFgFFFFFFH - HFFFFmFFdFH - HFFFFFFFFFH - HHHHHHHHHHH - "} - - // The mob we're spawning in - var/mob/test_mob = null - -// Spawn in a gun at g and a mob at m -/datum/test_environment/example/envstring/populate(character, char_turf) - // Give em some flooring - new /turf/open/floor/almayer(char_turf) - - switch(character) - if("g") - new /obj/item/weapon/gun/rifle/m41a(char_turf) - if("m") - test_mob = new /mob/living/carbon/human(char_turf) - if("d") - 
new /mob/living/carbon/Xenomorph/Drone(char_turf) - -// Key into the mob -/datum/test_environment/example/envstring/insert_actors() - set waitfor = FALSE - - // Wait for a client to key in - while(!length(GLOB.clients)) - sleep(10) - - for(var/client/C in GLOB.clients) - var/datum/mind/M = C.mob.mind - M.transfer_to(test_mob) - - // Equip the mob as a spec - arm_equipment(test_mob, /datum/equipment_preset/uscm/specialist_equipped, TRUE, TRUE) - - break diff --git a/code/modules/test/testenv/environments/tank_gallery.dm b/code/modules/test/testenv/environments/tank_gallery.dm deleted file mode 100644 index b2c1a092568e..000000000000 --- a/code/modules/test/testenv/environments/tank_gallery.dm +++ /dev/null @@ -1,78 +0,0 @@ -/datum/test_environment/tank_gallery - environment_string = {" - HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH - HFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFH - HFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFH - HFFaFFFFbFFFFcFFFFdFFFFeFFFFfFFFFgFFFFhFFH - HFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFH - HFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFH - HFFFFFFFFFFFFFFFFFFFFMFFFFFFFFFFFFFFFFFFFH - HFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFH - HFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFH - HFFiFFFFjFFFFkFFFFlFFFFmFFFFnFFFFoFFFFpFFH - HFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFH - HFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFH - HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH - "} - - var/mob/curator = null - -/datum/test_environment/tank_gallery/populate(char, char_turf) - new /turf/open/floor/almayer(char_turf) - - if(char == "M") - curator = new /mob/living/carbon/human(char_turf) - return - - var/list/dirs = list("a" = 1, "b" = 2, "c" = 1, "d" = 2, "e" = 1, "f" = 2, "g" = 1, "h" = 2, "i" = 4, "j" = 8, "k" = 4, "l" = 8, "m" = 4, "n" = 8, "o" = 4, "p" = 8) - - var/list/dir2rotation = list( - "1" = 180, - "2" = 0, - "4" = 90, - "8" = 270 - ) - - var/obj/vehicle/multitile/tank/tank = new /obj/vehicle/multitile/tank(char_turf) - var/obj/item/hardpoint/holder/tank_turret/turret = 
locate() in tank.hardpoints - switch(char) - if("a", "b", "i", "j") - tank.add_hardpoint(new /obj/item/hardpoint/locomotion/treads) - tank.add_hardpoint(new /obj/item/hardpoint/support/overdrive_enhancer) - turret.add_hardpoint(new /obj/item/hardpoint/primary/autocannon) - turret.add_hardpoint(new /obj/item/hardpoint/secondary/m56cupola) - if("c", "d", "k", "l") - tank.add_hardpoint(new /obj/item/hardpoint/locomotion/treads) - tank.add_hardpoint(new /obj/item/hardpoint/support/overdrive_enhancer) - turret.add_hardpoint(new /obj/item/hardpoint/primary/flamer) - turret.add_hardpoint(new /obj/item/hardpoint/secondary/small_flamer) - if("e", "f", "m", "n") - tank.add_hardpoint(new /obj/item/hardpoint/locomotion/treads) - tank.add_hardpoint(new /obj/item/hardpoint/support/artillery_module) - turret.add_hardpoint(new /obj/item/hardpoint/primary/cannon) - turret.add_hardpoint(new /obj/item/hardpoint/secondary/grenade_launcher) - if("g", "h", "o", "p") - tank.add_hardpoint(new /obj/item/hardpoint/locomotion/treads) - tank.add_hardpoint(new /obj/item/hardpoint/support/weapons_sensor) - turret.add_hardpoint(new /obj/item/hardpoint/primary/minigun) - turret.add_hardpoint(new /obj/item/hardpoint/secondary/towlauncher) - - turret.rotate(dir2rotation["[dirs[char]]"]) - tank.update_icon() - -// Key into the mob -/datum/test_environment/tank_gallery/insert_actors() - set waitfor = FALSE - - // Wait for a client to key in - while(!length(GLOB.clients)) - sleep(10) - - for(var/client/C in GLOB.clients) - var/datum/mind/M = C.mob.mind - M.transfer_to(curator) - - // Equip the mob as a VC - arm_equipment(curator, /datum/equipment_preset/uscm/tank/full, TRUE, TRUE) - - break diff --git a/code/modules/test/testenv/test_area.dm b/code/modules/test/testenv/test_area.dm deleted file mode 100644 index a0c3f161315b..000000000000 --- a/code/modules/test/testenv/test_area.dm +++ /dev/null @@ -1,5 +0,0 @@ -/area/test - name = "Test environment" - ceiling = CEILING_METAL - requires_power = 0 - 
unlimited_power = 1 diff --git a/code/modules/test/testenv/test_environment.dm b/code/modules/test/testenv/test_environment.dm deleted file mode 100644 index ad298b6bc239..000000000000 --- a/code/modules/test/testenv/test_environment.dm +++ /dev/null @@ -1,144 +0,0 @@ -/* - The big deal with test environments are environment strings. Here's how they work: - - The string as a whole represents a map. Each character determines what will be spawned in that position. - The dimension of the test environment is determined by the largest row/column size. Spaces won't be populated. - - Certain key characters are reserved for common atoms, such as walls and floors. They are: - H - hull wall (indestructible) - W - regular wall (destructible) - F - floor - S - nothing/default turf - - Any other character will be relayed to the populate() proc, where you can spawn stuff you need based on - the character. -*/ - -var/list/test_env_prefab_types = list( - "H" = /turf/closed/wall/almayer/outer, - "W" = /turf/closed/wall/almayer, - "F" = /turf/open/floor/almayer, - "S" = null -) - -/datum/test_environment - // Name of the test environment - var/name = "" - - // Min position of the test environment - var/x = 1 - var/y = 1 - var/z = 0 - - // Environment string for constructing the test environment - var/environment_string = "" - - // If a custom populate proc isn't made, you can use this list for character => type assignments - var/list/environment_assignments = null - -// Initializes the test environment, making it ready for use -/datum/test_environment/proc/initialize() - set waitfor = FALSE - - // Give the environment its own z level if no z is specified - if(!z) - z = ++world.maxz - - if(environment_string) - parse_env_string() - else - // If there is no environment string, manually populate the environment - manual_populate() - - // Wait for the game to start before inserting people - while(SSticker.current_state != GAME_STATE_PLAYING) - sleep(10) - - insert_actors() - -// 
Constructs a test environment from the datum's environment string -/datum/test_environment/proc/parse_env_string() - var/list/rows = splittext(environment_string, "\n") - var/row_amount = 0 - // Count rows and clean up the list - for(var/i = 1 to length(rows)) - var/row = trim(rows[i]) - - // Empty rows are ignored - if(!length(row) || !row) - rows[i] = null - continue - - rows[i] = row - row_amount++ - // Get rid of the nulled rows - rows -= null - - if(!row_amount) - return - - var/atoms_to_setup = 0 - var/col_amount = 0 - for(var/row in rows) - col_amount = max(col_amount, length(row)) - atoms_to_setup += length(row) - - if(!col_amount) - return - - // Construct the test environment top->bottom - var/cur_x = x - var/cur_y = y + row_amount - for(var/row in rows) - cur_x = x - - // Construct the row left->right - for(var/char in splittext(row, regex("."), 1, 0, TRUE)) - if(!char) - continue - - var/cur_turf = locate(cur_x, cur_y, z) - // Set the area - new /area/test(cur_turf) - - // Construct prefab atoms - if(char in test_env_prefab_types) - var/type = test_env_prefab_types[char] - // Do nothing here - if(!type) - cur_x++ - continue - - new type(cur_turf) - else - // If it's not a prefab, pass the construction job on to the populate() proc - populate(char, cur_turf) - cur_x++ - cur_y-- - -// Populates a given position with the given character -/datum/test_environment/proc/populate(character, char_turf) - if(character in environment_assignments) - var/list/types = environment_assignments[character] - for(var/type in types) - var/atom/A = new type(char_turf) - if(types[type]) - A.setDir(types[type]) - -// Insert yourself into the environment here, for example by keying into a mob -/datum/test_environment/proc/insert_actors() - return - -// You can manually populate the environment in here if you'd rather do that -/datum/test_environment/proc/manual_populate() - return - -// Utility function for constructing empty hulls 
-/datum/test_environment/proc/construct_room(width, height) - // Construct the outer hull - for(var/turf/T in blockhollow(locate(x, y, z), locate(x + width, y + height, z))) - new /turf/closed/wall/almayer/outer(T) - - // Construct floors - for(var/turf/T in block(locate(x+1, y+1, z), locate(x + width - 1, y + height - 1, z))) - new /turf/open/floor/almayer(T) diff --git a/code/modules/unit_tests/README.md b/code/modules/unit_tests/README.md new file mode 100644 index 000000000000..9fe97b8b16dd --- /dev/null +++ b/code/modules/unit_tests/README.md @@ -0,0 +1,76 @@ +# Unit Tests + +## What is unit testing? + +Unit tests are automated code to verify that parts of the game work exactly as they should. For example, [a test to make sure that the amputation surgery actually amputates the limb](https://github.com/tgstation/tgstation/blob/e416283f162b86345a8623125ab866839b1ac40d/code/modules/unit_tests/surgeries.dm#L1-L13). These are ran every time a PR is made, and thus are very helpful for preventing bugs from cropping up in your code that would've otherwise gone unnoticed. For example, would you have thought to check [that beach boys would still work the same after editing pizza](https://github.com/tgstation/tgstation/pull/53641#issuecomment-691384934)? If you value your time, probably not. + +On their most basic level, when `UNIT_TESTS` is defined, all subtypes of `/datum/unit_test` will have their `Run` proc executed. From here, if `Fail` is called at any point, then the tests will report as failed. + +## How do I write one? +1. Find a relevant file. + +All unit test related code is in `code/modules/unit_tests`. If you are adding a new test for a surgery, for example, then you'd open `surgeries.dm`. If a relevant file does not exist, simply create one in this folder, then `#include` it in `_unit_tests.dm`. + +2. Create the unit test. + +To make a new unit test, you simply need to define a `/datum/unit_test`. 
+ +For example, let's suppose that we are creating a test to make sure a proc `square` correctly raises inputs to the power of two. We'd start with first: + +``` +/datum/unit_test/square/Run() +``` + +This defines our new unit test, `/datum/unit_test/square`. Inside this function, we're then going to run through whatever we want to check. Tests provide a few assertion functions to make this easy. For now, we're going to use `TEST_ASSERT_EQUAL`. + +``` +/datum/unit_test/square/Run() + TEST_ASSERT_EQUAL(square(3), 9, "square(3) did not return 9") + TEST_ASSERT_EQUAL(square(4), 16, "square(4) did not return 16") +``` + +As you can hopefully tell, we're simply checking if the output of `square` matches the output we are expecting. If the test fails, it'll report the error message given as well as whatever the actual output was. + +3. Run the unit test + +Open `code/_compile_options.dm` and uncomment the following line. + +``` +//#define UNIT_TESTS //If this is uncommented, we do a single run though of the game setup and tear down process with unit tests in between +``` + +Then, run tgstation.dmb in Dream Daemon. Don't bother trying to connect, you won't need to. You'll be able to see the outputs of all the tests. You'll get to see which tests failed and for what reason. If they all pass, you're set! + +## How to think about tests + +Unit tests exist to prevent bugs that would happen in a real game. Thus, they should attempt to emulate the game world wherever possible. For example, the [quick swap sanity test](https://github.com/tgstation/tgstation/blob/e416283f162b86345a8623125ab866839b1ac40d/code/modules/unit_tests/quick_swap_sanity.dm) emulates a *real* scenario of the bug it fixed occurring by creating a character and giving it real items. The unrecommended alternative would be to create special test-only items. 
This isn't a hard rule, the [reagent method exposure tests](https://github.com/tgstation/tgstation/blob/e416283f162b86345a8623125ab866839b1ac40d/code/modules/unit_tests/reagent_mod_expose.dm) create a test-only reagent for example, but do keep it in mind. + +Unit tests should also be just that--testing *units* of code. For example, instead of having one massive test for reagents, there are instead several smaller tests for testing exposure, metabolization, etc. + +## The unit testing API + +You can find more information about all of these from their respective doc comments, but for a brief overview: + +`/datum/unit_test` - The base for all tests to be ran. Subtypes must override `Run()`. `New()` and `Destroy()` can be used for setup and teardown. To fail, use `TEST_FAIL(reason)`. + +`/datum/unit_test/proc/allocate(type, ...)` - Allocates an instance of the provided type with the given arguments. Is automatically destroyed when the test is over. Commonly seen in the form of `var/mob/living/carbon/human/human = allocate(/mob/living/carbon/human/consistent)`. + +`TEST_FAIL(reason)` - Marks a failure at this location, but does not stop the test. + +`TEST_ASSERT(assertion, reason)` - Stops the unit test and fails if the assertion is not met. For example: `TEST_ASSERT(powered(), "Machine is not powered")`. + +`TEST_ASSERT_NOTNULL(a, message)` - Same as `TEST_ASSERT`, but checks if `!isnull(a)`. For example: `TEST_ASSERT_NOTNULL(myatom, "My atom was never set!")`. + +`TEST_ASSERT_NULL(a, message)` - Same as `TEST_ASSERT`, but checks if `isnull(a)`. If not, gives a helpful message showing what `a` was. For example: `TEST_ASSERT_NULL(delme, "Delme was never cleaned up!")`. + +`TEST_ASSERT_EQUAL(a, b, message)` - Same as `TEST_ASSERT`, but checks if `a == b`. If not, gives a helpful message showing what both `a` and `b` were. For example: `TEST_ASSERT_EQUAL(2 + 2, 4, "The universe is falling apart before our eyes!")`. 
+ +`TEST_ASSERT_NOTEQUAL(a, b, message)` - Same as `TEST_ASSERT_EQUAL`, but reversed. + +`TEST_FOCUS(test_path)` - *Only* run the test provided within the parameters. Useful for reducing noise. For example, if we only want to run our example square test, we can add `TEST_FOCUS(/datum/unit_test/square)`. Should *never* be pushed in a pull request--you will be laughed at. + +## Final Notes + +- Writing tests before you attempt to fix the bug can actually speed up development a lot! It means you don't have to go in game and follow the same exact steps manually every time. This process is known as "TDD" (test driven development). Write the test first, make sure it fails, *then* start work on the fix/feature, and you'll know you're done when your tests pass. If you do try this, do make sure to confirm in a non-testing environment just to double check. +- Make sure that your tests don't accidentally call RNG functions like `prob`. Since RNG is seeded during tests, you may not realize you have done so until someone else makes a PR and the tests fail! +- Do your best not to change the behavior of non-testing code during tests. While it may sometimes be necessary in the case of situations such as the above, it is still a slippery slope that can lead to the code you're testing being too different from the production environment to be useful. 
diff --git a/code/modules/unit_tests/_unit_tests.dm b/code/modules/unit_tests/_unit_tests.dm new file mode 100644 index 000000000000..3e1ed5017bc2 --- /dev/null +++ b/code/modules/unit_tests/_unit_tests.dm @@ -0,0 +1,90 @@ +//include unit test files in this module in this ifdef +//Keep this sorted alphabetically + +#if defined(UNIT_TESTS) || defined(SPACEMAN_DMM) + +/// For advanced cases, fail unconditionally but don't return (so a test can return multiple results) +#define TEST_FAIL(reason) (Fail(reason || "No reason", __FILE__, __LINE__)) + +/// Asserts that a condition is true +/// If the condition is not true, fails the test +#define TEST_ASSERT(assertion, reason) if (!(assertion)) { return Fail("Assertion failed: [reason || "No reason"]", __FILE__, __LINE__) } + +/// Asserts that a parameter is not null +#define TEST_ASSERT_NOTNULL(a, reason) if (isnull(a)) { return Fail("Expected non-null value: [reason || "No reason"]", __FILE__, __LINE__) } + +/// Asserts that a parameter is null +#define TEST_ASSERT_NULL(a, reason) if (!isnull(a)) { return Fail("Expected null value but received [a]: [reason || "No reason"]", __FILE__, __LINE__) } + +/// Asserts that the two parameters passed are equal, fails otherwise +/// Optionally allows an additional message in the case of a failure +#define TEST_ASSERT_EQUAL(a, b, message) do { \ + var/lhs = ##a; \ + var/rhs = ##b; \ + if (lhs != rhs) { \ + return Fail("Expected [isnull(lhs) ? "null" : lhs] to be equal to [isnull(rhs) ? "null" : rhs].[message ? " [message]" : ""]", __FILE__, __LINE__); \ + } \ +} while (FALSE) + +/// Asserts that the two parameters passed are not equal, fails otherwise +/// Optionally allows an additional message in the case of a failure +#define TEST_ASSERT_NOTEQUAL(a, b, message) do { \ + var/lhs = ##a; \ + var/rhs = ##b; \ + if (lhs == rhs) { \ + return Fail("Expected [isnull(lhs) ? "null" : lhs] to not be equal to [isnull(rhs) ? "null" : rhs].[message ? 
 " [message]" : ""]", __FILE__, __LINE__); \ + } \ +} while (FALSE) + +/// *Only* run the test provided within the parentheses +/// This is useful for debugging when you want to reduce noise, but should never be pushed +/// Intended to be used in the manner of `TEST_FOCUS(/datum/unit_test/math)` +#define TEST_FOCUS(test_path) ##test_path { focus = TRUE; } + +/// Logs a noticeable message on GitHub, but will not mark as an error. +/// Use this when something shouldn't happen and is of note, but shouldn't block CI. +/// Does not mark the test as failed. +#define TEST_NOTICE(source, message) source.log_for_test((##message), "notice", __FILE__, __LINE__) + +/// Constants indicating unit test completion status +#define UNIT_TEST_PASSED 0 +#define UNIT_TEST_FAILED 1 +#define UNIT_TEST_SKIPPED 2 + +#define TEST_PRE 0 +#define TEST_DEFAULT 1 +/// After most test steps, used for tests that run long so shorter issues can be noticed faster +#define TEST_LONGER 10 +/// This must be the last test to run due to the inherent nature of the test iterating every single tangible atom in the game and qdeleting all of them (while taking long sleeps to make sure the garbage collector fires properly) taking a large amount of time. +#define TEST_CREATE_AND_DESTROY INFINITY + +/// Change color to red on ANSI terminal output, if enabled with -DANSICOLORS. +#ifdef ANSICOLORS +#define TEST_OUTPUT_RED(text) "\x1B\x5B1;31m[text]\x1B\x5B0m" +#else +#define TEST_OUTPUT_RED(text) (text) +#endif +/// Change color to green on ANSI terminal output, if enabled with -DANSICOLORS. 
+#ifdef ANSICOLORS +#define TEST_OUTPUT_GREEN(text) "\x1B\x5B1;32m[text]\x1B\x5B0m" +#else +#define TEST_OUTPUT_GREEN(text) (text) +#endif + +/// A trait source when adding traits through unit tests +#define TRAIT_SOURCE_UNIT_TESTS "unit_tests" + +#include "focus_only_tests.dm" +#include "resist.dm" +#include "spritesheets.dm" +#include "subsystem_init.dm" +#include "tgui_create_message.dm" +#include "timer_sanity.dm" +#include "unit_test.dm" +#include "spawn_humans.dm" + +#undef TEST_ASSERT +#undef TEST_ASSERT_EQUAL +#undef TEST_ASSERT_NOTEQUAL +//#undef TEST_FOCUS - This define is used by vscode unit test extension to pick specific unit tests to run and appended later so needs to be used out of scope here +#endif diff --git a/code/modules/unit_tests/focus_only_tests.dm b/code/modules/unit_tests/focus_only_tests.dm new file mode 100644 index 000000000000..05d07a513cc8 --- /dev/null +++ b/code/modules/unit_tests/focus_only_tests.dm @@ -0,0 +1,22 @@ +/// These tests perform no behavior of their own, and have their tests offloaded onto other procs. +/// This is useful in cases like in build_appearance_list where we want to know if any fail, +/// but is not useful to write a test for. +/// This file exists so that you can change any of these to TEST_FOCUS and only check for that test. +/// For example, change /datum/unit_test/focus_only/invalid_overlays to TEST_FOCUS(/datum/unit_test/focus_only/invalid_overlays), +/// and you will only test the check for invalid overlays in appearance building. 
+/datum/unit_test/focus_only + +/// Checks that every overlay passed into build_appearance_list exists in the icon +/datum/unit_test/focus_only/invalid_overlays + +/// Checks that every icon sent to the research_designs spritesheet is valid +/datum/unit_test/focus_only/invalid_research_designs + +/// Checks that every icon sent to vending machines is valid +/datum/unit_test/focus_only/invalid_vending_machine_icon_states + +/// Checks that space does not initialize multiple times +/datum/unit_test/focus_only/multiple_space_initialization + +/// Checks that smoothing_groups and canSmoothWith are properly sorted in /atom/Initialize +/datum/unit_test/focus_only/sorted_smoothing_groups diff --git a/code/modules/unit_tests/resist.dm b/code/modules/unit_tests/resist.dm new file mode 100644 index 000000000000..fac2c81b3faa --- /dev/null +++ b/code/modules/unit_tests/resist.dm @@ -0,0 +1,15 @@ +/// Test that stop, drop, and roll lowers fire stacks +/datum/unit_test/stop_drop_and_roll/Run() + var/mob/living/carbon/human/human = allocate(/mob/living/carbon/human) + + TEST_ASSERT_EQUAL(human.fire_stacks, 0, "Human does not have 0 fire stacks pre-ignition") + + human.adjust_fire_stacks(5) + human.IgniteMob() + + TEST_ASSERT_EQUAL(human.fire_stacks, 5, "Human does not have 5 fire stacks pre-resist") + + // Stop, drop, and roll has a sleep call. This would delay the test, and is not necessary. 
+ INVOKE_ASYNC(human, /mob/living/verb/resist) + + TEST_ASSERT(human.fire_stacks < 5, "Human did not lower fire stacks after resisting") diff --git a/code/modules/unit_tests/spawn_humans.dm b/code/modules/unit_tests/spawn_humans.dm new file mode 100644 index 000000000000..71b67ced106a --- /dev/null +++ b/code/modules/unit_tests/spawn_humans.dm @@ -0,0 +1,7 @@ +/datum/unit_test/spawn_humans/Run() + var/locs = block(run_loc_floor_bottom_left, run_loc_floor_top_right) + + for(var/I in 1 to 5) + new /mob/living/carbon/human(pick(locs)) + + sleep(5 SECONDS) diff --git a/code/modules/unit_tests/spritesheets.dm b/code/modules/unit_tests/spritesheets.dm new file mode 100644 index 000000000000..c7c16c6535e8 --- /dev/null +++ b/code/modules/unit_tests/spritesheets.dm @@ -0,0 +1,11 @@ +///Checks if spritesheet assets contain icon states with invalid names +/datum/unit_test/spritesheets + +/datum/unit_test/spritesheets/Run() + for(var/datum/asset/spritesheet/sheet as anything in subtypesof(/datum/asset/spritesheet)) + if(!initial(sheet.name)) //Ignore abstract types + continue + sheet = get_asset_datum(sheet) + for(var/sprite_name in sheet.sprites) + if(!sprite_name) + TEST_FAIL("Spritesheet [sheet.type] has a nameless icon state.") diff --git a/code/modules/unit_tests/subsystem_init.dm b/code/modules/unit_tests/subsystem_init.dm new file mode 100644 index 000000000000..15ba71b6a76f --- /dev/null +++ b/code/modules/unit_tests/subsystem_init.dm @@ -0,0 +1,14 @@ +/// Tests that all subsystems that need to properly initialize. +/datum/unit_test/subsystem_init + +/datum/unit_test/subsystem_init/Run() + for(var/datum/controller/subsystem/subsystem as anything in Master.subsystems) + if(subsystem.flags & SS_NO_INIT) + continue + if(!subsystem.initialized) + var/message = "[subsystem] ([subsystem.type]) is a subsystem meant to initialize but doesn't get set as initialized." 
+ + if (subsystem.flags & SS_OK_TO_FAIL_INIT) + TEST_NOTICE(src, "[message]\nThis subsystem is marked as SS_OK_TO_FAIL_INIT. This is still a bug, but it is non-blocking.") + else + TEST_FAIL(message) diff --git a/code/modules/unit_tests/tgui_create_message.dm b/code/modules/unit_tests/tgui_create_message.dm new file mode 100644 index 000000000000..4d5a4bc0a026 --- /dev/null +++ b/code/modules/unit_tests/tgui_create_message.dm @@ -0,0 +1,28 @@ +/// Test that `TGUI_CREATE_MESSAGE` is correctly implemented +/datum/unit_test/tgui_create_message + +/datum/unit_test/tgui_create_message/Run() + var/type = "something/here" + var/list/payload = list( + "name" = "Terry McTider", + "heads_caved" = 100, + "accomplishments" = list( + "nothing", + "literally nothing", + list( + "something" = "just kidding", + ), + ), + ) + + var/message = TGUI_CREATE_MESSAGE(type, payload) + + // Ensure consistent output to compare by performing a round-trip. + var/output = json_encode(json_decode(url_decode(message))) + + var/expected = json_encode(list( + "type" = type, + "payload" = payload, + )) + + TEST_ASSERT_EQUAL(expected, output, "TGUI_CREATE_MESSAGE didn't round trip properly") diff --git a/code/modules/unit_tests/timer_sanity.dm b/code/modules/unit_tests/timer_sanity.dm new file mode 100644 index 000000000000..dbdf3f6d8e8d --- /dev/null +++ b/code/modules/unit_tests/timer_sanity.dm @@ -0,0 +1,3 @@ +/datum/unit_test/timer_sanity/Run() + TEST_ASSERT(SStimer.bucket_count >= 0, + "SStimer is going into negative bucket count from something") diff --git a/code/modules/unit_tests/unit_test.dm b/code/modules/unit_tests/unit_test.dm new file mode 100644 index 000000000000..30eae6eef44e --- /dev/null +++ b/code/modules/unit_tests/unit_test.dm @@ -0,0 +1,222 @@ +/* + +Usage: +Override /Run() to run your test code + +Call TEST_FAIL() to fail the test (You should specify a reason) + +You may use /New() and /Destroy() for setup/teardown respectively + +You can use the run_loc_floor_bottom_left and 
run_loc_floor_top_right to get turfs for testing + +*/ + +GLOBAL_DATUM(current_test, /datum/unit_test) +GLOBAL_VAR_INIT(failed_any_test, FALSE) +GLOBAL_VAR(test_log) +/// When unit testing, all logs sent to log_mapping are stored here and retrieved in log_mapping unit test. +GLOBAL_LIST_EMPTY(unit_test_mapping_logs) + +/// The name of the test that is currently focused. +/// Use the PERFORM_ALL_TESTS macro instead. +GLOBAL_VAR_INIT(focused_test, focused_test()) + +/proc/focused_test() + for (var/datum/unit_test/unit_test as anything in subtypesof(/datum/unit_test)) + if (initial(unit_test.focus)) + return unit_test + return null + +/datum/unit_test + //Bit of metadata for the future maybe + var/list/procs_tested + + /// The bottom left floor turf of the testing zone + var/turf/run_loc_floor_bottom_left + + /// The top right floor turf of the testing zone + var/turf/run_loc_floor_top_right + ///The priority of the test, the larger it is the later it fires + var/priority = TEST_DEFAULT + //internal shit + var/focus = FALSE + var/succeeded = TRUE + var/list/allocated + var/list/fail_reasons + + var/static/datum/space_level/reservation + +/proc/cmp_unit_test_priority(datum/unit_test/a, datum/unit_test/b) + return initial(a.priority) - initial(b.priority) + +/datum/unit_test/New() + if (isnull(reservation)) + var/datum/map_template/unit_tests/template = new + reservation = template.load_new_z() + + allocated = new + run_loc_floor_bottom_left = get_turf(locate(/obj/effect/landmark/unit_test_bottom_left) in GLOB.landmarks_list) + run_loc_floor_top_right = get_turf(locate(/obj/effect/landmark/unit_test_top_right) in GLOB.landmarks_list) + + TEST_ASSERT(isfloorturf(run_loc_floor_bottom_left), "run_loc_floor_bottom_left was not a floor ([run_loc_floor_bottom_left])") + TEST_ASSERT(isfloorturf(run_loc_floor_top_right), "run_loc_floor_top_right was not a floor ([run_loc_floor_top_right])") + +/datum/unit_test/Destroy() + QDEL_LIST(allocated) + // clear the test area + for 
(var/turf/turf in block(locate(1, 1, run_loc_floor_bottom_left.z), locate(world.maxx, world.maxy, run_loc_floor_bottom_left.z))) + for (var/content in turf.contents) + if (istype(content, /obj/effect/landmark)) + continue + qdel(content) + return ..() + +/datum/unit_test/proc/Run() + TEST_FAIL("Run() called parent or not implemented") + +/datum/unit_test/proc/Fail(reason = "No reason", file = "OUTDATED_TEST", line = 1) + succeeded = FALSE + + if(!istext(reason)) + reason = "FORMATTED: [reason != null ? reason : "NULL"]" + + LAZYADD(fail_reasons, list(list(reason, file, line))) + +/// Allocates an instance of the provided type, and places it somewhere in an available loc +/// Instances allocated through this proc will be destroyed when the test is over +/datum/unit_test/proc/allocate(type, ...) + var/list/arguments = args.Copy(2) + if(ispath(type, /atom)) + if (!arguments.len) + arguments = list(run_loc_floor_bottom_left) + else if (arguments[1] == null) + arguments[1] = run_loc_floor_bottom_left + var/instance + // Byond will throw an index out of bounds if arguments is empty in that arglist call. Sigh + if(length(arguments)) + instance = new type(arglist(arguments)) + else + instance = new type() + allocated += instance + return instance + +/datum/unit_test/proc/test_screenshot(name, icon/icon) + if (!istype(icon)) + TEST_FAIL("[icon] is not an icon.") + return + + var/path_prefix = replacetext(replacetext("[type]", "/datum/unit_test/", ""), "/", "_") + name = replacetext(name, "/", "_") + + var/filename = "code/modules/unit_tests/screenshots/[path_prefix]_[name].png" + + if (fexists(filename)) + var/data_filename = "data/screenshots/[path_prefix]_[name].png" + fcopy(icon, data_filename) + log_test("\t[path_prefix]_[name] was found, putting in data/screenshots") + else if (fexists("code")) + // We are probably running in a local build + fcopy(icon, filename) + TEST_FAIL("Screenshot for [name] did not exist. 
One has been created.") + else + // We are probably running in real CI, so just pretend it worked and move on + fcopy(icon, "data/screenshots_new/[path_prefix]_[name].png") + + log_test("\t[path_prefix]_[name] was put in data/screenshots_new") + +/// Helper for screenshot tests to take an image of an atom from all directions and insert it into one icon +/datum/unit_test/proc/get_flat_icon_for_all_directions(atom/thing, no_anim = TRUE) + var/icon/output = icon('icons/effects/effects.dmi', "nothing") + + for (var/direction in GLOB.cardinals) + var/icon/partial = getFlatIcon(thing, defdir = direction, no_anim = no_anim) + output.Insert(partial, dir = direction) + + return output + +/// Logs a test message. Will use GitHub action syntax found at https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions +/datum/unit_test/proc/log_for_test(text, priority, file, line) + var/map_name = SSmapping.configs[GROUND_MAP] + + // Need to escape the text to properly support newlines. 
+ var/annotation_text = replacetext(text, "%", "%25") + annotation_text = replacetext(annotation_text, "\n", "%0A") + + log_world("::[priority] file=[file],line=[line],title=[map_name]: [type]::[annotation_text]") + +/proc/RunUnitTest(test_path, list/test_results) + if (ispath(test_path, /datum/unit_test/focus_only)) + return + + var/datum/unit_test/test = new test_path + + GLOB.current_test = test + var/duration = REALTIMEOFDAY + + log_world("::group::[test_path]") + test.Run() + + duration = REALTIMEOFDAY - duration + GLOB.current_test = null + GLOB.failed_any_test |= !test.succeeded + + var/list/log_entry = list() + var/list/fail_reasons = test.fail_reasons + + for(var/reasonID in 1 to LAZYLEN(fail_reasons)) + var/text = fail_reasons[reasonID][1] + var/file = fail_reasons[reasonID][2] + var/line = fail_reasons[reasonID][3] + + test.log_for_test(text, "error", file, line) + + // Normal log message + log_entry += "\tFAILURE #[reasonID]: [text] at [file]:[line]" + + var/message = log_entry.Join("\n") + log_test(message) + + var/test_output_desc = "[test_path] [duration / 10]s" + if (test.succeeded) + log_world("[TEST_OUTPUT_GREEN("PASS")] [test_output_desc]") + + log_world("::endgroup::") + + if (!test.succeeded) + log_world("::error::[TEST_OUTPUT_RED("FAIL")] [test_output_desc]") + + test_results[test_path] = list("status" = test.succeeded ? 
UNIT_TEST_PASSED : UNIT_TEST_FAILED, "message" = message, "name" = test_path) + + qdel(test) + +/proc/RunUnitTests() + CHECK_TICK + + var/list/tests_to_run = subtypesof(/datum/unit_test) + var/list/focused_tests = list() + for (var/_test_to_run in tests_to_run) + var/datum/unit_test/test_to_run = _test_to_run + if (initial(test_to_run.focus)) + focused_tests += test_to_run + if(length(focused_tests)) + tests_to_run = focused_tests + + tests_to_run = sortTim(tests_to_run, GLOBAL_PROC_REF(cmp_unit_test_priority)) + + var/list/test_results = list() + + for(var/unit_path in tests_to_run) + CHECK_TICK //We check tick first because the unit test we run last may be so expensive that checking tick will lock up this loop forever + RunUnitTest(unit_path, test_results) + + var/file_name = "data/unit_tests.json" + fdel(file_name) + file(file_name) << json_encode(test_results) + + SSticker.force_ending = TRUE + //We have to call this manually because del_text can preceed us, and SSticker doesn't fire in the post game + world.Reboot() + +/datum/map_template/unit_tests + name = "Unit Tests Zone" + mappath = "maps/templates/unit_tests.dmm" diff --git a/dependencies.sh b/dependencies.sh index 207584e41c9a..ef00662eaa4d 100644 --- a/dependencies.sh +++ b/dependencies.sh @@ -5,7 +5,7 @@ # byond version export BYOND_MAJOR=514 -export BYOND_MINOR=1560 +export BYOND_MINOR=1588 #rust_g git tag export RUST_G_VERSION=1.2.0 diff --git a/maps/templates/space.dmm b/maps/templates/space.dmm new file mode 100644 index 000000000000..af5077b2d061 --- /dev/null +++ b/maps/templates/space.dmm @@ -0,0 +1,8 @@ +//MAP CONVERTED BY dmm2tgm.py THIS HEADER COMMENT PREVENTS RECONVERSION, DO NOT REMOVE +"a" = ( +/turf/open/space/basic, +/area/space) + +(1,1,1) = {" +a +"} diff --git a/maps/templates/space.json b/maps/templates/space.json new file mode 100644 index 000000000000..2fa9c1ca54dc --- /dev/null +++ b/maps/templates/space.json @@ -0,0 +1,6 @@ +{ + "map_name": "Space", + "map_path": "templates", + 
"map_file": "space.dmm", + "traits": [{"Marine Main Ship": true}] +} diff --git a/maps/templates/unit_tests.dmm b/maps/templates/unit_tests.dmm new file mode 100644 index 000000000000..ac5c80e5ce8c --- /dev/null +++ b/maps/templates/unit_tests.dmm @@ -0,0 +1,79 @@ +//MAP CONVERTED BY dmm2tgm.py THIS HEADER COMMENT PREVENTS RECONVERSION, DO NOT REMOVE +"a" = ( +/turf/closed/wall/almayer/outer, +/area/misc/testroom) +"m" = ( +/turf/open/floor, +/area/misc/testroom) +"r" = ( +/obj/effect/landmark/unit_test_top_right, +/turf/open/floor, +/area/misc/testroom) +"L" = ( +/obj/effect/landmark/unit_test_bottom_left, +/turf/open/floor, +/area/misc/testroom) + +(1,1,1) = {" +a +a +a +a +a +a +a +"} +(2,1,1) = {" +a +m +m +m +m +L +a +"} +(3,1,1) = {" +a +m +m +m +m +m +a +"} +(4,1,1) = {" +a +m +m +m +m +m +a +"} +(5,1,1) = {" +a +m +m +m +m +m +a +"} +(6,1,1) = {" +a +r +m +m +m +m +a +"} +(7,1,1) = {" +a +a +a +a +a +a +a +"} diff --git a/tools/ci/check_required_commits.sh b/tools/ci/check_required_commits.sh new file mode 100644 index 000000000000..4fc5e44100b6 --- /dev/null +++ b/tools/ci/check_required_commits.sh @@ -0,0 +1,7 @@ +echo "$REQUIRED_COMMITS" | + while IFS=$'\t' read -r commit title; do + if [[ $(git cat-file -t "$commit") != "commit" ]]; then + echo "Missing commit $commit: $title" + exit 1 + fi + done diff --git a/tools/ci/run_server.sh b/tools/ci/run_server.sh index 4d943846f043..6199182fc490 100644 --- a/tools/ci/run_server.sh +++ b/tools/ci/run_server.sh @@ -1,13 +1,18 @@ #!/bin/bash set -euo pipefail +MAP=$1 + +echo Testing $MAP + tools/deploy.sh ci_test -mkdir ci_test/config +mkdir ci_test/data -#test config -cp tools/ci/ci_config.txt ci_test/config/config.txt +#set the map +cp maps/$MAP.json ci_test/data/next_map.json +cp maps/templates/space.json ci_test/data/next_ship.json cd ci_test -DreamDaemon tgstation.dmb -close -trusted -verbose -params "log-directory=ci" +DreamDaemon ColonialMarinesALPHA.dmb -close -trusted -verbose -params "log-directory=ci" 
cd .. cat ci_test/data/logs/ci/clean_run.lk diff --git a/tools/deploy.sh b/tools/deploy.sh index eddcbd0a168a..9b048b15766c 100755 --- a/tools/deploy.sh +++ b/tools/deploy.sh @@ -13,7 +13,10 @@ mkdir -p \ $1/maps \ $1/icons \ $1/sound \ - $1/strings + $1/config \ + $1/strings \ + $1/nano \ + $1/map_config if [ -d ".git" ]; then mkdir -p $1/.git/logs @@ -25,6 +28,9 @@ cp -r maps/* $1/maps/ cp -r icons/* $1/icons/ cp -r sound/* $1/sound/ cp -r strings/* $1/strings/ +cp -r config/* $1/config/ +cp -r nano/* $1/nano/ +cp -r map_config/* $1/map_config/ #remove .dm files from _maps