diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a8f0c9d700..885a7c120b 100755 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -60,6 +60,7 @@ jobs: COMPILER: ${{ matrix.COMPILER }} batsched: ${{ matrix.batsched }} run: | + echo "Downloading wrench..."; docker pull wrenchproject/wrench-build:${DIST}-${COMPILER}; docker run -m 4g -d -t --name=wrench wrenchproject/wrench-build:${DIST}-${COMPILER} bash; docker exec wrench git clone https://github.com/wrench-project/wrench; @@ -68,12 +69,24 @@ jobs: BRANCH_NAME=$(echo ${GITHUB_REF} | sed 's/refs\/heads\///g'); docker exec -w /home/wrench/wrench wrench git checkout ${BRANCH_NAME}; fi + # build wrench, including unit_tests and examples + echo "Building wrench..."; docker exec wrench mkdir wrench/build; - # build and test wrench docker exec -w /home/wrench/wrench/build wrench cmake -DENABLE_BATSCHED=${batsched} -DCMAKE_VERBOSE_MAKEFILE=ON ..; docker exec -w /home/wrench/wrench/build wrench make all unit_tests examples wrench-daemon; + docker exec -w /home/wrench/wrench/build wrench sudo make install + # run unit_tests + echo "Running tests..."; docker exec -w /home/wrench/wrench/build wrench ./unit_tests; - docker exec -w /home/wrench/wrench/build/examples wrench ./run_all_examples.sh wrench-example-batch-smpi-action; + # run examples + echo "Running examples..."; + #docker exec -w /home/wrench/wrench/build/examples wrench ./run_all_examples.sh wrench-example-batch-smpi-action; + docker exec -w /home/wrench/wrench/build/examples wrench ./run_all_examples.sh + # run wrench-init + echo "Testing wrench-init..."; + docker exec -w /home/wrench/wrench/build wrench python3 ./tools/wrench/wrench-init/wrench-init -a ACTION -d ./wrench-init-generated-simulator + docker exec -w /home/wrench/wrench/build/wrench-init-generated-simulator/build wrench cmake .. 
+ docker exec -w /home/wrench/wrench/build/wrench-init-generated-simulator/build wrench make - name: Documentation Build and Deployment env: diff --git a/CMakeLists.txt b/CMakeLists.txt index 11aa7d5706..d9bc07d1be 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -74,7 +74,7 @@ set(CMAKE_CXX_STANDARD 17) # build the version number set(WRENCH_VERSION_MAJOR "2") -set(WRENCH_VERSION_MINOR "5") +set(WRENCH_VERSION_MINOR "6") set(WRENCH_VERSION_PATCH "0") set(WRENCH_VERSION_EXTRA "dev") @@ -266,8 +266,6 @@ set(SOURCE_FILES include/wrench/services/compute/batch/batch_schedulers/BatchScheduler.h include/wrench/services/compute/batch/batch_schedulers/homegrown/HomegrownBatchScheduler.h include/wrench/services/storage/storage_helpers/FileTransferThread.h - include/wrench/services/storage/storage_helpers/LogicalFileSystemNoCaching.h - include/wrench/services/storage/storage_helpers/LogicalFileSystemLRUCaching.h src/wrench/logging/TerminalOutput.cpp src/wrench/managers/data_movement_manager/DataMovementManager.cpp src/wrench/managers/data_movement_manager/DataMovementManagerMessage.cpp @@ -314,6 +312,8 @@ set(SOURCE_FILES include/wrench/services/compute/batch/batch_schedulers/homegrown/conservative_bf/BatchJobSet.h src/wrench/services/compute/batch/batch_schedulers/homegrown/conservative_bf/ConservativeBackfillingBatchScheduler.cpp include/wrench/services/compute/batch/batch_schedulers/homegrown/conservative_bf/ConservativeBackfillingBatchScheduler.h + src/wrench/services/compute/batch/batch_schedulers/homegrown/easy_bf/EasyBackfillingBatchScheduler.cpp + include/wrench/services/compute/batch/batch_schedulers/homegrown/easy_bf/EasyBackfillingBatchScheduler.h src/wrench/services/compute/batch/batch_schedulers/homegrown/conservative_bf/NodeAvailabilityTimeLine.cpp include/wrench/services/compute/batch/batch_schedulers/homegrown/conservative_bf/NodeAvailabilityTimeLine.h include/wrench/services/compute/batch/batch_schedulers/homegrown/conservative_bf_core_level/BatchJobSetCoreLevel.h @@ -461,9 +461,9 @@ set(TEST_FILES test/services/compute_services/bare_metal_compound_jobs/BareMetalComputeServiceMultiActionTests.cpp test/services/compute_services/bare_metal_compound_jobs/BareMetalComputeServiceMultiJobTests.cpp test/services/compute_services/bare_metal_standard_jobs/BareMetalComputeServiceOneTaskTest.cpp - test/services/storage_services/LogicalFileSystem/LogicalFileSystemTest.cpp test/services/storage_services/SimpleStorageService/SimpleStorageServiceCachingTest.cpp test/services/storage_services/SimpleStorageService/SimpleStorageServiceFunctionalTest.cpp + test/services/storage_services/SimpleStorageService/SimpleStorageServiceWrongServiceTest.cpp test/services/storage_services/SimpleStorageService/SimpleStorageServicePerformanceTest.cpp test/services/storage_services/SimpleStorageService/SimpleStorageServiceLimitedConnectionsTest.cpp test/services/storage_services/SimpleStorageService/StorageServiceDeleteRegisterTest.cpp @@ -490,6 +490,7 @@ set(TEST_FILES test/services/compute_services/batch_standard_and_pilot_jobs/HomeGrownTimeLineTest.cpp test/services/compute_services/batch_standard_and_pilot_jobs/BatchServiceTest.cpp test/services/compute_services/batch_standard_and_pilot_jobs/BatchServiceFCFSTest.cpp + test/services/compute_services/batch_standard_and_pilot_jobs/BatchServiceEASYBFTest.cpp test/services/compute_services/batch_standard_and_pilot_jobs/BatchServiceCONSERVATIVEBFTest.cpp test/services/compute_services/batch_standard_and_pilot_jobs/BatchServiceTraceFileTest.cpp 
test/services/compute_services/batch_standard_and_pilot_jobs/BatchServiceOutputCSVFileTest.cpp diff --git a/RELEASENOTES.md b/RELEASENOTES.md index a9b3fb079f..49673b462a 100755 --- a/RELEASENOTES.md +++ b/RELEASENOTES.md @@ -1,9 +1,11 @@ WRENCH Release Notes ------ -### current master branch +### wrench 2.5 -Nothing here yet +- Implementation of the EASY batch scheduling algorithm in BatchComputeService + +- New command-line argument for the wrench-daemon to specify the number of commports ### wrench 2.4 diff --git a/conf/cmake/Examples.cmake b/conf/cmake/Examples.cmake index c54d182d98..317e3d809c 100755 --- a/conf/cmake/Examples.cmake +++ b/conf/cmake/Examples.cmake @@ -11,7 +11,8 @@ add_subdirectory(${CMAKE_HOME_DIRECTORY}/examples/workflow_api/basic-examples/cl add_subdirectory(${CMAKE_HOME_DIRECTORY}/examples/workflow_api/basic-examples/virtualized-cluster-bag-of-tasks EXCLUDE_FROM_ALL) add_subdirectory(${CMAKE_HOME_DIRECTORY}/examples/workflow_api/basic-examples/batch-bag-of-tasks EXCLUDE_FROM_ALL) add_subdirectory(${CMAKE_HOME_DIRECTORY}/examples/workflow_api/basic-examples/batch-pilot-job EXCLUDE_FROM_ALL) -add_subdirectory(${CMAKE_HOME_DIRECTORY}/examples/workflow_api/real-workflow-example EXCLUDE_FROM_ALL) +add_subdirectory(${CMAKE_HOME_DIRECTORY}/examples/workflow_api/real-workflow-examples/vms-and-pilot-jobs EXCLUDE_FROM_ALL) +add_subdirectory(${CMAKE_HOME_DIRECTORY}/examples/workflow_api/real-workflow-examples/single-cluster-programmatic-platform EXCLUDE_FROM_ALL) add_subdirectory(${CMAKE_HOME_DIRECTORY}/examples/workflow_api/condor-grid-example EXCLUDE_FROM_ALL) #add_subdirectory(${CMAKE_HOME_DIRECTORY}/examples/workflow_api/basic-examples/io-pagecache EXCLUDE_FROM_ALL) diff --git a/examples/README.md b/examples/README.md index 102555b886..06ebf36030 100755 --- a/examples/README.md +++ b/examples/README.md @@ -96,10 +96,9 @@ to analyze action failures. #### Workflow-API Examples with real workflows and more sophisticated WMS implementations - - `workflow_api/real-workflow-example`: Two simulators, one in which the workflow is executed - using a batch compute service, and another in which the workflow is executed - using a cloud compute service. These simulators take as input workflow description - files from real-world workflow applications. They use the scheduler abstraction - provided by WRENCH to implement complex Workflow Management System. + - `workflow_api/real-workflow-examples`: Two simulators, one in which the workflow is executed + using a batch compute service (with pilot jobs) and a cloud compute service (with VMs), + and another in which the workflow is executed on a single cluster. These simulators take as input workflow description + files from real-world workflow applications, as provided by the WfCommons project.
--- diff --git a/examples/run_all_examples.sh.in b/examples/run_all_examples.sh.in index abb3fdf74c..59e3bc07fe 100755 --- a/examples/run_all_examples.sh.in +++ b/examples/run_all_examples.sh.in @@ -133,11 +133,18 @@ echo "${bold}RUNNING: ${INSTALL_DIR}/workflow_api/basic-examples/batch-pilot-job ${INSTALL_DIR}/workflow_api/basic-examples/batch-pilot-job/wrench-example-batch-pilot-job ${INSTALL_DIR}/workflow_api/basic-examples/batch-pilot-job/four_hosts_scratch.xml fi -if [[ "${TO_EXCLUDE}" == *"wrench-example-real-workflow"* ]]; then - echo "${bold}SKIPPING: wrench-example-real-workflow${normal}" +if [[ "${TO_EXCLUDE}" == *"wrench-example-real-workflow-single-cluster"* ]]; then + echo "${bold}SKIPPING: wrench-example-real-workflow-single-cluster${normal}" else -echo "${bold}RUNNING: ${INSTALL_DIR}/workflow_api/real-workflow-example/wrench-example-real-workflow${normal}" -${INSTALL_DIR}/workflow_api/real-workflow-example/wrench-example-real-workflow ${INSTALL_DIR}/workflow_api/real-workflow-example/cloud_batch_platform.xml ${INSTALL_DIR}/workflow_api/real-workflow-example/1000genome-chameleon-2ch-100k-001.json +echo "${bold}RUNNING: ${INSTALL_DIR}/workflow_api/real-workflow-examples/wrench-example-real-workflow-single-cluster${normal}" +${INSTALL_DIR}/workflow_api/real-workflow-examples/single-cluster-programmatic-platform/wrench-example-real-workflow-single-cluster 10 ${INSTALL_DIR}/workflow_api/real-workflow-examples/single-cluster-programmatic-platform/1000genome-chameleon-2ch-100k-001.json +fi + +if [[ "${TO_EXCLUDE}" == *"wrench-example-real-workflow-vms-and-pilots"* ]]; then + echo "${bold}SKIPPING: wrench-example-real-workflow-vms-and-pilots${normal}" +else +echo "${bold}RUNNING: ${INSTALL_DIR}/workflow_api/real-workflow-examples/wrench-example-real-workflow-vms-and-pilots${normal}" +${INSTALL_DIR}/workflow_api/real-workflow-examples/vms-and-pilot-jobs/wrench-example-real-workflow-vms-and-pilots ${INSTALL_DIR}/workflow_api/real-workflow-examples/vms-and-pilot-jobs/cloud_batch_platform.xml ${INSTALL_DIR}/workflow_api/real-workflow-examples/vms-and-pilot-jobs/1000genome-chameleon-2ch-100k-001.json fi if [[ "${TO_EXCLUDE}" == *"wrench-example-condor-grid-universe"* ]]; then diff --git a/examples/workflow_api/basic-examples/bare-metal-bag-of-tasks-programmatic-platform/BareMetalBagOfTasksProgrammaticPlatform.cpp b/examples/workflow_api/basic-examples/bare-metal-bag-of-tasks-programmatic-platform/BareMetalBagOfTasksProgrammaticPlatform.cpp index e069d16fb2..dba2ce12d5 100755 --- a/examples/workflow_api/basic-examples/bare-metal-bag-of-tasks-programmatic-platform/BareMetalBagOfTasksProgrammaticPlatform.cpp +++ b/examples/workflow_api/basic-examples/bare-metal-bag-of-tasks-programmatic-platform/BareMetalBagOfTasksProgrammaticPlatform.cpp @@ -51,16 +51,16 @@ namespace sg4 = simgrid::s4u; class PlatformCreator { public: - PlatformCreator(double link_bw) : link_bw(link_bw) {} + explicit PlatformCreator(double link_bw) : link_bw(link_bw) {} void operator()() const { - create_platform(this->link_bw); + create_platform(); } private: double link_bw; - void create_platform(double link_bw) const { + void create_platform() const { // Create the top-level zone auto zone = sg4::create_full_zone("AS0"); // Create the WMSHost host with its disk diff --git a/examples/workflow_api/real-workflow-example/1000genome-chameleon-2ch-100k-001.json b/examples/workflow_api/real-workflow-examples/single-cluster-programmatic-platform/1000genome-chameleon-2ch-100k-001.json similarity index 100% rename from 
examples/workflow_api/real-workflow-example/1000genome-chameleon-2ch-100k-001.json rename to examples/workflow_api/real-workflow-examples/single-cluster-programmatic-platform/1000genome-chameleon-2ch-100k-001.json diff --git a/examples/workflow_api/real-workflow-examples/single-cluster-programmatic-platform/CMakeLists.txt b/examples/workflow_api/real-workflow-examples/single-cluster-programmatic-platform/CMakeLists.txt new file mode 100755 index 0000000000..a26d5f5c57 --- /dev/null +++ b/examples/workflow_api/real-workflow-examples/single-cluster-programmatic-platform/CMakeLists.txt @@ -0,0 +1,36 @@ + +set(CMAKE_CXX_STANDARD 17) + +# Add source to this project's executable. +add_executable(wrench-example-real-workflow-single-cluster + EXCLUDE_FROM_ALL + SimpleWMS.h + SimpleWMS.cpp + SimpleWorkflowSimulator.cpp) + +add_custom_target( + wrench-example-real-workflow-single-cluster-files + COMMAND /bin/sh -c "if [ '${CMAKE_CURRENT_SOURCE_DIR}' != '${CMAKE_CURRENT_BINARY_DIR}' ]; then /bin/cp -f ${CMAKE_CURRENT_SOURCE_DIR}/1000genome-chameleon-2ch-100k-001.json ${CMAKE_CURRENT_BINARY_DIR}/1000genome-chameleon-2ch-100k-001.json ; fi ;" + VERBATIM +) + +add_dependencies(examples wrench-example-real-workflow-single-cluster) +add_dependencies(wrench-example-real-workflow-single-cluster wrench-example-real-workflow-single-cluster-files) +add_dependencies(wrench-example-real-workflow-single-cluster wrenchwfcommonsworkflowparser) + +if (ENABLE_BATSCHED) + target_link_libraries(wrench-example-real-workflow-single-cluster + wrench + wrenchwfcommonsworkflowparser + ${SimGrid_LIBRARY} + ${Boost_LIBRARIES} + ${ZMQ_LIBRARY} + ) +else() + target_link_libraries(wrench-example-real-workflow-single-cluster + wrench + wrenchwfcommonsworkflowparser + ${SimGrid_LIBRARY} + ${Boost_LIBRARIES} + ) +endif() diff --git a/examples/workflow_api/real-workflow-examples/single-cluster-programmatic-platform/SimpleWMS.cpp b/examples/workflow_api/real-workflow-examples/single-cluster-programmatic-platform/SimpleWMS.cpp new file mode 100755 index 0000000000..1c3aac3f46 --- /dev/null +++ b/examples/workflow_api/real-workflow-examples/single-cluster-programmatic-platform/SimpleWMS.cpp @@ -0,0 +1,180 @@ +/** + * Copyright (c) 2017-2021. The WRENCH Team. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ */ + +#include <iostream> + +#include "SimpleWMS.h" + +WRENCH_LOG_CATEGORY(simple_wms, "Log category for Simple WMS"); + +namespace wrench { + + /** + * @brief Constructor that creates a Simple WMS with + * a workflow to execute, a set of bare-metal compute services, and a storage service + * + * @param workflow: a workflow to execute + * @param bare_metal_compute_services: bare-metal compute services available to run jobs + * @param storage_service: a storage service available to store files + * @param hostname: the name of the host on which to start the WMS + */ + SimpleWMS::SimpleWMS(const std::shared_ptr<Workflow> &workflow, + const std::set<std::shared_ptr<BareMetalComputeService>> &bare_metal_compute_services, + const std::shared_ptr<StorageService> &storage_service, + const std::string &hostname) : ExecutionController(hostname, "simple"), + workflow(workflow), + bare_metal_compute_services(bare_metal_compute_services), + storage_service(storage_service) {} + + /** + * @brief main method of the SimpleWMS daemon + * + * @return 0 on completion + * + */ + int SimpleWMS::main() { + TerminalOutput::setThisProcessLoggingColor(TerminalOutput::COLOR_GREEN); + + WRENCH_INFO("Starting on host %s", S4U_Simulation::getHostName().c_str()); + WRENCH_INFO("About to execute a workflow with %lu tasks", this->workflow->getNumberOfTasks()); + + // Create a job manager + this->job_manager = this->createJobManager(); + + // Populate data structure to keep track of idle cores at each compute service + for (auto const &cs : this->bare_metal_compute_services) { + this->core_utilization_map[cs] = cs->getTotalNumCores(false); + } + + + while (true) { + scheduleReadyTasks(workflow->getReadyTasks()); + + // Wait for a workflow execution event, and process it + try { + this->waitForAndProcessNextEvent(); + } catch (ExecutionException &e) { + WRENCH_INFO("Error while getting next execution event (%s)... ignoring and trying again", + (e.getCause()->toString().c_str())); + continue; + } + if (this->workflow->isDone()) { + break; + } + } + + S4U_Simulation::sleep(10); + + WRENCH_INFO("--------------------------------------------------------"); + if (this->workflow->isDone()) { + WRENCH_INFO("Workflow execution is complete!"); + } else { + WRENCH_INFO("Workflow execution is incomplete!"); + } + + WRENCH_INFO("WMS terminating"); + + return 0; + } + + /** + * @brief Process a StandardJobFailedEvent + * + * @param event: a workflow execution event + */ + void SimpleWMS::processEventStandardJobFailure(const std::shared_ptr<StandardJobFailedEvent> &event) { + auto job = event->standard_job; + TerminalOutput::setThisProcessLoggingColor(TerminalOutput::COLOR_RED); + WRENCH_INFO("Task %s has failed", (*job->getTasks().begin())->getID().c_str()); + WRENCH_INFO("failure cause: %s", event->failure_cause->toString().c_str()); + TerminalOutput::setThisProcessLoggingColor(TerminalOutput::COLOR_GREEN); + } + + /** + * @brief Process a StandardJobCompletedEvent + * + * @param event: a workflow execution event + */ + void SimpleWMS::processEventStandardJobCompletion(const std::shared_ptr<StandardJobCompletedEvent> &event) { + auto job = event->standard_job; + TerminalOutput::setThisProcessLoggingColor(TerminalOutput::COLOR_BLUE); + WRENCH_INFO("Task %s has COMPLETED (on service %s)", + (*job->getTasks().begin())->getID().c_str(), + job->getParentComputeService()->getName().c_str()); + TerminalOutput::setThisProcessLoggingColor(TerminalOutput::COLOR_GREEN); + this->core_utilization_map[job->getParentComputeService()]++; + } + + + /** + * @brief Helper method to schedule ready tasks on available compute services.
The naive scheduling + * strategy is to pick the task with the most computational work, and run it on + * the compute service with the fastest cores. In this example, all compute services + * are homogeneous, so we just pick the first available. + * + * @param ready_tasks: the ready tasks to schedule + * @return + */ + void SimpleWMS::scheduleReadyTasks(std::vector<std::shared_ptr<WorkflowTask>> ready_tasks) { + + if (ready_tasks.empty()) { + return; + } + + WRENCH_INFO("Trying to schedule %zu ready tasks", ready_tasks.size()); + // Sort the tasks + std::sort(ready_tasks.begin(), ready_tasks.end(), + [](const std::shared_ptr<WorkflowTask> &x, + const std::shared_ptr<WorkflowTask> &y) { + if (x->getFlops() < y->getFlops()) { + return true; + } else if (x->getFlops() > y->getFlops()) { + return false; + } else { + return (x.get() > y.get()); + } + } + ); + + unsigned long num_tasks_scheduled = 0; + for (auto const &task: ready_tasks) { + bool scheduled = false; + for (auto const &cs: this->bare_metal_compute_services) { + if (this->core_utilization_map[cs] > 0) { + // Specify that ALL files are read/written from the one storage service + std::map<std::shared_ptr<DataFile>, std::shared_ptr<FileLocation>> file_locations; + for (auto const &f: task->getInputFiles()) { + file_locations[f] = wrench::FileLocation::LOCATION(this->storage_service, f); + } + for (auto const &f: task->getOutputFiles()) { + file_locations[f] = wrench::FileLocation::LOCATION(this->storage_service, f); + } + try { + auto job = job_manager->createStandardJob(task, file_locations); + WRENCH_INFO( + "Submitting task %s to compute service %s", task->getID().c_str(), + cs->getName().c_str()); + job_manager->submitJob(job, cs); + this->core_utilization_map[cs]--; + num_tasks_scheduled++; + scheduled = true; + } catch (ExecutionException &e) { + WRENCH_INFO("WARNING: Was not able to submit task %s, likely due to the pilot job having expired " + "(I should get a notification of its expiration soon)", + task->getID().c_str()); + } + break; + } + } + if (not scheduled) break; + } + WRENCH_INFO("Was able to schedule %lu out of %zu ready tasks", num_tasks_scheduled, ready_tasks.size()); + } + +}// namespace wrench diff --git a/examples/workflow_api/real-workflow-examples/single-cluster-programmatic-platform/SimpleWMS.h b/examples/workflow_api/real-workflow-examples/single-cluster-programmatic-platform/SimpleWMS.h new file mode 100755 index 0000000000..259960aa79 --- /dev/null +++ b/examples/workflow_api/real-workflow-examples/single-cluster-programmatic-platform/SimpleWMS.h @@ -0,0 +1,46 @@ +/** + * Copyright (c) 2017-2018. The WRENCH Team. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version.
+ */ + +#ifndef WRENCH_EXAMPLE_SIMPLEWMS_H +#define WRENCH_EXAMPLE_SIMPLEWMS_H + +#include "wrench-dev.h" + +namespace wrench { + + /** + * @brief A simple WMS implementation + */ + class SimpleWMS : public ExecutionController { + + public: + SimpleWMS(const std::shared_ptr<Workflow> &workflow, + const std::set<std::shared_ptr<BareMetalComputeService>> &bare_metal_compute_services, + const std::shared_ptr<StorageService> &storage_service, + const std::string &hostname); + + protected: + void processEventStandardJobCompletion(const std::shared_ptr<StandardJobCompletedEvent> &event) override; + void processEventStandardJobFailure(const std::shared_ptr<StandardJobFailedEvent> &event) override; + + private: + int main() override; + + void scheduleReadyTasks(std::vector<std::shared_ptr<WorkflowTask>> ready_tasks); + + std::shared_ptr<Workflow> workflow; + std::set<std::shared_ptr<BareMetalComputeService>> bare_metal_compute_services; + std::shared_ptr<StorageService> storage_service; + std::shared_ptr<JobManager> job_manager; + + std::map<std::shared_ptr<ComputeService>, unsigned long> core_utilization_map; + }; +}// namespace wrench + +#endif//WRENCH_EXAMPLE_SIMPLEWMS_H diff --git a/examples/workflow_api/real-workflow-examples/single-cluster-programmatic-platform/SimpleWorkflowSimulator.cpp b/examples/workflow_api/real-workflow-examples/single-cluster-programmatic-platform/SimpleWorkflowSimulator.cpp new file mode 100755 index 0000000000..2a3dbff352 --- /dev/null +++ b/examples/workflow_api/real-workflow-examples/single-cluster-programmatic-platform/SimpleWorkflowSimulator.cpp @@ -0,0 +1,180 @@ +/** + * Copyright (c) 2017. The WRENCH Team. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + */ + +#include <iostream> +#include "wrench.h" + +#include "SimpleWMS.h" +#include "wrench/tools/wfcommons/WfCommonsWorkflowParser.h" +#include <simgrid/s4u.hpp> +#include + +#define MBPS (1000 * 1000) + +namespace sg4 = simgrid::s4u; + +/** + * @brief Function to instantiate a simulated platform, instead of + * loading it from an XML file. This function directly uses SimGrid's s4u API + * (see the SimGrid documentation). This function creates a platform made of + * one WMS host and a configurable number of single-core compute hosts.
+ */ + +class PlatformCreator { + +public: + PlatformCreator(unsigned long num_compute_hosts) : num_compute_hosts(num_compute_hosts) {} + + void operator()() const { + create_platform(); + } + +private: + unsigned long num_compute_hosts; + + void create_platform() const { + // Create the top-level zone + auto zone = sg4::create_full_zone("AS0"); + + // Create the WMSHost host with its disk + auto wms_host = zone->create_host("WMSHost", "10Gf"); + wms_host->set_core_count(1); + auto wms_host_disk = wms_host->create_disk("hard_drive", + "100MBps", + "100MBps"); + wms_host_disk->set_property("size", "5000GiB"); + wms_host_disk->set_property("mount", "/"); + + // Create a single network link that abstracts the wide-area network + auto network_link = zone->create_link("network_link", 100 * MBPS)->set_latency("20us"); + + // Create the compute hosts and routes to them (could be done as a single cluster) + for (int i=0; i < num_compute_hosts; i++) { + auto compute_host = zone->create_host("ComputeHost_" + std::to_string(i), "1Gf"); + compute_host->set_core_count(1); + sg4::LinkInRoute network_link_in_route{network_link}; + zone->add_route(compute_host, + wms_host, + {network_link_in_route}); + } + + zone->seal(); + } +}; + +/** + * @brief An example that demonstrates how to run a simulation of a simple Workflow + * Management System (WMS) (implemented in SimpleWMS.[cpp|h]). + * + * @param argc: argument count + * @param argv: argument array + * @return 0 if the simulation has successfully completed + */ +int main(int argc, char **argv) { + + /* + * Declaration of the top-level WRENCH simulation object + */ + auto simulation = wrench::Simulation::createSimulation(); + + /* + * Initialization of the simulation, which may entail extracting WRENCH-specific and + * Simgrid-specific command-line arguments that can modify general simulation behavior. + * Two special command-line arguments are --help-wrench and --help-simgrid, which print + * details about available command-line arguments. + */ + simulation->init(&argc, argv); + + /* + * Parsing of the command-line arguments for this WRENCH simulation + */ + if (argc != 3) { + std::cerr << "Usage: " << argv[0] << " <# compute hosts> <workflow file> [--log=simple_wms.threshold=info]" << std::endl; + exit(1); + } + + /* The second argument is the workflow description file, written in JSON using WfCommons's WfFormat format */ + char *workflow_file = argv[2]; + + /* The first argument is the number of compute hosts to simulate */ + int num_compute_hosts; + try { + num_compute_hosts = std::stoi(argv[1]); + } catch (std::invalid_argument &e) { + std::cerr << "Invalid number of compute hosts\n"; + exit(1); + } + + /* Reading and parsing the workflow description file to create a wrench::Workflow object */ + std::cerr << "Loading workflow..." << std::endl; + std::shared_ptr<wrench::Workflow> workflow; + workflow = wrench::WfCommonsWorkflowParser::createWorkflowFromJSON(workflow_file, "100Gf", true); + std::cerr << "The workflow has " << workflow->getNumberOfTasks() << " tasks " << std::endl; + std::cerr.flush(); + + /* Instantiating the simulated platform programmatically */ + std::cerr << "Instantiating SimGrid platform programmatically ..."
<< std::endl; + PlatformCreator platform_creator(num_compute_hosts); + simulation->instantiatePlatform(platform_creator); + + /* Get a vector of all the hosts in the simulated platform */ + std::vector<std::string> hostname_list = wrench::Simulation::getHostnameList(); + + /* Instantiate a storage service, to be started on the WMSHost */ + std::cerr << "Instantiating a SimpleStorageService on WMSHost " << std::endl; + auto storage_service = simulation->add(wrench::SimpleStorageService::createSimpleStorageService({"WMSHost"}, {"/"})); + + /* Create a list of compute services */ + std::set<std::shared_ptr<wrench::BareMetalComputeService>> compute_services; + + /* Create a bare-metal compute service on each compute host in the platform */ + for (auto const &hostname : wrench::Simulation::getHostnameList()) { + if (hostname != "WMSHost") { + auto cs = simulation->add(new wrench::BareMetalComputeService({hostname}, {hostname}, "", {}, {})); + compute_services.insert(cs); + } + } + + /* Instantiate a WMS (which is an ExecutionController really), to be started on the WMSHost host, which is responsible + * for executing the workflow. + * + * The WMS implementation is in SimpleWMS.[cpp|h]. + */ + std::cerr << "Instantiating a WMS on WMSHost..." << std::endl; + auto wms = simulation->add( + new wrench::SimpleWMS(workflow, compute_services, storage_service, {"WMSHost"})); + + /* It is necessary to store, or "stage", input files for the first task(s) of the workflow on some storage + * service, so that workflow execution can be initiated. The getInputFiles() method of the Workflow class + * returns the set of all files that are not generated by workflow tasks, and thus are only input files. + * These files are then staged on the storage service. + */ + std::cerr << "Staging input files..." << std::endl; + for (auto const &f: workflow->getInputFiles()) { + try { + storage_service->createFile(f); + } catch (std::runtime_error &e) { + std::cerr << "Exception: " << e.what() << std::endl; + return 0; + } + } + + /* Launch the simulation. This call only returns when the simulation is complete. */ + std::cerr << "Launching the Simulation..." << std::endl; + try { + simulation->launch(); + } catch (std::runtime_error &e) { + std::cerr << "Exception: " << e.what() << std::endl; + return 0; + } + std::cerr << "Simulation done!"
<< std::endl; + std::cerr << "Workflow completed at time: " << workflow->getCompletionDate() << std::endl; + + return 0; +} diff --git a/examples/workflow_api/real-workflow-examples/vms-and-pilot-jobs/1000genome-chameleon-2ch-100k-001.json b/examples/workflow_api/real-workflow-examples/vms-and-pilot-jobs/1000genome-chameleon-2ch-100k-001.json new file mode 100644 index 0000000000..ae537ac970 --- /dev/null +++ b/examples/workflow_api/real-workflow-examples/vms-and-pilot-jobs/1000genome-chameleon-2ch-100k-001.json @@ -0,0 +1,2217 @@ +{ + "name": "1000genome-20200401T035039Z-0", + "description": "Trace generated with wrench-pegasus-parser.py from http://wrench-project.org", + "createdAt": "2020-04-01T20:22:32.420180Z", + "schemaVersion": "1.5", + "author": { + "name": "rafsilva", + "email": "support@wrench-project.org" + }, + "workflow": { + "specification": { + "tasks": [ + { + "name": "individuals_ID0000001", + "id": "individuals_ID0000001", + "children": [ + "individuals_merge_ID0000011" + ], + "inputFiles": [ + "ALL.chr21.100000.vcf", + "columns.txt" + ], + "outputFiles": [ + "chr21n-1-1001.tar.gz" + ], + "parents": [] + }, + { + "name": "individuals_ID0000002", + "id": "individuals_ID0000002", + "children": [ + "individuals_merge_ID0000011" + ], + "inputFiles": [ + "ALL.chr21.100000.vcf", + "columns.txt" + ], + "outputFiles": [ + "chr21n-1001-2001.tar.gz" + ], + "parents": [] + }, + { + "name": "individuals_ID0000003", + "id": "individuals_ID0000003", + "children": [ + "individuals_merge_ID0000011" + ], + "inputFiles": [ + "ALL.chr21.100000.vcf", + "columns.txt" + ], + "outputFiles": [ + "chr21n-2001-3001.tar.gz" + ], + "parents": [] + }, + { + "name": "individuals_ID0000004", + "id": "individuals_ID0000004", + "children": [ + "individuals_merge_ID0000011" + ], + "inputFiles": [ + "ALL.chr21.100000.vcf", + "columns.txt" + ], + "outputFiles": [ + "chr21n-3001-4001.tar.gz" + ], + "parents": [] + }, + { + "name": "individuals_ID0000005", + "id": "individuals_ID0000005", + "children": [ + "individuals_merge_ID0000011" + ], + "inputFiles": [ + "ALL.chr21.100000.vcf", + "columns.txt" + ], + "outputFiles": [ + "chr21n-4001-5001.tar.gz" + ], + "parents": [] + }, + { + "name": "individuals_ID0000006", + "id": "individuals_ID0000006", + "children": [ + "individuals_merge_ID0000011" + ], + "inputFiles": [ + "ALL.chr21.100000.vcf", + "columns.txt" + ], + "outputFiles": [ + "chr21n-5001-6001.tar.gz" + ], + "parents": [] + }, + { + "name": "individuals_ID0000007", + "id": "individuals_ID0000007", + "children": [ + "individuals_merge_ID0000011" + ], + "inputFiles": [ + "ALL.chr21.100000.vcf", + "columns.txt" + ], + "outputFiles": [ + "chr21n-6001-7001.tar.gz" + ], + "parents": [] + }, + { + "name": "individuals_ID0000008", + "id": "individuals_ID0000008", + "children": [ + "individuals_merge_ID0000011" + ], + "inputFiles": [ + "ALL.chr21.100000.vcf", + "columns.txt" + ], + "outputFiles": [ + "chr21n-7001-8001.tar.gz" + ], + "parents": [] + }, + { + "name": "individuals_ID0000009", + "id": "individuals_ID0000009", + "children": [ + "individuals_merge_ID0000011" + ], + "inputFiles": [ + "ALL.chr21.100000.vcf", + "columns.txt" + ], + "outputFiles": [ + "chr21n-8001-9001.tar.gz" + ], + "parents": [] + }, + { + "name": "individuals_ID0000010", + "id": "individuals_ID0000010", + "children": [ + "individuals_merge_ID0000011" + ], + "inputFiles": [ + "ALL.chr21.100000.vcf", + "columns.txt" + ], + "outputFiles": [ + "chr21n-9001-10001.tar.gz" + ], + "parents": [] + }, + { + "name": 
"individuals_merge_ID0000011", + "id": "individuals_merge_ID0000011", + "children": [ + "mutation_overlap_ID0000025", + "frequency_ID0000026", + "mutation_overlap_ID0000027", + "frequency_ID0000028", + "mutation_overlap_ID0000029", + "frequency_ID0000030", + "mutation_overlap_ID0000031", + "frequency_ID0000032", + "mutation_overlap_ID0000033", + "frequency_ID0000034", + "mutation_overlap_ID0000035", + "frequency_ID0000036", + "mutation_overlap_ID0000037", + "frequency_ID0000038" + ], + "inputFiles": [ + "chr21n-4001-5001.tar.gz", + "chr21n-9001-10001.tar.gz", + "chr21n-5001-6001.tar.gz", + "chr21n-7001-8001.tar.gz", + "chr21n-6001-7001.tar.gz", + "chr21n-1001-2001.tar.gz", + "chr21n-8001-9001.tar.gz", + "chr21n-1-1001.tar.gz", + "chr21n-3001-4001.tar.gz", + "chr21n-2001-3001.tar.gz" + ], + "outputFiles": [ + "chr21n.tar.gz" + ], + "parents": [ + "individuals_ID0000004", + "individuals_ID0000005", + "individuals_ID0000006", + "individuals_ID0000007", + "individuals_ID0000001", + "individuals_ID0000002", + "individuals_ID0000003", + "individuals_ID0000008", + "individuals_ID0000009", + "individuals_ID0000010" + ] + }, + { + "name": "sifting_ID0000012", + "id": "sifting_ID0000012", + "children": [ + "mutation_overlap_ID0000025", + "frequency_ID0000026", + "mutation_overlap_ID0000027", + "frequency_ID0000028", + "mutation_overlap_ID0000029", + "frequency_ID0000030", + "mutation_overlap_ID0000031", + "frequency_ID0000032", + "mutation_overlap_ID0000033", + "frequency_ID0000034", + "mutation_overlap_ID0000035", + "frequency_ID0000036", + "mutation_overlap_ID0000037", + "frequency_ID0000038" + ], + "inputFiles": [ + "ALL.chr21.phase3_shapeit2_mvncall_integrated_v5.20130502.sites.annotation.vcf" + ], + "outputFiles": [ + "sifted.SIFT.chr21.txt" + ], + "parents": [] + }, + { + "name": "individuals_ID0000013", + "id": "individuals_ID0000013", + "children": [ + "individuals_merge_ID0000023" + ], + "inputFiles": [ + "columns.txt", + "ALL.chr22.100000.vcf" + ], + "outputFiles": [ + "chr22n-1-1001.tar.gz" + ], + "parents": [] + }, + { + "name": "individuals_ID0000014", + "id": "individuals_ID0000014", + "children": [ + "individuals_merge_ID0000023" + ], + "inputFiles": [ + "columns.txt", + "ALL.chr22.100000.vcf" + ], + "outputFiles": [ + "chr22n-1001-2001.tar.gz" + ], + "parents": [] + }, + { + "name": "individuals_ID0000015", + "id": "individuals_ID0000015", + "children": [ + "individuals_merge_ID0000023" + ], + "inputFiles": [ + "columns.txt", + "ALL.chr22.100000.vcf" + ], + "outputFiles": [ + "chr22n-2001-3001.tar.gz" + ], + "parents": [] + }, + { + "name": "individuals_ID0000016", + "id": "individuals_ID0000016", + "children": [ + "individuals_merge_ID0000023" + ], + "inputFiles": [ + "columns.txt", + "ALL.chr22.100000.vcf" + ], + "outputFiles": [ + "chr22n-3001-4001.tar.gz" + ], + "parents": [] + }, + { + "name": "individuals_ID0000017", + "id": "individuals_ID0000017", + "children": [ + "individuals_merge_ID0000023" + ], + "inputFiles": [ + "columns.txt", + "ALL.chr22.100000.vcf" + ], + "outputFiles": [ + "chr22n-4001-5001.tar.gz" + ], + "parents": [] + }, + { + "name": "individuals_ID0000018", + "id": "individuals_ID0000018", + "children": [ + "individuals_merge_ID0000023" + ], + "inputFiles": [ + "columns.txt", + "ALL.chr22.100000.vcf" + ], + "outputFiles": [ + "chr22n-5001-6001.tar.gz" + ], + "parents": [] + }, + { + "name": "individuals_ID0000019", + "id": "individuals_ID0000019", + "children": [ + "individuals_merge_ID0000023" + ], + "inputFiles": [ + "columns.txt", + "ALL.chr22.100000.vcf" + 
], + "outputFiles": [ + "chr22n-6001-7001.tar.gz" + ], + "parents": [] + }, + { + "name": "individuals_ID0000020", + "id": "individuals_ID0000020", + "children": [ + "individuals_merge_ID0000023" + ], + "inputFiles": [ + "columns.txt", + "ALL.chr22.100000.vcf" + ], + "outputFiles": [ + "chr22n-7001-8001.tar.gz" + ], + "parents": [] + }, + { + "name": "individuals_ID0000021", + "id": "individuals_ID0000021", + "children": [ + "individuals_merge_ID0000023" + ], + "inputFiles": [ + "columns.txt", + "ALL.chr22.100000.vcf" + ], + "outputFiles": [ + "chr22n-8001-9001.tar.gz" + ], + "parents": [] + }, + { + "name": "individuals_ID0000022", + "id": "individuals_ID0000022", + "children": [ + "individuals_merge_ID0000023" + ], + "inputFiles": [ + "columns.txt", + "ALL.chr22.100000.vcf" + ], + "outputFiles": [ + "chr22n-9001-10001.tar.gz" + ], + "parents": [] + }, + { + "name": "individuals_merge_ID0000023", + "id": "individuals_merge_ID0000023", + "children": [ + "mutation_overlap_ID0000039", + "frequency_ID0000040", + "mutation_overlap_ID0000041", + "frequency_ID0000042", + "mutation_overlap_ID0000043", + "frequency_ID0000044", + "mutation_overlap_ID0000045", + "frequency_ID0000046", + "mutation_overlap_ID0000047", + "frequency_ID0000048", + "mutation_overlap_ID0000049", + "frequency_ID0000050", + "mutation_overlap_ID0000051", + "frequency_ID0000052" + ], + "inputFiles": [ + "chr22n-4001-5001.tar.gz", + "chr22n-3001-4001.tar.gz", + "chr22n-1-1001.tar.gz", + "chr22n-2001-3001.tar.gz", + "chr22n-5001-6001.tar.gz", + "chr22n-6001-7001.tar.gz", + "chr22n-8001-9001.tar.gz", + "chr22n-7001-8001.tar.gz", + "chr22n-1001-2001.tar.gz", + "chr22n-9001-10001.tar.gz" + ], + "outputFiles": [ + "chr22n.tar.gz" + ], + "parents": [ + "individuals_ID0000015", + "individuals_ID0000016", + "individuals_ID0000017", + "individuals_ID0000018", + "individuals_ID0000013", + "individuals_ID0000014", + "individuals_ID0000019", + "individuals_ID0000022", + "individuals_ID0000020", + "individuals_ID0000021" + ] + }, + { + "name": "sifting_ID0000024", + "id": "sifting_ID0000024", + "children": [ + "mutation_overlap_ID0000039", + "frequency_ID0000040", + "mutation_overlap_ID0000041", + "frequency_ID0000042", + "mutation_overlap_ID0000043", + "frequency_ID0000044", + "mutation_overlap_ID0000045", + "frequency_ID0000046", + "mutation_overlap_ID0000047", + "frequency_ID0000048", + "mutation_overlap_ID0000049", + "frequency_ID0000050", + "mutation_overlap_ID0000051", + "frequency_ID0000052" + ], + "inputFiles": [ + "ALL.chr22.phase3_shapeit2_mvncall_integrated_v5.20130502.sites.annotation.vcf" + ], + "outputFiles": [ + "sifted.SIFT.chr22.txt" + ], + "parents": [] + }, + { + "name": "mutation_overlap_ID0000025", + "id": "mutation_overlap_ID0000025", + "children": [], + "inputFiles": [ + "columns.txt", + "AFR", + "chr21n.tar.gz", + "sifted.SIFT.chr21.txt" + ], + "outputFiles": [ + "chr21-AFR.tar.gz" + ], + "parents": [ + "sifting_ID0000012", + "individuals_merge_ID0000011" + ] + }, + { + "name": "frequency_ID0000026", + "id": "frequency_ID0000026", + "children": [], + "inputFiles": [ + "columns.txt", + "AFR", + "chr21n.tar.gz", + "sifted.SIFT.chr21.txt" + ], + "outputFiles": [ + "chr21-AFR-freq.tar.gz" + ], + "parents": [ + "sifting_ID0000012", + "individuals_merge_ID0000011" + ] + }, + { + "name": "mutation_overlap_ID0000027", + "id": "mutation_overlap_ID0000027", + "children": [], + "inputFiles": [ + "columns.txt", + "GBR", + "chr21n.tar.gz", + "sifted.SIFT.chr21.txt" + ], + "outputFiles": [ + "chr21-GBR.tar.gz" + ], + "parents": [ 
+ "sifting_ID0000012", + "individuals_merge_ID0000011" + ] + }, + { + "name": "frequency_ID0000028", + "id": "frequency_ID0000028", + "children": [], + "inputFiles": [ + "columns.txt", + "GBR", + "chr21n.tar.gz", + "sifted.SIFT.chr21.txt" + ], + "outputFiles": [ + "chr21-GBR-freq.tar.gz" + ], + "parents": [ + "sifting_ID0000012", + "individuals_merge_ID0000011" + ] + }, + { + "name": "mutation_overlap_ID0000029", + "id": "mutation_overlap_ID0000029", + "children": [], + "inputFiles": [ + "ALL", + "columns.txt", + "chr21n.tar.gz", + "sifted.SIFT.chr21.txt" + ], + "outputFiles": [ + "chr21-ALL.tar.gz" + ], + "parents": [ + "sifting_ID0000012", + "individuals_merge_ID0000011" + ] + }, + { + "name": "frequency_ID0000030", + "id": "frequency_ID0000030", + "children": [], + "inputFiles": [ + "ALL", + "columns.txt", + "chr21n.tar.gz", + "sifted.SIFT.chr21.txt" + ], + "outputFiles": [ + "chr21-ALL-freq.tar.gz" + ], + "parents": [ + "sifting_ID0000012", + "individuals_merge_ID0000011" + ] + }, + { + "name": "mutation_overlap_ID0000031", + "id": "mutation_overlap_ID0000031", + "children": [], + "inputFiles": [ + "columns.txt", + "SAS", + "chr21n.tar.gz", + "sifted.SIFT.chr21.txt" + ], + "outputFiles": [ + "chr21-SAS.tar.gz" + ], + "parents": [ + "sifting_ID0000012", + "individuals_merge_ID0000011" + ] + }, + { + "name": "frequency_ID0000032", + "id": "frequency_ID0000032", + "children": [], + "inputFiles": [ + "columns.txt", + "SAS", + "chr21n.tar.gz", + "sifted.SIFT.chr21.txt" + ], + "outputFiles": [ + "chr21-SAS-freq.tar.gz" + ], + "parents": [ + "sifting_ID0000012", + "individuals_merge_ID0000011" + ] + }, + { + "name": "mutation_overlap_ID0000033", + "id": "mutation_overlap_ID0000033", + "children": [], + "inputFiles": [ + "EAS", + "columns.txt", + "chr21n.tar.gz", + "sifted.SIFT.chr21.txt" + ], + "outputFiles": [ + "chr21-EAS.tar.gz" + ], + "parents": [ + "sifting_ID0000012", + "individuals_merge_ID0000011" + ] + }, + { + "name": "frequency_ID0000034", + "id": "frequency_ID0000034", + "children": [], + "inputFiles": [ + "EAS", + "columns.txt", + "chr21n.tar.gz", + "sifted.SIFT.chr21.txt" + ], + "outputFiles": [ + "chr21-EAS-freq.tar.gz" + ], + "parents": [ + "sifting_ID0000012", + "individuals_merge_ID0000011" + ] + }, + { + "name": "mutation_overlap_ID0000035", + "id": "mutation_overlap_ID0000035", + "children": [], + "inputFiles": [ + "columns.txt", + "AMR", + "chr21n.tar.gz", + "sifted.SIFT.chr21.txt" + ], + "outputFiles": [ + "chr21-AMR.tar.gz" + ], + "parents": [ + "sifting_ID0000012", + "individuals_merge_ID0000011" + ] + }, + { + "name": "frequency_ID0000036", + "id": "frequency_ID0000036", + "children": [], + "inputFiles": [ + "columns.txt", + "AMR", + "chr21n.tar.gz", + "sifted.SIFT.chr21.txt" + ], + "outputFiles": [ + "chr21-AMR-freq.tar.gz" + ], + "parents": [ + "sifting_ID0000012", + "individuals_merge_ID0000011" + ] + }, + { + "name": "mutation_overlap_ID0000037", + "id": "mutation_overlap_ID0000037", + "children": [], + "inputFiles": [ + "EUR", + "columns.txt", + "chr21n.tar.gz", + "sifted.SIFT.chr21.txt" + ], + "outputFiles": [ + "chr21-EUR.tar.gz" + ], + "parents": [ + "sifting_ID0000012", + "individuals_merge_ID0000011" + ] + }, + { + "name": "frequency_ID0000038", + "id": "frequency_ID0000038", + "children": [], + "inputFiles": [ + "EUR", + "columns.txt", + "chr21n.tar.gz", + "sifted.SIFT.chr21.txt" + ], + "outputFiles": [ + "chr21-EUR-freq.tar.gz" + ], + "parents": [ + "sifting_ID0000012", + "individuals_merge_ID0000011" + ] + }, + { + "name": "mutation_overlap_ID0000039", + 
"id": "mutation_overlap_ID0000039", + "children": [], + "inputFiles": [ + "columns.txt", + "sifted.SIFT.chr22.txt", + "chr22n.tar.gz", + "AFR" + ], + "outputFiles": [ + "chr22-AFR.tar.gz" + ], + "parents": [ + "individuals_merge_ID0000023", + "sifting_ID0000024" + ] + }, + { + "name": "frequency_ID0000040", + "id": "frequency_ID0000040", + "children": [], + "inputFiles": [ + "columns.txt", + "sifted.SIFT.chr22.txt", + "chr22n.tar.gz", + "AFR" + ], + "outputFiles": [ + "chr22-AFR-freq.tar.gz" + ], + "parents": [ + "individuals_merge_ID0000023", + "sifting_ID0000024" + ] + }, + { + "name": "mutation_overlap_ID0000041", + "id": "mutation_overlap_ID0000041", + "children": [], + "inputFiles": [ + "columns.txt", + "sifted.SIFT.chr22.txt", + "chr22n.tar.gz", + "GBR" + ], + "outputFiles": [ + "chr22-GBR.tar.gz" + ], + "parents": [ + "individuals_merge_ID0000023", + "sifting_ID0000024" + ] + }, + { + "name": "frequency_ID0000042", + "id": "frequency_ID0000042", + "children": [], + "inputFiles": [ + "columns.txt", + "sifted.SIFT.chr22.txt", + "chr22n.tar.gz", + "GBR" + ], + "outputFiles": [ + "chr22-GBR-freq.tar.gz" + ], + "parents": [ + "individuals_merge_ID0000023", + "sifting_ID0000024" + ] + }, + { + "name": "mutation_overlap_ID0000043", + "id": "mutation_overlap_ID0000043", + "children": [], + "inputFiles": [ + "ALL", + "sifted.SIFT.chr22.txt", + "chr22n.tar.gz", + "columns.txt" + ], + "outputFiles": [ + "chr22-ALL.tar.gz" + ], + "parents": [ + "individuals_merge_ID0000023", + "sifting_ID0000024" + ] + }, + { + "name": "frequency_ID0000044", + "id": "frequency_ID0000044", + "children": [], + "inputFiles": [ + "ALL", + "sifted.SIFT.chr22.txt", + "chr22n.tar.gz", + "columns.txt" + ], + "outputFiles": [ + "chr22-ALL-freq.tar.gz" + ], + "parents": [ + "individuals_merge_ID0000023", + "sifting_ID0000024" + ] + }, + { + "name": "mutation_overlap_ID0000045", + "id": "mutation_overlap_ID0000045", + "children": [], + "inputFiles": [ + "SAS", + "sifted.SIFT.chr22.txt", + "chr22n.tar.gz", + "columns.txt" + ], + "outputFiles": [ + "chr22-SAS.tar.gz" + ], + "parents": [ + "individuals_merge_ID0000023", + "sifting_ID0000024" + ] + }, + { + "name": "frequency_ID0000046", + "id": "frequency_ID0000046", + "children": [], + "inputFiles": [ + "SAS", + "sifted.SIFT.chr22.txt", + "chr22n.tar.gz", + "columns.txt" + ], + "outputFiles": [ + "chr22-SAS-freq.tar.gz" + ], + "parents": [ + "individuals_merge_ID0000023", + "sifting_ID0000024" + ] + }, + { + "name": "mutation_overlap_ID0000047", + "id": "mutation_overlap_ID0000047", + "children": [], + "inputFiles": [ + "EAS", + "sifted.SIFT.chr22.txt", + "chr22n.tar.gz", + "columns.txt" + ], + "outputFiles": [ + "chr22-EAS.tar.gz" + ], + "parents": [ + "individuals_merge_ID0000023", + "sifting_ID0000024" + ] + }, + { + "name": "frequency_ID0000048", + "id": "frequency_ID0000048", + "children": [], + "inputFiles": [ + "EAS", + "sifted.SIFT.chr22.txt", + "chr22n.tar.gz", + "columns.txt" + ], + "outputFiles": [ + "chr22-EAS-freq.tar.gz" + ], + "parents": [ + "individuals_merge_ID0000023", + "sifting_ID0000024" + ] + }, + { + "name": "mutation_overlap_ID0000049", + "id": "mutation_overlap_ID0000049", + "children": [], + "inputFiles": [ + "AMR", + "sifted.SIFT.chr22.txt", + "chr22n.tar.gz", + "columns.txt" + ], + "outputFiles": [ + "chr22-AMR.tar.gz" + ], + "parents": [ + "individuals_merge_ID0000023", + "sifting_ID0000024" + ] + }, + { + "name": "frequency_ID0000050", + "id": "frequency_ID0000050", + "children": [], + "inputFiles": [ + "AMR", + "sifted.SIFT.chr22.txt", + 
"chr22n.tar.gz", + "columns.txt" + ], + "outputFiles": [ + "chr22-AMR-freq.tar.gz" + ], + "parents": [ + "individuals_merge_ID0000023", + "sifting_ID0000024" + ] + }, + { + "name": "mutation_overlap_ID0000051", + "id": "mutation_overlap_ID0000051", + "children": [], + "inputFiles": [ + "EUR", + "sifted.SIFT.chr22.txt", + "chr22n.tar.gz", + "columns.txt" + ], + "outputFiles": [ + "chr22-EUR.tar.gz" + ], + "parents": [ + "individuals_merge_ID0000023", + "sifting_ID0000024" + ] + }, + { + "name": "frequency_ID0000052", + "id": "frequency_ID0000052", + "children": [], + "inputFiles": [ + "EUR", + "sifted.SIFT.chr22.txt", + "chr22n.tar.gz", + "columns.txt" + ], + "outputFiles": [ + "chr22-EUR-freq.tar.gz" + ], + "parents": [ + "individuals_merge_ID0000023", + "sifting_ID0000024" + ] + } + ], + "files": [ + { + "id": "ALL.chr21.100000.vcf", + "sizeInBytes": 1014442803 + }, + { + "id": "columns.txt", + "sizeInBytes": 20078 + }, + { + "id": "chr21n-1-1001.tar.gz", + "sizeInBytes": 28281 + }, + { + "id": "chr21n-1001-2001.tar.gz", + "sizeInBytes": 28270 + }, + { + "id": "chr21n-2001-3001.tar.gz", + "sizeInBytes": 28266 + }, + { + "id": "chr21n-3001-4001.tar.gz", + "sizeInBytes": 28303 + }, + { + "id": "chr21n-4001-5001.tar.gz", + "sizeInBytes": 28232 + }, + { + "id": "chr21n-5001-6001.tar.gz", + "sizeInBytes": 28261 + }, + { + "id": "chr21n-6001-7001.tar.gz", + "sizeInBytes": 28307 + }, + { + "id": "chr21n-7001-8001.tar.gz", + "sizeInBytes": 28304 + }, + { + "id": "chr21n-8001-9001.tar.gz", + "sizeInBytes": 28348 + }, + { + "id": "chr21n-9001-10001.tar.gz", + "sizeInBytes": 28210 + }, + { + "id": "chr21n.tar.gz", + "sizeInBytes": 25037 + }, + { + "id": "ALL.chr21.phase3_shapeit2_mvncall_integrated_v5.20130502.sites.annotation.vcf", + "sizeInBytes": 267806263 + }, + { + "id": "sifted.SIFT.chr21.txt", + "sizeInBytes": 231958 + }, + { + "id": "ALL.chr22.100000.vcf", + "sizeInBytes": 1014493636 + }, + { + "id": "chr22n-1-1001.tar.gz", + "sizeInBytes": 28128 + }, + { + "id": "chr22n-1001-2001.tar.gz", + "sizeInBytes": 28281 + }, + { + "id": "chr22n-2001-3001.tar.gz", + "sizeInBytes": 28138 + }, + { + "id": "chr22n-3001-4001.tar.gz", + "sizeInBytes": 27624 + }, + { + "id": "chr22n-4001-5001.tar.gz", + "sizeInBytes": 27615 + }, + { + "id": "chr22n-5001-6001.tar.gz", + "sizeInBytes": 28138 + }, + { + "id": "chr22n-6001-7001.tar.gz", + "sizeInBytes": 28125 + }, + { + "id": "chr22n-7001-8001.tar.gz", + "sizeInBytes": 28244 + }, + { + "id": "chr22n-8001-9001.tar.gz", + "sizeInBytes": 28302 + }, + { + "id": "chr22n-9001-10001.tar.gz", + "sizeInBytes": 28272 + }, + { + "id": "chr22n.tar.gz", + "sizeInBytes": 25055 + }, + { + "id": "ALL.chr22.phase3_shapeit2_mvncall_integrated_v5.20130502.sites.annotation.vcf", + "sizeInBytes": 280949919 + }, + { + "id": "sifted.SIFT.chr22.txt", + "sizeInBytes": 480587 + }, + { + "id": "AFR", + "sizeInBytes": 8088 + }, + { + "id": "chr21-AFR.tar.gz", + "sizeInBytes": 144569 + }, + { + "id": "chr21-AFR-freq.tar.gz", + "sizeInBytes": 272990 + }, + { + "id": "GBR", + "sizeInBytes": 856 + }, + { + "id": "chr21-GBR.tar.gz", + "sizeInBytes": 136903 + }, + { + "id": "chr21-GBR-freq.tar.gz", + "sizeInBytes": 219928 + }, + { + "id": "ALL", + "sizeInBytes": 28000 + }, + { + "id": "chr21-ALL.tar.gz", + "sizeInBytes": 166614 + }, + { + "id": "chr21-ALL-freq.tar.gz", + "sizeInBytes": 289496 + }, + { + "id": "SAS", + "sizeInBytes": 5248 + }, + { + "id": "chr21-SAS.tar.gz", + "sizeInBytes": 143377 + }, + { + "id": "chr21-SAS-freq.tar.gz", + "sizeInBytes": 264723 + }, + { + "id": "EAS", + 
"sizeInBytes": 4896 + }, + { + "id": "chr21-EAS.tar.gz", + "sizeInBytes": 142592 + }, + { + "id": "chr21-EAS-freq.tar.gz", + "sizeInBytes": 265443 + }, + { + "id": "AMR", + "sizeInBytes": 4248 + }, + { + "id": "chr21-AMR.tar.gz", + "sizeInBytes": 142095 + }, + { + "id": "chr21-AMR-freq.tar.gz", + "sizeInBytes": 259983 + }, + { + "id": "EUR", + "sizeInBytes": 5312 + }, + { + "id": "chr21-EUR.tar.gz", + "sizeInBytes": 141848 + }, + { + "id": "chr21-EUR-freq.tar.gz", + "sizeInBytes": 266654 + }, + { + "id": "chr22-AFR.tar.gz", + "sizeInBytes": 146435 + }, + { + "id": "chr22-AFR-freq.tar.gz", + "sizeInBytes": 270800 + }, + { + "id": "chr22-GBR.tar.gz", + "sizeInBytes": 138728 + }, + { + "id": "chr22-GBR-freq.tar.gz", + "sizeInBytes": 221650 + }, + { + "id": "chr22-ALL.tar.gz", + "sizeInBytes": 169325 + }, + { + "id": "chr22-ALL-freq.tar.gz", + "sizeInBytes": 290690 + }, + { + "id": "chr22-SAS.tar.gz", + "sizeInBytes": 145894 + }, + { + "id": "chr22-SAS-freq.tar.gz", + "sizeInBytes": 265017 + }, + { + "id": "chr22-EAS.tar.gz", + "sizeInBytes": 145147 + }, + { + "id": "chr22-EAS-freq.tar.gz", + "sizeInBytes": 267302 + }, + { + "id": "chr22-AMR.tar.gz", + "sizeInBytes": 144357 + }, + { + "id": "chr22-AMR-freq.tar.gz", + "sizeInBytes": 258065 + }, + { + "id": "chr22-EUR.tar.gz", + "sizeInBytes": 143815 + }, + { + "id": "chr22-EUR-freq.tar.gz", + "sizeInBytes": 268471 + } + ] + }, + "execution": { + "makespanInSeconds": 776.0, + "executedAt": "20200401T035043+0000", + "tasks": [ + { + "id": "individuals_ID0000001", + "runtimeInSeconds": 53.6, + "command": { + "program": "individuals", + "arguments": [ + "ALL.chr21.100000.vcf", + "21", + "1", + "1001", + "10000" + ] + }, + "avgCPU": 160.8619, + "priority": 20, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "individuals_ID0000002", + "runtimeInSeconds": 52.255, + "command": { + "program": "individuals", + "arguments": [ + "ALL.chr21.100000.vcf", + "21", + "1001", + "2001", + "10000" + ] + }, + "avgCPU": 164.834, + "priority": 20, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "individuals_ID0000003", + "runtimeInSeconds": 53.827, + "command": { + "program": "individuals", + "arguments": [ + "ALL.chr21.100000.vcf", + "21", + "2001", + "3001", + "10000" + ] + }, + "avgCPU": 159.8919, + "priority": 20, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "individuals_ID0000004", + "runtimeInSeconds": 52.111, + "command": { + "program": "individuals", + "arguments": [ + "ALL.chr21.100000.vcf", + "21", + "3001", + "4001", + "10000" + ] + }, + "avgCPU": 165.3355, + "priority": 20, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "individuals_ID0000005", + "runtimeInSeconds": 52.409, + "command": { + "program": "individuals", + "arguments": [ + "ALL.chr21.100000.vcf", + "21", + "4001", + "5001", + "10000" + ] + }, + "avgCPU": 164.6378, + "priority": 20, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "individuals_ID0000006", + "runtimeInSeconds": 51.066, + "command": { + "program": "individuals", + "arguments": [ + "ALL.chr21.100000.vcf", + "21", + "5001", + "6001", + "10000" + ] + }, + "avgCPU": 168.7698, + "priority": 20, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "individuals_ID0000007", + "runtimeInSeconds": 51.545, + "command": { + "program": "individuals", + "arguments": [ + "ALL.chr21.100000.vcf", + "21", + "6001", + "7001", + "10000" + ] + }, + "avgCPU": 166.8445, + "priority": 20, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "individuals_ID0000008", + "runtimeInSeconds": 52.759, + "command": { + "program": "individuals", + 
"arguments": [ + "ALL.chr21.100000.vcf", + "21", + "7001", + "8001", + "10000" + ] + }, + "avgCPU": 163.5209, + "priority": 20, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "individuals_ID0000009", + "runtimeInSeconds": 52.859, + "command": { + "program": "individuals", + "arguments": [ + "ALL.chr21.100000.vcf", + "21", + "8001", + "9001", + "10000" + ] + }, + "avgCPU": 163.1662, + "priority": 20, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "individuals_ID0000010", + "runtimeInSeconds": 51.251, + "command": { + "program": "individuals", + "arguments": [ + "ALL.chr21.100000.vcf", + "21", + "9001", + "10001", + "10000" + ] + }, + "avgCPU": 168.6679, + "priority": 20, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "individuals_merge_ID0000011", + "runtimeInSeconds": 38.206, + "command": { + "program": "individuals_merge", + "arguments": [ + "21", + "chr21n-1-1001.tar.gz", + "chr21n-1001-2001.tar.gz", + "chr21n-2001-3001.tar.gz", + "chr21n-3001-4001.tar.gz", + "chr21n-4001-5001.tar.gz", + "chr21n-5001-6001.tar.gz", + "chr21n-6001-7001.tar.gz", + "chr21n-7001-8001.tar.gz", + "chr21n-8001-9001.tar.gz", + "chr21n-9001-10001.tar.gz" + ] + }, + "avgCPU": 108.5484, + "priority": 30, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "sifting_ID0000012", + "runtimeInSeconds": 0.309, + "command": { + "program": "sifting", + "arguments": [ + "ALL.chr21.phase3_shapeit2_mvncall_integrated_v5.20130502.sites.annotation.vcf", + "21" + ] + }, + "avgCPU": 116.8285, + "priority": 20, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "individuals_ID0000013", + "runtimeInSeconds": 51.199, + "command": { + "program": "individuals", + "arguments": [ + "ALL.chr22.100000.vcf", + "22", + "1", + "1001", + "10000" + ] + }, + "avgCPU": 169.0424, + "priority": 20, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "individuals_ID0000014", + "runtimeInSeconds": 53.755, + "command": { + "program": "individuals", + "arguments": [ + "ALL.chr22.100000.vcf", + "22", + "1001", + "2001", + "10000" + ] + }, + "avgCPU": 160.8743, + "priority": 20, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "individuals_ID0000015", + "runtimeInSeconds": 52.677, + "command": { + "program": "individuals", + "arguments": [ + "ALL.chr22.100000.vcf", + "22", + "2001", + "3001", + "10000" + ] + }, + "avgCPU": 177.4645, + "priority": 20, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "individuals_ID0000016", + "runtimeInSeconds": 52.968, + "command": { + "program": "individuals", + "arguments": [ + "ALL.chr22.100000.vcf", + "22", + "3001", + "4001", + "10000" + ] + }, + "avgCPU": 175.9383, + "priority": 20, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "individuals_ID0000017", + "runtimeInSeconds": 51.309, + "command": { + "program": "individuals", + "arguments": [ + "ALL.chr22.100000.vcf", + "22", + "4001", + "5001", + "10000" + ] + }, + "avgCPU": 169.4771, + "priority": 20, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "individuals_ID0000018", + "runtimeInSeconds": 51.475, + "command": { + "program": "individuals", + "arguments": [ + "ALL.chr22.100000.vcf", + "22", + "5001", + "6001", + "10000" + ] + }, + "avgCPU": 169.0782, + "priority": 20, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "individuals_ID0000019", + "runtimeInSeconds": 50.939, + "command": { + "program": "individuals", + "arguments": [ + "ALL.chr22.100000.vcf", + "22", + "6001", + "7001", + "10000" + ] + }, + "avgCPU": 168.9059, + "priority": 20, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "individuals_ID0000020", + "runtimeInSeconds": 
52.127, + "command": { + "program": "individuals", + "arguments": [ + "ALL.chr22.100000.vcf", + "22", + "7001", + "8001", + "10000" + ] + }, + "avgCPU": 165.3366, + "priority": 20, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "individuals_ID0000021", + "runtimeInSeconds": 55.332, + "command": { + "program": "individuals", + "arguments": [ + "ALL.chr22.100000.vcf", + "22", + "8001", + "9001", + "10000" + ] + }, + "avgCPU": 157.6899, + "priority": 20, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "individuals_ID0000022", + "runtimeInSeconds": 53.637, + "command": { + "program": "individuals", + "arguments": [ + "ALL.chr22.100000.vcf", + "22", + "9001", + "10001", + "10000" + ] + }, + "avgCPU": 160.8349, + "priority": 20, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "individuals_merge_ID0000023", + "runtimeInSeconds": 37.667, + "command": { + "program": "individuals_merge", + "arguments": [ + "22", + "chr22n-1-1001.tar.gz", + "chr22n-1001-2001.tar.gz", + "chr22n-2001-3001.tar.gz", + "chr22n-3001-4001.tar.gz", + "chr22n-4001-5001.tar.gz", + "chr22n-5001-6001.tar.gz", + "chr22n-6001-7001.tar.gz", + "chr22n-7001-8001.tar.gz", + "chr22n-8001-9001.tar.gz", + "chr22n-9001-10001.tar.gz" + ] + }, + "avgCPU": 108.6628, + "priority": 30, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "sifting_ID0000024", + "runtimeInSeconds": 0.344, + "command": { + "program": "sifting", + "arguments": [ + "ALL.chr22.phase3_shapeit2_mvncall_integrated_v5.20130502.sites.annotation.vcf", + "22" + ] + }, + "avgCPU": 123.5465, + "priority": 20, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "mutation_overlap_ID0000025", + "runtimeInSeconds": 4.975, + "command": { + "program": "mutation_overlap", + "arguments": [ + "-c", + "21", + "-pop", + "AFR" + ] + }, + "avgCPU": 76.9246, + "priority": 40, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "frequency_ID0000026", + "runtimeInSeconds": 111.475, + "command": { + "program": "frequency", + "arguments": [ + "-c", + "21", + "-pop", + "AFR" + ] + }, + "avgCPU": 80.4754, + "priority": 40, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "mutation_overlap_ID0000027", + "runtimeInSeconds": 3.957, + "command": { + "program": "mutation_overlap", + "arguments": [ + "-c", + "21", + "-pop", + "GBR" + ] + }, + "avgCPU": 48.5469, + "priority": 40, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "frequency_ID0000028", + "runtimeInSeconds": 106.522, + "command": { + "program": "frequency", + "arguments": [ + "-c", + "21", + "-pop", + "GBR" + ] + }, + "avgCPU": 83.7874, + "priority": 40, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "mutation_overlap_ID0000029", + "runtimeInSeconds": 29.216, + "command": { + "program": "mutation_overlap", + "arguments": [ + "-c", + "21", + "-pop", + "ALL" + ] + }, + "avgCPU": 61.8326, + "priority": 40, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "frequency_ID0000030", + "runtimeInSeconds": 110.171, + "command": { + "program": "frequency", + "arguments": [ + "-c", + "21", + "-pop", + "ALL" + ] + }, + "avgCPU": 82.0152, + "priority": 40, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "mutation_overlap_ID0000031", + "runtimeInSeconds": 4.882, + "command": { + "program": "mutation_overlap", + "arguments": [ + "-c", + "21", + "-pop", + "SAS" + ] + }, + "avgCPU": 65.7927, + "priority": 40, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "frequency_ID0000032", + "runtimeInSeconds": 112.042, + "command": { + "program": "frequency", + "arguments": [ + "-c", + "21", + "-pop", + "SAS" + ] + }, + "avgCPU": 79.2551, + 
"priority": 40, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "mutation_overlap_ID0000033", + "runtimeInSeconds": 7.824, + "command": { + "program": "mutation_overlap", + "arguments": [ + "-c", + "21", + "-pop", + "EAS" + ] + }, + "avgCPU": 42.2163, + "priority": 40, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "frequency_ID0000034", + "runtimeInSeconds": 109.436, + "command": { + "program": "frequency", + "arguments": [ + "-c", + "21", + "-pop", + "EAS" + ] + }, + "avgCPU": 81.0245, + "priority": 40, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "mutation_overlap_ID0000035", + "runtimeInSeconds": 5.158, + "command": { + "program": "mutation_overlap", + "arguments": [ + "-c", + "21", + "-pop", + "AMR" + ] + }, + "avgCPU": 51.9581, + "priority": 40, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "frequency_ID0000036", + "runtimeInSeconds": 108.564, + "command": { + "program": "frequency", + "arguments": [ + "-c", + "21", + "-pop", + "AMR" + ] + }, + "avgCPU": 81.7269, + "priority": 40, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "mutation_overlap_ID0000037", + "runtimeInSeconds": 10.799, + "command": { + "program": "mutation_overlap", + "arguments": [ + "-c", + "21", + "-pop", + "EUR" + ] + }, + "avgCPU": 32.8827, + "priority": 40, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "frequency_ID0000038", + "runtimeInSeconds": 112.012, + "command": { + "program": "frequency", + "arguments": [ + "-c", + "21", + "-pop", + "EUR" + ] + }, + "avgCPU": 79.654, + "priority": 40, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "mutation_overlap_ID0000039", + "runtimeInSeconds": 4.902, + "command": { + "program": "mutation_overlap", + "arguments": [ + "-c", + "22", + "-pop", + "AFR" + ] + }, + "avgCPU": 79.845, + "priority": 40, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "frequency_ID0000040", + "runtimeInSeconds": 101.256, + "command": { + "program": "frequency", + "arguments": [ + "-c", + "22", + "-pop", + "AFR" + ] + }, + "avgCPU": 83.6691, + "priority": 40, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "mutation_overlap_ID0000041", + "runtimeInSeconds": 2.579, + "command": { + "program": "mutation_overlap", + "arguments": [ + "-c", + "22", + "-pop", + "GBR" + ] + }, + "avgCPU": 68.4762, + "priority": 40, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "frequency_ID0000042", + "runtimeInSeconds": 108.778, + "command": { + "program": "frequency", + "arguments": [ + "-c", + "22", + "-pop", + "GBR" + ] + }, + "avgCPU": 81.4264, + "priority": 40, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "mutation_overlap_ID0000043", + "runtimeInSeconds": 33.96, + "command": { + "program": "mutation_overlap", + "arguments": [ + "-c", + "22", + "-pop", + "ALL" + ] + }, + "avgCPU": 56.1366, + "priority": 40, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "frequency_ID0000044", + "runtimeInSeconds": 111.687, + "command": { + "program": "frequency", + "arguments": [ + "-c", + "22", + "-pop", + "ALL" + ] + }, + "avgCPU": 81.7615, + "priority": 40, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "mutation_overlap_ID0000045", + "runtimeInSeconds": 4.311, + "command": { + "program": "mutation_overlap", + "arguments": [ + "-c", + "22", + "-pop", + "SAS" + ] + }, + "avgCPU": 74.5999, + "priority": 40, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "frequency_ID0000046", + "runtimeInSeconds": 109.125, + "command": { + "program": "frequency", + "arguments": [ + "-c", + "22", + "-pop", + "SAS" + ] + }, + "avgCPU": 82.0481, + "priority": 40, + "machines": [ + 
"pegasus-5" + ] + }, + { + "id": "mutation_overlap_ID0000047", + "runtimeInSeconds": 5.282, + "command": { + "program": "mutation_overlap", + "arguments": [ + "-c", + "22", + "-pop", + "EAS" + ] + }, + "avgCPU": 62.1734, + "priority": 40, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "frequency_ID0000048", + "runtimeInSeconds": 109.772, + "command": { + "program": "frequency", + "arguments": [ + "-c", + "22", + "-pop", + "EAS" + ] + }, + "avgCPU": 81.6492, + "priority": 40, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "mutation_overlap_ID0000049", + "runtimeInSeconds": 3.777, + "command": { + "program": "mutation_overlap", + "arguments": [ + "-c", + "22", + "-pop", + "AMR" + ] + }, + "avgCPU": 70.9558, + "priority": 40, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "frequency_ID0000050", + "runtimeInSeconds": 99.194, + "command": { + "program": "frequency", + "arguments": [ + "-c", + "22", + "-pop", + "AMR" + ] + }, + "avgCPU": 84.62, + "priority": 40, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "mutation_overlap_ID0000051", + "runtimeInSeconds": 5.341, + "command": { + "program": "mutation_overlap", + "arguments": [ + "-c", + "22", + "-pop", + "EUR" + ] + }, + "avgCPU": 61.2245, + "priority": 40, + "machines": [ + "pegasus-5" + ] + }, + { + "id": "frequency_ID0000052", + "runtimeInSeconds": 108.672, + "command": { + "program": "frequency", + "arguments": [ + "-c", + "22", + "-pop", + "EUR" + ] + }, + "avgCPU": 81.6733, + "priority": 40, + "machines": [ + "pegasus-5" + ] + } + ], + "machines": [ + { + "nodeName": "pegasus-5", + "system": "linux", + "architecture": "x86_64", + "release": "4.15.0-88-generic", + "cpu": { + "vendor": "GenuineIntel", + "coreCount": 48, + "speedInMHz": 1200 + }, + "memoryInBytes": 131795984000 + } + ] + } + }, + "runtimeSystem": { + "url": "http://pegasus.isi.edu", + "version": "4.9.3", + "name": "Pegasus" + } +} \ No newline at end of file diff --git a/examples/workflow_api/real-workflow-example/CMakeLists.txt b/examples/workflow_api/real-workflow-examples/vms-and-pilot-jobs/CMakeLists.txt similarity index 61% rename from examples/workflow_api/real-workflow-example/CMakeLists.txt rename to examples/workflow_api/real-workflow-examples/vms-and-pilot-jobs/CMakeLists.txt index 3ece892221..5f62bb6e24 100755 --- a/examples/workflow_api/real-workflow-example/CMakeLists.txt +++ b/examples/workflow_api/real-workflow-examples/vms-and-pilot-jobs/CMakeLists.txt @@ -2,25 +2,25 @@ set(CMAKE_CXX_STANDARD 17) # Add source to this project's executable. 
-add_executable(wrench-example-real-workflow +add_executable(wrench-example-real-workflow-vms-and-pilots EXCLUDE_FROM_ALL - ./SimpleWMS.h - ./SimpleWMS.cpp - ./SimpleWorkflowSimulator.cpp) + SimpleWMS.h + SimpleWMS.cpp + SimpleWorkflowSimulator.cpp) add_custom_target( - wrench-example-real-workflow-files + wrench-example-real-workflow-vms-and-pilots-files COMMAND /bin/sh -c "if [ '${CMAKE_CURRENT_SOURCE_DIR}' != '${CMAKE_CURRENT_BINARY_DIR}' ]; then /bin/cp -f ${CMAKE_CURRENT_SOURCE_DIR}/cloud_batch_platform.xml ${CMAKE_CURRENT_BINARY_DIR}/cloud_batch_platform.xml ; fi ;" COMMAND /bin/sh -c "if [ '${CMAKE_CURRENT_SOURCE_DIR}' != '${CMAKE_CURRENT_BINARY_DIR}' ]; then /bin/cp -f ${CMAKE_CURRENT_SOURCE_DIR}/1000genome-chameleon-2ch-100k-001.json ${CMAKE_CURRENT_BINARY_DIR}/1000genome-chameleon-2ch-100k-001.json ; fi ;" VERBATIM ) -add_dependencies(examples wrench-example-real-workflow) -add_dependencies(wrench-example-real-workflow wrench-example-real-workflow-files) -add_dependencies(wrench-example-real-workflow wrenchwfcommonsworkflowparser) +add_dependencies(examples wrench-example-real-workflow-vms-and-pilots) +add_dependencies(wrench-example-real-workflow-vms-and-pilots wrench-example-real-workflow-vms-and-pilots-files) +add_dependencies(wrench-example-real-workflow-vms-and-pilots wrenchwfcommonsworkflowparser) if (ENABLE_BATSCHED) - target_link_libraries(wrench-example-real-workflow + target_link_libraries(wrench-example-real-workflow-vms-and-pilots wrench wrenchwfcommonsworkflowparser ${SimGrid_LIBRARY} @@ -28,7 +28,7 @@ if (ENABLE_BATSCHED) ${ZMQ_LIBRARY} ) else() - target_link_libraries(wrench-example-real-workflow + target_link_libraries(wrench-example-real-workflow-vms-and-pilots wrench wrenchwfcommonsworkflowparser ${SimGrid_LIBRARY} diff --git a/examples/workflow_api/real-workflow-example/SimpleWMS.cpp b/examples/workflow_api/real-workflow-examples/vms-and-pilot-jobs/SimpleWMS.cpp similarity index 99% rename from examples/workflow_api/real-workflow-example/SimpleWMS.cpp rename to examples/workflow_api/real-workflow-examples/vms-and-pilot-jobs/SimpleWMS.cpp index c4da8f8ef0..868a5107bb 100755 --- a/examples/workflow_api/real-workflow-example/SimpleWMS.cpp +++ b/examples/workflow_api/real-workflow-examples/vms-and-pilot-jobs/SimpleWMS.cpp @@ -88,7 +88,7 @@ namespace wrench { (e.getCause()->toString().c_str())); continue; } - if (this->abort || this->workflow->isDone()) { + if (this->workflow->isDone()) { break; } } diff --git a/examples/workflow_api/real-workflow-example/SimpleWMS.h b/examples/workflow_api/real-workflow-examples/vms-and-pilot-jobs/SimpleWMS.h similarity index 94% rename from examples/workflow_api/real-workflow-example/SimpleWMS.h rename to examples/workflow_api/real-workflow-examples/vms-and-pilot-jobs/SimpleWMS.h index 9edd1514d0..f69072a6f7 100755 --- a/examples/workflow_api/real-workflow-example/SimpleWMS.h +++ b/examples/workflow_api/real-workflow-examples/vms-and-pilot-jobs/SimpleWMS.h @@ -10,7 +10,7 @@ #ifndef WRENCH_EXAMPLE_SIMPLEWMS_H #define WRENCH_EXAMPLE_SIMPLEWMS_H -#include +#include "wrench-dev.h" namespace wrench { @@ -35,9 +35,6 @@ namespace wrench { private: int main() override; - /** @brief Whether the workflow execution should be aborted */ - bool abort = false; - /** @brief A pilot job that is submitted to the batch compute service */ std::shared_ptr pilot_job = nullptr; /** @brief A boolean to indicate whether the pilot job is running */ diff --git a/examples/workflow_api/real-workflow-example/SimpleWorkflowSimulator.cpp 
b/examples/workflow_api/real-workflow-examples/vms-and-pilot-jobs/SimpleWorkflowSimulator.cpp similarity index 99% rename from examples/workflow_api/real-workflow-example/SimpleWorkflowSimulator.cpp rename to examples/workflow_api/real-workflow-examples/vms-and-pilot-jobs/SimpleWorkflowSimulator.cpp index 59133ed6bb..50220f7146 100755 --- a/examples/workflow_api/real-workflow-example/SimpleWorkflowSimulator.cpp +++ b/examples/workflow_api/real-workflow-examples/vms-and-pilot-jobs/SimpleWorkflowSimulator.cpp @@ -8,10 +8,10 @@ */ #include -#include +#include "wrench.h" #include "SimpleWMS.h" -#include +#include "wrench/tools/wfcommons/WfCommonsWorkflowParser.h" #include #include diff --git a/examples/workflow_api/real-workflow-example/cloud_batch_platform.xml b/examples/workflow_api/real-workflow-examples/vms-and-pilot-jobs/cloud_batch_platform.xml similarity index 100% rename from examples/workflow_api/real-workflow-example/cloud_batch_platform.xml rename to examples/workflow_api/real-workflow-examples/vms-and-pilot-jobs/cloud_batch_platform.xml diff --git a/include/wrench/execution_events/CompoundJobCompletedEvent.h b/include/wrench/execution_events/CompoundJobCompletedEvent.h index 8a48eac2f0..9dbe96da2d 100755 --- a/include/wrench/execution_events/CompoundJobCompletedEvent.h +++ b/include/wrench/execution_events/CompoundJobCompletedEvent.h @@ -22,23 +22,10 @@ namespace wrench { - class WorkflowTask; - - class DataFile; - class CompoundJob; - class PilotJob; - class ComputeService; - class StorageService; - - class FileRegistryService; - - class FileRegistryService; - - /** * @brief A "compound job has completed" ExecutionEvent */ diff --git a/include/wrench/execution_events/CompoundJobFailedEvent.h b/include/wrench/execution_events/CompoundJobFailedEvent.h index e7c68d057f..398c074954 100755 --- a/include/wrench/execution_events/CompoundJobFailedEvent.h +++ b/include/wrench/execution_events/CompoundJobFailedEvent.h @@ -22,14 +22,8 @@ namespace wrench { - class WorkflowTask; - class DataFile; class CompoundJob; - class PilotJob; class ComputeService; - class StorageService; - class FileRegistryService; - class FileRegistryService; /** * @brief A "standard job has failed" ExecutionEvent diff --git a/include/wrench/execution_events/PilotJobExpiredEvent.h b/include/wrench/execution_events/PilotJobExpiredEvent.h index a9cf0506a3..3a85df6f96 100755 --- a/include/wrench/execution_events/PilotJobExpiredEvent.h +++ b/include/wrench/execution_events/PilotJobExpiredEvent.h @@ -20,23 +20,10 @@ namespace wrench { - class WorkflowTask; - - class DataFile; - - class StandardJob; - class PilotJob; class ComputeService; - class StorageService; - - class FileRegistryService; - - class FileRegistryService; - - /** * @brief A "pilot job has expired" ExecutionEvent */ diff --git a/include/wrench/services/compute/ComputeService.h b/include/wrench/services/compute/ComputeService.h index 01794c084f..52ca0f67e2 100644 --- a/include/wrench/services/compute/ComputeService.h +++ b/include/wrench/services/compute/ComputeService.h @@ -15,6 +15,7 @@ #include #include #include +#include #include "wrench/services/Service.h" #include "wrench/job/Job.h" @@ -53,7 +54,7 @@ namespace wrench { /** @brief A convenient constant to mean "use all ram of a physical host" whenever a ram capacity * is needed when instantiating compute services */ - static constexpr sg_size_t ALL_RAM = LONG_LONG_MAX; + static constexpr sg_size_t ALL_RAM = LONG_MAX; /***********************/ /** \cond DEVELOPER **/ diff --git 
a/include/wrench/services/compute/batch/BatchComputeService.h b/include/wrench/services/compute/batch/BatchComputeService.h index f4de453056..74585fb700 100755 --- a/include/wrench/services/compute/batch/BatchComputeService.h +++ b/include/wrench/services/compute/batch/BatchComputeService.h @@ -143,6 +143,7 @@ namespace wrench { friend class WorkloadTraceFileReplayer; friend class HomegrownBatchScheduler; friend class FCFSBatchScheduler; + friend class EasyBackfillingBatchScheduler; friend class ConservativeBackfillingBatchScheduler; friend class ConservativeBackfillingBatchSchedulerCoreLevel; @@ -216,7 +217,7 @@ namespace wrench { std::set queue_ordering_options = {"fcfs", "lcfs", "desc_bounded_slowdown", "desc_slowdown", "asc_size", "desc_size", "asc_walltime", "desc_walltime"}; #else - std::set scheduling_algorithms = {"fcfs", "conservative_bf", "conservative_bf_core_level"}; + std::set scheduling_algorithms = {"fcfs", "conservative_bf", "conservative_bf_core_level", "easy_bf_depth0", "easy_bf_depth1"}; //Batch queue ordering options std::set queue_ordering_options = {}; diff --git a/include/wrench/services/compute/batch/BatchComputeServiceProperty.h b/include/wrench/services/compute/batch/BatchComputeServiceProperty.h index 70ff8511ef..82ef1d3c42 100755 --- a/include/wrench/services/compute/batch/BatchComputeServiceProperty.h +++ b/include/wrench/services/compute/batch/BatchComputeServiceProperty.h @@ -29,14 +29,27 @@ namespace wrench { /** * @brief The batch scheduling algorithm. Can be: * - If ENABLE_BATSCHED is set to off / not set: - * - "fcfs": First Come First Serve, which allocates resources at the core level (i.e., two jobs may run on the same node if that node has enough cores to support both jobs) (default) - * - "conservative_bf": a home-grown implementation of FCFS with conservative backfilling, which only allocates resources at the node level (i.e., two jobs can never run on the same node even if that node has enough cores to support both jobs) - * - "conservative_bf_core_level": a home-grown implementation of FCFS with conservative backfilling, which allocates resources at the core level (i.e., two jobs may run on the same node if that node has enough cores to support both jobs) + * - "fcfs": First Come First Serve, which allocates resources at the core level (i.e., two jobs may run on the same node + * if that node has enough cores to support both jobs). (DEFAULT) + * + * - "easy_bf_depth0": a home-grown implementation of EASY (FCFS with backfilling), which only allocates resources at the node level + * (i.e., two jobs can never run on the same node even if that node has enough cores to support both jobs), + * and which may postpone the first (oldest) job in the queue via backfilling actions + * + * - "easy_bf_depth1": a home-grown implementation of EASY (FCFS with backfilling), which only allocates resources at the node level + * (i.e., two jobs can never run on the same node even if that node has enough cores to support both jobs), + * and which will never postpone the first (oldest) job in the queue via backfilling actions. + * THIS IS TYPICALLY CONSIDERED THE STANDARD "EASY" ALGORITHM. 
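+ *
+ *   For example (a sketch only: the host names, the scratch mount point, and the `simulation` variable below are illustrative placeholders),
+ *   one of these homegrown algorithms is selected through this property when the BatchComputeService is instantiated:
+ *
+ *     auto batch_cs = simulation->add(new wrench::BatchComputeService(
+ *             "batch_head_node", {"compute_node_1", "compute_node_2"}, "/scratch",
+ *             {{wrench::BatchComputeServiceProperty::BATCH_SCHEDULING_ALGORITHM, "easy_bf_depth1"}}, {}));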
+ * + * - "conservative_bf": a home-grown implementation of FCFS with conservative backfilling, which only allocates resources at the node level + * (i.e., two jobs can never run on the same node even if that node has enough cores to support both jobs) + * + * - "conservative_bf_core_level": a home-grown implementation of FCFS with conservative backfilling, which allocates resources at the core level + * (i.e., two jobs may run on the same node if that node has enough cores to support both jobs) * * - If ENABLE_BATSCHED is set to on: * - whatever scheduling algorithm is supported by Batsched - * (by default: "conservative_bf", other options include - * "easy_bf" and "easy_bf_fast") + * (DEFAULT: "conservative_bf"; other options include "easy_bf" and "easy_bf_fast") * - These only allocate resources at the node level (i.e., two jobs can never run on the same node even if that node has enough cores to support both jobs) * **/ @@ -111,7 +124,7 @@ namespace wrench { DECLARE_PROPERTY_NAME(SUBMIT_TIME_OF_FIRST_JOB_IN_WORKLOAD_TRACE_FILE); /** - * @brief Path to a to-be-generated Batsim-style CSV trace file (e.g. for b3atch schedule visualization purposes). + * @brief Path to a to-be-generated Batsim-style CSV trace file (e.g. for batch schedule visualization purposes). * - If ENABLE_BATSCHED is set to off or not set: ignored * - If ENABLE_BATSCHED is set to on: The trace file is generated in CSV format as follows: * allocated_processors,consumed_energy,execution_time,finish_time,job_id,metadata, diff --git a/include/wrench/services/compute/batch/BatchJob.h b/include/wrench/services/compute/batch/BatchJob.h index 743c055034..fbf4c822ec 100755 --- a/include/wrench/services/compute/batch/BatchJob.h +++ b/include/wrench/services/compute/batch/BatchJob.h @@ -53,9 +53,12 @@ namespace wrench { } private: + friend class EasyBackfillingBatchScheduler; friend class ConservativeBackfillingBatchScheduler; friend class ConservativeBackfillingBatchSchedulerCoreLevel; + u_int32_t easy_bf_start_date; // Field used by EASY_BF + u_int32_t easy_bf_expected_end_date; // Field used by EASY_BF u_int32_t conservative_bf_start_date; // Field used by CONSERVATIVE_BF u_int32_t conservative_bf_expected_end_date;// Field used by CONSERVATIVE_BF diff --git a/include/wrench/services/compute/batch/batch_schedulers/homegrown/conservative_bf/NodeAvailabilityTimeLine.h b/include/wrench/services/compute/batch/batch_schedulers/homegrown/conservative_bf/NodeAvailabilityTimeLine.h index aff971bda0..4fc5275063 100755 --- a/include/wrench/services/compute/batch/batch_schedulers/homegrown/conservative_bf/NodeAvailabilityTimeLine.h +++ b/include/wrench/services/compute/batch/batch_schedulers/homegrown/conservative_bf/NodeAvailabilityTimeLine.h @@ -10,6 +10,7 @@ #ifndef WRENCH_NODEAVAILABILITYTIMELINE_H #define WRENCH_NODEAVAILABILITYTIMELINE_H +#include #include #include #include "wrench/services/compute/batch/batch_schedulers/homegrown/conservative_bf/BatchJobSet.h" @@ -31,12 +32,14 @@ namespace wrench { public: explicit NodeAvailabilityTimeLine(unsigned long max_num_nodes); void setTimeOrigin(u_int32_t t); - void add(u_int32_t start, u_int32_t end, std::shared_ptr job) { update(true, start, end, job); } - void remove(u_int32_t start, u_int32_t end, std::shared_ptr job) { update(false, start, end, job); } + u_int32_t getTimeOrigin(); + void add(u_int32_t start, u_int32_t end, std::shared_ptr job) { update(true, start, end, std::move(job)); } + void remove(u_int32_t start, u_int32_t end, std::shared_ptr job) { update(false, start, end, 
std::move(job)); } void clear(); void print(); std::set> getJobsInFirstSlot(); - u_int32_t findEarliestStartTime(uint32_t duration, unsigned long num_nodes); + u_int32_t findEarliestStartTime(uint32_t duration, unsigned long num_nodes, unsigned long *num_available_nodes_at_that_time); + unsigned long getNumAvailableNodesInFirstSlot(); private: unsigned long max_num_nodes; diff --git a/include/wrench/services/compute/batch/batch_schedulers/homegrown/easy_bf/EasyBackfillingBatchScheduler.h b/include/wrench/services/compute/batch/batch_schedulers/homegrown/easy_bf/EasyBackfillingBatchScheduler.h new file mode 100755 index 0000000000..448c12fc33 --- /dev/null +++ b/include/wrench/services/compute/batch/batch_schedulers/homegrown/easy_bf/EasyBackfillingBatchScheduler.h @@ -0,0 +1,55 @@ +/** + * Copyright (c) 2017-2019. The WRENCH Team. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + */ + +#ifndef WRENCH_EASYBACKFILLINGBATCHSCHEDULER_H +#define WRENCH_EASYBACKFILLINGBATCHSCHEDULER_H + +#include "wrench/services/compute/batch/BatchComputeService.h" +#include "wrench/services/compute/batch/batch_schedulers/homegrown/HomegrownBatchScheduler.h" +#include + +namespace wrench { + + /***********************/ + /** \cond INTERNAL */ + /***********************/ + + /** + * @brief A class that defines an easy backfilling batch scheduler + */ + class EasyBackfillingBatchScheduler : public HomegrownBatchScheduler { + + public: + explicit EasyBackfillingBatchScheduler(BatchComputeService *cs, int depth); + + void processQueuedJobs() override; + + void processJobSubmission(std::shared_ptr batch_job) override; + void processJobFailure(std::shared_ptr batch_job) override; + void processJobCompletion(std::shared_ptr batch_job) override; + void processJobTermination(std::shared_ptr batch_job) override; + + std::map> scheduleOnHosts(unsigned long, unsigned long, sg_size_t) override; + + std::map + getStartTimeEstimates(std::set> set_of_jobs) override; + + private: + std::unique_ptr schedule; + int _depth; + }; + + + /***********************/ + /** \endcond */ + /***********************/ +}// namespace wrench + + +#endif//WRENCH_EASYBACKFILLINGBATCHSCHEDULER_H diff --git a/include/wrench/services/storage/storage_helpers/LogicalFileSystemLRUCaching.h b/include/wrench/services/storage/storage_helpers/LogicalFileSystemLRUCaching.h deleted file mode 100755 index 2e4b7faea2..0000000000 --- a/include/wrench/services/storage/storage_helpers/LogicalFileSystemLRUCaching.h +++ /dev/null @@ -1,112 +0,0 @@ -///** -// * Copyright (c) 2017. The WRENCH Team. -// * -// * This program is free software: you can redistribute it and/or modify -// * it under the terms of the GNU General Public License as published by -// * the Free Software Foundation, either version 3 of the License, or -// * (at your option) any later version.
-// */ -// -//#ifndef WRENCH_LOGICALFILESYSTEMLRUCACHING_H -//#define WRENCH_LOGICALFILESYSTEMLRUCACHING_H -// -//#include -//#include -//#include -//#include -//#include -//#include -// -//#include -// -// -//#include -// -//namespace wrench { -// -// /***********************/ -// /** \cond INTERNAL */ -// /***********************/ -// -// -// class StorageService; -// -// /** -// * @brief A class that implements a weak file system abstraction -// */ -// class LogicalFileSystemLRUCaching : public LogicalFileSystem { -// -// public: -// /** -// * @brief A helper struct to describe a file instance on disk -// */ -// struct FileOnDiskLRUCaching : public FileOnDisk { -// public: -// /** -// * @brief Constructor -// * @param last_write_date Last write date -// * @param lru_sequence_number LRU sequence number -// * @param num_current_transactions Number of current transactions using this file on disk -// */ -// FileOnDiskLRUCaching(double last_write_date, -// unsigned int lru_sequence_number, -// unsigned short num_current_transactions) : FileOnDisk(last_write_date), -// lru_sequence_number(lru_sequence_number), -// num_current_transactions(num_current_transactions) {} -// -// /** -// * @brief The LRU sequence number (lower means older) -// */ -// unsigned int lru_sequence_number; -// /** -// * @brief The number of transactions that involve this file, meaning that it's not evictable is > 0 -// */ -// unsigned short num_current_transactions; -// }; -// -// public: -// /** -// * @brief Next LRU sequence number -// */ -// unsigned int next_lru_sequence_number = 0; -// -// void storeFileInDirectory(const std::shared_ptr &file, const std::string &absolute_path) override; -// void removeFileFromDirectory(const std::shared_ptr &file, const std::string &absolute_path) override; -// void removeAllFilesInDirectory(const std::string &absolute_path) override; -// void updateReadDate(const std::shared_ptr &file, const std::string &absolute_path) override; -// void incrementNumRunningTransactionsForFileInDirectory(const std::shared_ptr &file, const std::string &absolute_path) override; -// void decrementNumRunningTransactionsForFileInDirectory(const std::shared_ptr &file, const std::string &absolute_path) override; -// -// protected: -// bool evictFiles(double needed_free_space) override; -// -// private: -// friend class StorageService; -// friend class LogicalFileSystem; -// -// explicit LogicalFileSystemLRUCaching(const std::string &hostname, -// StorageService *storage_service, -// const std::string &mount_point); -// -// -// std::map>> lru_list; -// -// void print_lru_list() { -// std::cerr << "LRU LIST:\n"; -// for (auto const &lru: this->lru_list) { -// std::cerr << "[" << lru.first << "] " << std::get<0>(lru.second) << ":" << std::get<1>(lru.second)->getID() << "\n"; -// } -// } -// -// private: -// }; -// -// -// /***********************/ -// /** \endcond */ -// /***********************/ -// -//}// namespace wrench -// -// -//#endif//WRENCH_LOGICALFILESYSTEMLRUCACHING_H diff --git a/include/wrench/services/storage/storage_helpers/LogicalFileSystemNoCaching.h b/include/wrench/services/storage/storage_helpers/LogicalFileSystemNoCaching.h deleted file mode 100755 index 3ab6a77f01..0000000000 --- a/include/wrench/services/storage/storage_helpers/LogicalFileSystemNoCaching.h +++ /dev/null @@ -1,75 +0,0 @@ -///** -// * Copyright (c) 2017. The WRENCH Team. 
-// * -// * This program is free software: you can redistribute it and/or modify -// * it under the terms of the GNU General Public License as published by -// * the Free Software Foundation, either version 3 of the License, or -// * (at your option) any later version. -// */ -// -//#ifndef WRENCH_LOGICALFILESYSTEMNOCACHING_H -//#define WRENCH_LOGICALFILESYSTEMNOCACHING_H -// -//#include -//#include -//#include -//#include -//#include -//#include -// -//#include -// -// -//#include -//#include -// -//namespace wrench { -// -// /***********************/ -// /** \cond INTERNAL */ -// /***********************/ -// -// -// class StorageService; -// -// /** -// * @brief A class that implements a weak file system abstraction -// */ -// class LogicalFileSystemNoCaching : public LogicalFileSystem { -// -// class FileOnDiskNoCaching : public FileOnDisk { -// public: -// explicit FileOnDiskNoCaching(double last_write_date) : FileOnDisk(last_write_date) {} -// }; -// -// public: -// void storeFileInDirectory(const std::shared_ptr &file, const std::string &absolute_path) override; -// void removeFileFromDirectory(const std::shared_ptr &file, const std::string &absolute_path) override; -// void removeAllFilesInDirectory(const std::string &absolute_path) override; -// void updateReadDate(const std::shared_ptr &file, const std::string &absolute_path) override; -// void incrementNumRunningTransactionsForFileInDirectory(const std::shared_ptr &file, const std::string &absolute_path) override; -// void decrementNumRunningTransactionsForFileInDirectory(const std::shared_ptr &file, const std::string &absolute_path) override; -// -// protected: -// friend class StorageService; -// bool evictFiles(double needed_free_space) override; -// -// private: -// friend class LogicalFileSystem; -// explicit LogicalFileSystemNoCaching(const std::string &hostname, -// StorageService *storage_service, -// const std::string &mount_point); -// -// -// private: -// }; -// -// -// /***********************/ -// /** \endcond */ -// /***********************/ -// -//}// namespace wrench -// -// -//#endif//WRENCH_LOGICALFILESYSTEMNOCACHING_H diff --git a/include/wrench/simgrid_S4U_util/S4U_Simulation.h b/include/wrench/simgrid_S4U_util/S4U_Simulation.h index 4b88cbf7f0..340e5c3398 100644 --- a/include/wrench/simgrid_S4U_util/S4U_Simulation.h +++ b/include/wrench/simgrid_S4U_util/S4U_Simulation.h @@ -13,6 +13,7 @@ #include #include +#include #include #include @@ -29,7 +30,7 @@ namespace wrench { class S4U_Simulation { public: /** @brief The ram capacity of a physical host whenever not specified in the platform description file */ - static constexpr sg_size_t DEFAULT_RAM = LONG_LONG_MAX; + static constexpr sg_size_t DEFAULT_RAM = LLONG_MAX; public: static void enableSMPI(); diff --git a/include/wrench/simulation/Version.h b/include/wrench/simulation/Version.h index ad85201529..2aadf31fd6 100755 --- a/include/wrench/simulation/Version.h +++ b/include/wrench/simulation/Version.h @@ -9,5 +9,5 @@ */ namespace wrench { -#define WRENCH_VERSION_STRING "2.5-dev" +#define WRENCH_VERSION_STRING "2.6-dev" }// namespace wrench diff --git a/src/wrench/action/ComputeAction.cpp b/src/wrench/action/ComputeAction.cpp index 3831ece388..0b84c6e2ce 100755 --- a/src/wrench/action/ComputeAction.cpp +++ b/src/wrench/action/ComputeAction.cpp @@ -38,7 +38,7 @@ namespace wrench { unsigned long max_num_cores, std::shared_ptr parallel_model) : Action(name, "compute_") { - if ((flops < 0) || (ram < 0) || (min_num_cores < 1) || (max_num_cores < min_num_cores)) { + 
if ((flops < 0) || (min_num_cores < 1) || (max_num_cores < min_num_cores)) { throw std::invalid_argument("ComputeAction::ComputeAction(): invalid arguments"); } this->flops = flops; diff --git a/src/wrench/execution_events/ExecutionEvent.cpp b/src/wrench/execution_events/ExecutionEvent.cpp index fcde3322ab..24bb0906cf 100755 --- a/src/wrench/execution_events/ExecutionEvent.cpp +++ b/src/wrench/execution_events/ExecutionEvent.cpp @@ -48,61 +48,61 @@ namespace wrench { } } - if (auto m = std::dynamic_pointer_cast(message)) { + if (auto jmcjcm = std::dynamic_pointer_cast(message)) { return std::shared_ptr( - new CompoundJobCompletedEvent(m->job, m->compute_service)); + new CompoundJobCompletedEvent(jmcjcm->job, jmcjcm->compute_service)); - } else if (auto m = std::dynamic_pointer_cast(message)) { + } else if (auto jmcjfm = std::dynamic_pointer_cast(message)) { return std::shared_ptr( - new CompoundJobFailedEvent(m->job, m->compute_service, m->cause)); + new CompoundJobFailedEvent(jmcjfm->job, jmcjfm->compute_service, jmcjfm->cause)); - } else if (auto m = std::dynamic_pointer_cast(message)) { + } else if (auto jmsjcm = std::dynamic_pointer_cast(message)) { std::set> failure_count_increments; - m->job->applyTaskUpdates(m->necessary_state_changes, failure_count_increments); + jmsjcm->job->applyTaskUpdates(jmsjcm->necessary_state_changes, failure_count_increments); return std::shared_ptr( - new StandardJobCompletedEvent(m->job, m->compute_service)); + new StandardJobCompletedEvent(jmsjcm->job, jmsjcm->compute_service)); - } else if (auto m = std::dynamic_pointer_cast(message)) { - m->job->applyTaskUpdates(m->necessary_state_changes, m->necessary_failure_count_increments); + } else if (auto jmsjfm = std::dynamic_pointer_cast(message)) { + jmsjfm->job->applyTaskUpdates(jmsjfm->necessary_state_changes, jmsjfm->necessary_failure_count_increments); return std::shared_ptr( - new StandardJobFailedEvent(m->job, m->compute_service, m->cause)); + new StandardJobFailedEvent(jmsjfm->job, jmsjfm->compute_service, jmsjfm->cause)); - } else if (auto m = std::dynamic_pointer_cast(message)) { - return std::shared_ptr(new PilotJobStartedEvent(m->job, m->compute_service)); + } else if (auto cspjsm = std::dynamic_pointer_cast(message)) { + return std::shared_ptr(new PilotJobStartedEvent(cspjsm->job, cspjsm->compute_service)); - } else if (auto m = std::dynamic_pointer_cast(message)) { - return std::shared_ptr(new PilotJobExpiredEvent(m->job, m->compute_service)); + } else if (auto cspjem = std::dynamic_pointer_cast(message)) { + return std::shared_ptr(new PilotJobExpiredEvent(cspjem->job, cspjem->compute_service)); - } else if (auto m = std::dynamic_pointer_cast(message)) { - if (m->success) { + } else if (auto dmfcam = std::dynamic_pointer_cast(message)) { + if (dmfcam->success) { return std::shared_ptr(new FileCopyCompletedEvent( - m->src_location, m->dst_location)); + dmfcam->src_location, dmfcam->dst_location)); } else { return std::shared_ptr( - new FileCopyFailedEvent(m->src_location, m->dst_location, m->failure_cause)); + new FileCopyFailedEvent(dmfcam->src_location, dmfcam->dst_location, dmfcam->failure_cause)); } - } else if (auto m = std::dynamic_pointer_cast(message)) { - if (m->success) { + } else if (auto dmfram = std::dynamic_pointer_cast(message)) { + if (dmfram->success) { return std::shared_ptr(new FileReadCompletedEvent( - m->location, m->num_bytes)); + dmfram->location, dmfram->num_bytes)); } else { return std::shared_ptr( - new FileReadFailedEvent(m->location, m->num_bytes, m->failure_cause)); + 
new FileReadFailedEvent(dmfram->location, dmfram->num_bytes, dmfram->failure_cause)); } - } else if (auto m = std::dynamic_pointer_cast(message)) { - if (m->success) { + } else if (auto dmfwam = std::dynamic_pointer_cast(message)) { + if (dmfwam->success) { return std::shared_ptr(new FileWriteCompletedEvent( - m->location)); + dmfwam->location)); } else { return std::shared_ptr( - new FileWriteFailedEvent(m->location, m->failure_cause)); + new FileWriteFailedEvent(dmfwam->location, dmfwam->failure_cause)); } - } else if (auto m = std::dynamic_pointer_cast(message)) { - return std::shared_ptr(new TimerEvent(m->message)); + } else if (auto ecatm = std::dynamic_pointer_cast(message)) { + return std::shared_ptr(new TimerEvent(ecatm->message)); } else { throw std::runtime_error( "ExecutionEvent::waitForNextExecutionEvent(): Non-handled message type when generating execution event (" + diff --git a/src/wrench/services/Service.cpp b/src/wrench/services/Service.cpp index 1c8c4c11d8..db22628f07 100644 --- a/src/wrench/services/Service.cpp +++ b/src/wrench/services/Service.cpp @@ -151,7 +151,7 @@ namespace wrench { std::string string_value; string_value = this->getPropertyValueAsString(property); if (string_value == "infinity") { - return LONG_LONG_MAX; + return LLONG_MAX; } if (string_value == "zero") { return 0; diff --git a/src/wrench/services/compute/batch/BatchComputeService.cpp b/src/wrench/services/compute/batch/BatchComputeService.cpp index dc508ddb8d..52a4809f66 100644 --- a/src/wrench/services/compute/batch/BatchComputeService.cpp +++ b/src/wrench/services/compute/batch/BatchComputeService.cpp @@ -27,6 +27,7 @@ #include "wrench/services/compute/batch/batch_schedulers/homegrown/fcfs/FCFSBatchScheduler.h" #include "wrench/services/compute/batch/batch_schedulers/homegrown/conservative_bf/ConservativeBackfillingBatchScheduler.h" #include "wrench/services/compute/batch/batch_schedulers/homegrown/conservative_bf_core_level/ConservativeBackfillingBatchSchedulerCoreLevel.h" +#include "wrench/services/compute/batch/batch_schedulers/homegrown/easy_bf/EasyBackfillingBatchScheduler.h" #include "wrench/services/compute/batch/batch_schedulers/batsched/BatschedBatchScheduler.h" #include #include @@ -201,6 +202,10 @@ namespace wrench { this->scheduler = std::unique_ptr(new FCFSBatchScheduler(this)); } else if (batch_scheduling_alg == "conservative_bf") { this->scheduler = std::unique_ptr(new ConservativeBackfillingBatchScheduler(this)); + } else if (batch_scheduling_alg == "easy_bf_depth0") { + this->scheduler = std::unique_ptr(new EasyBackfillingBatchScheduler(this, 0)); + } else if (batch_scheduling_alg == "easy_bf_depth1") { + this->scheduler = std::unique_ptr(new EasyBackfillingBatchScheduler(this, 1)); } else if (batch_scheduling_alg == "conservative_bf_core_level") { this->scheduler = std::unique_ptr(new ConservativeBackfillingBatchSchedulerCoreLevel(this)); } diff --git a/src/wrench/services/compute/batch/batch_schedulers/homegrown/conservative_bf/ConservativeBackfillingBatchScheduler.cpp b/src/wrench/services/compute/batch/batch_schedulers/homegrown/conservative_bf/ConservativeBackfillingBatchScheduler.cpp index 712fe8c1dc..d3839a4275 100644 --- a/src/wrench/services/compute/batch/batch_schedulers/homegrown/conservative_bf/ConservativeBackfillingBatchScheduler.cpp +++ b/src/wrench/services/compute/batch/batch_schedulers/homegrown/conservative_bf/ConservativeBackfillingBatchScheduler.cpp @@ -40,7 +40,7 @@ namespace wrench { this->schedule->setTimeOrigin((u_int32_t) 
Simulation::getCurrentSimulatedDate()); // Find its earliest possible start time - auto est = this->schedule->findEarliestStartTime(batch_job->getRequestedTime(), batch_job->getRequestedNumNodes()); + auto est = this->schedule->findEarliestStartTime(batch_job->getRequestedTime(), batch_job->getRequestedNumNodes(), nullptr); // WRENCH_INFO("The Earliest start time is: %u", est); // Insert it in the schedule @@ -67,7 +67,6 @@ namespace wrench { this->schedule->setTimeOrigin((u_int32_t) Simulation::getCurrentSimulatedDate()); // Start all non-started the jobs in the next slot! - std::set> next_jobs = this->schedule->getJobsInFirstSlot(); if (next_jobs.empty()) { this->compactSchedule(); @@ -139,7 +138,7 @@ namespace wrench { // Find the earliest start time // WRENCH_INFO("FINDING THE EARLIEST START TIME"); - auto est = this->schedule->findEarliestStartTime(batch_job->getRequestedTime(), batch_job->getRequestedNumNodes()); + auto est = this->schedule->findEarliestStartTime(batch_job->getRequestedTime(), batch_job->getRequestedNumNodes(), nullptr); // WRENCH_INFO("EARLIEST START TIME FOR IT: %u", est); // Insert it in the schedule this->schedule->add(est, est + batch_job->getRequestedTime(), batch_job); @@ -273,7 +272,7 @@ namespace wrench { } auto duration = (u_int32_t) (std::get<3>(j)); - auto est = this->schedule->findEarliestStartTime(duration, num_nodes); + auto est = this->schedule->findEarliestStartTime(duration, num_nodes, nullptr); if (est < UINT32_MAX) { to_return[id] = (double) est; } else { diff --git a/src/wrench/services/compute/batch/batch_schedulers/homegrown/conservative_bf/NodeAvailabilityTimeLine.cpp b/src/wrench/services/compute/batch/batch_schedulers/homegrown/conservative_bf/NodeAvailabilityTimeLine.cpp index 1ccf568fbf..4fc1ccff99 100755 --- a/src/wrench/services/compute/batch/batch_schedulers/homegrown/conservative_bf/NodeAvailabilityTimeLine.cpp +++ b/src/wrench/services/compute/batch/batch_schedulers/homegrown/conservative_bf/NodeAvailabilityTimeLine.cpp @@ -70,6 +70,14 @@ namespace wrench { } } + /** + * @brief Method to get the node availability timeline's time origin + * @return a date + */ + u_int32_t NodeAvailabilityTimeLine::getTimeOrigin() { + return this->availability_timeslots.begin()->first.lower(); + } + /** * @brief Method to print the node availability timeline */ @@ -78,7 +86,7 @@ namespace wrench { for (auto &availability_timeslot: this->availability_timeslots) { std::cerr << availability_timeslot.first << "(" << availability_timeslot.second.num_nodes_utilized << ") | "; for (auto const &j: availability_timeslot.second.jobs) { - std::cerr << j->getJobID() << "(" << j->getRequestedNumNodes() << ") "; + std::cerr << j->getCompoundJob()->getName() << "(" << j->getRequestedNumNodes() << ") "; } std::cerr << "\n"; } @@ -90,7 +98,7 @@ namespace wrench { * @param add: true if we're adding, false otherwise * @param start: the start date * @param end: the end date - * @param job: the BatchComputeService job + * @param job: the batch job */ void NodeAvailabilityTimeLine::update(bool add, u_int32_t start, u_int32_t end, std::shared_ptr job) { auto job_set = new BatchJobSet(); @@ -109,9 +117,11 @@ namespace wrench { * @brief Method to find the earliest start time for a job spec * @param duration: the job's duration * @param num_nodes: the job's number of nodes + * @param num_available_nodes_at_that_time: the number of nodes available at that time (nullptr if you don't care) * @return a date */ - u_int32_t NodeAvailabilityTimeLine::findEarliestStartTime(uint32_t
duration, unsigned long num_nodes) { + u_int32_t NodeAvailabilityTimeLine::findEarliestStartTime(uint32_t duration, unsigned long num_nodes, + unsigned long *num_available_nodes_at_that_time) { uint32_t start_time = UINT32_MAX; uint32_t remaining_duration = duration; @@ -139,12 +149,23 @@ namespace wrench { start_time = availability_timeslot.first.lower(); } } + + // Set the num of available nodes at that time if need be (weirdly annoying to do it in the loop above) + if (num_available_nodes_at_that_time) { + for (auto &availability_timeslot: this->availability_timeslots) { + if (availability_timeslot.first.lower() >= start_time) { + *num_available_nodes_at_that_time = this->max_num_nodes - availability_timeslot.second.num_nodes_utilized; + break; + } + } + } + return start_time; } /** - * @brief Get the BatchComputeService jobs in the first slot in the node availability timeline - * @return a set of BatchComputeService jobs + * @brief Get the batch jobs in the first slot in the node availability timeline + * @return a set of batch jobs */ std::set> NodeAvailabilityTimeLine::getJobsInFirstSlot() { std::set> to_return; @@ -154,4 +175,13 @@ namespace wrench { return to_return; } + /** + * @brief Return the number of nodes available in first slot + * @return + */ + unsigned long NodeAvailabilityTimeLine::getNumAvailableNodesInFirstSlot() { + return this->max_num_nodes - (*this->availability_timeslots.begin()).second.num_nodes_utilized; + } + + }// namespace wrench diff --git a/src/wrench/services/compute/batch/batch_schedulers/homegrown/easy_bf/EasyBackfillingBatchScheduler.cpp b/src/wrench/services/compute/batch/batch_schedulers/homegrown/easy_bf/EasyBackfillingBatchScheduler.cpp new file mode 100644 index 0000000000..a56485f682 --- /dev/null +++ b/src/wrench/services/compute/batch/batch_schedulers/homegrown/easy_bf/EasyBackfillingBatchScheduler.cpp @@ -0,0 +1,266 @@ +/** + * Copyright (c) 2017-2021. The WRENCH Team. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ */ + +#include +#include + +#include + +#include "wrench/services/compute/batch/batch_schedulers/homegrown/easy_bf/EasyBackfillingBatchScheduler.h" + +//#define PRINT_SCHEDULE 1 + +WRENCH_LOG_CATEGORY(wrench_core_easy_bf_batch_scheduler, "Log category for EasyBackfillingBatchScheduler"); + +namespace wrench { + + /** + * @brief Constructor + * @param cs: The BatchComputeService for which this scheduler is working + */ + EasyBackfillingBatchScheduler::EasyBackfillingBatchScheduler(BatchComputeService *cs, int depth) : HomegrownBatchScheduler(cs) { + this->schedule = std::make_unique(cs->total_num_of_nodes); + this->_depth = depth; + } + + /** + * @brief Method to process a job submission + * @param batch_job: the newly submitted BatchComputeService job + */ + void EasyBackfillingBatchScheduler::processJobSubmission(std::shared_ptr batch_job) { + WRENCH_INFO("Arrival of a new BatchComputeService job, %lu, that needs %lu nodes", + batch_job->getJobID(), batch_job->getRequestedNumNodes()); + } + + /** + * @brief Method to schedule (possibly) the next jobs to be scheduled + */ + void EasyBackfillingBatchScheduler::processQueuedJobs() { + if (this->cs->batch_queue.empty()) { + return; + } + + // Update the time origin + double now = Simulation::getCurrentSimulatedDate(); +// std::cerr << "** [" << now << "] IN PROCESSING QUEUE JOB (" << this->cs->batch_queue.size() << " JOBS IN THE QUEUE)" << std::endl; + this->schedule->setTimeOrigin((u_int32_t) now); + + // While the first job can be scheduled now, schedule it + unsigned int i; + for (i = 0; i < this->cs->batch_queue.size(); i++) { + auto first_job = this->cs->batch_queue.at(i); +// std::cerr << "SEEING IF JOB " << first_job->getCompoundJob()->getName() << " CAN BE STARTED\n"; + + // If the job has already been allocated resources, nevermind + if (not first_job->resources_allocated.empty()) { + continue; + } + + // If the job is already in the schedule, nevermind + auto jobs_in_first_slot = this->schedule->getJobsInFirstSlot(); + if (jobs_in_first_slot.find(first_job) != jobs_in_first_slot.end()) { + continue; + } + + // If the job cannot start now, that's it + if (this->schedule->getNumAvailableNodesInFirstSlot() < first_job->getRequestedNumNodes()) { +// std::cerr << "CANNOT BE STARTED: " << this->schedule->getNumAvailableNodesInFirstSlot() << " " << first_job->getRequestedNumNodes() << " \n"; + break; + } + + // SCHEDULED IT! 
+// std::cerr << "SCHEDULING IT!!\n"; + this->schedule->add(this->schedule->getTimeOrigin(), this->schedule->getTimeOrigin() + first_job->getRequestedTime(), + first_job); + first_job->easy_bf_start_date = this->schedule->getTimeOrigin(); + first_job->easy_bf_expected_end_date = this->schedule->getTimeOrigin() + first_job->getRequestedTime(); + + WRENCH_INFO("Scheduled BatchComputeService job %lu on %lu nodes from time %u to %u", + first_job->getJobID(), first_job->getRequestedNumNodes(), + first_job->easy_bf_start_date, first_job->easy_bf_expected_end_date); + } + + unsigned int first_job_not_started = i; + + if (first_job_not_started < this->cs->batch_queue.size()) { + +// std::cerr << "DOING BACKFILLING\n"; + // At this point, the first job in the queue cannot start now, so determine when it could start + unsigned long num_nodes_available_at_shadow_time; + auto shadow_time = this->schedule->findEarliestStartTime( + this->cs->batch_queue.at(first_job_not_started)->requested_time, + this->cs->batch_queue.at(first_job_not_started)->getRequestedNumNodes(), + &num_nodes_available_at_shadow_time); + + num_nodes_available_at_shadow_time -= this->cs->batch_queue.at(first_job_not_started)->getRequestedNumNodes(); + +// std::cerr << "THE FIRST JOB'S (" << this->cs->batch_queue.at(first_job_not_started)->getCompoundJob()->getName() << ") GUARANTEED START TIME IS: " << shadow_time << "\n"; +// std::cerr << "AND AT THAT TIME THE NUMBER OF AVAILABLE NODES (COUNTING THE JOB) IS: " << num_nodes_available_at_shadow_time << "\n"; + + // BACKFILLING: Go through all the other jobs, and start each one that can start right now + // (without hurting the first job in the queue if the depth is 1) + unsigned long num_nodes_available_now = this->schedule->getNumAvailableNodesInFirstSlot(); + for (unsigned int i = first_job_not_started + 1; i < this->cs->batch_queue.size(); i++) { + auto candidate_job = this->cs->batch_queue.at(i); + + // If the job's already started, forget it + if (not candidate_job->resources_allocated.empty()) { + continue; + } + + // If the job couldn't start now anyway, forget it +// std::cerr << " LOOKING AT JOB " << candidate_job->getCompoundJob()->getName() << "\n"; + if (candidate_job->getRequestedNumNodes() > num_nodes_available_now) { +// std::cerr << "NOT ENOUGH NODES NOW\n"; + continue; + } + + if (this->_depth == 1) { + // If the job would push back the shadow job, forget it + if ((this->schedule->getTimeOrigin() + candidate_job->getRequestedTime() > shadow_time) and + (candidate_job->getRequestedNumNodes() > num_nodes_available_at_shadow_time)) { +// std::cerr << "WOULD PUSH BACK SHADOW TIME\n"; + continue; + } + } + + // Schedule the job +// std::cerr << "BACKFILLING IT!\n"; + this->schedule->add(this->schedule->getTimeOrigin(), this->schedule->getTimeOrigin() + candidate_job->getRequestedTime(), candidate_job); + + num_nodes_available_now -= candidate_job->getRequestedNumNodes(); + if (this->schedule->getTimeOrigin() + candidate_job->getRequestedTime() > shadow_time) { + num_nodes_available_at_shadow_time -= candidate_job->getRequestedNumNodes(); + } + } + } + +// this->schedule->print(); +// std::cerr << "STARTING ALL THE JOBS THAT WERE SCHEDULED, GIVEN THE ABOVE SCHEDULE\n"; + + // Start all non-started the jobs in the next slot! 
+ std::set> next_jobs = this->schedule->getJobsInFirstSlot(); + + for (auto const &batch_job: next_jobs) { + // If the job has already been allocated resources, it's already running anyway + if (not batch_job->resources_allocated.empty()) { + continue; + } + + // Get the workflow job associated to the picked BatchComputeService job + std::shared_ptr compound_job = batch_job->getCompoundJob(); + + // Find on which resources to actually run the job + unsigned long cores_per_node_asked_for = batch_job->getRequestedCoresPerNode(); + unsigned long num_nodes_asked_for = batch_job->getRequestedNumNodes(); + unsigned long requested_time = batch_job->getRequestedTime(); + + auto resources = this->scheduleOnHosts(num_nodes_asked_for, cores_per_node_asked_for, ComputeService::ALL_RAM); + if (resources.empty()) { + // Hmmm... we don't have the resources right now... we should get an update soon.... + return; + } + + WRENCH_INFO("Starting BatchComputeService job %lu ", batch_job->getJobID()); +// std::cerr << "STARTING JOB " << batch_job->getCompoundJob()->getName() << "\n"; + + // Remove the job from the BatchComputeService queue + this->cs->removeJobFromBatchQueue(batch_job); + + // Add it to the running list + this->cs->running_jobs[batch_job->getCompoundJob()] = batch_job; + + // Start it! + this->cs->startJob(resources, compound_job, batch_job, num_nodes_asked_for, requested_time, + cores_per_node_asked_for); + } + } + + /** + * @brief Method to process a job completion + * @param batch_job: the job that completed + */ + void EasyBackfillingBatchScheduler::processJobCompletion(std::shared_ptr batch_job) { + WRENCH_INFO("Notified of completion of BatchComputeService job, %lu", batch_job->getJobID()); + + auto now = (u_int32_t) Simulation::getCurrentSimulatedDate(); + this->schedule->setTimeOrigin(now); + this->schedule->remove(now, batch_job->easy_bf_expected_end_date + 100, batch_job); + +#ifdef PRINT_SCHEDULE + this->schedule->print(); +#endif + } + + /** + * @brief Method to process a job termination + * @param batch_job: the job that was terminated + */ + void EasyBackfillingBatchScheduler::processJobTermination(std::shared_ptr batch_job) { + // Just like a job Completion to me! + this->processJobCompletion(batch_job); + } + + /** + * @brief Method to process a job failure + * @param batch_job: the job that failed + */ + void EasyBackfillingBatchScheduler::processJobFailure(std::shared_ptr batch_job) { + // Just like a job Completion to me! 
+ this->processJobCompletion(batch_job); + } + + /** + * @brief Method to figure out on which actual resources a job could be scheduled right now + * @param num_nodes: number of nodes + * @param cores_per_node: number of cores per node + * @param ram_per_node: amount of RAM + * @return a host: map + * + */ + std::map> + EasyBackfillingBatchScheduler::scheduleOnHosts(unsigned long num_nodes, unsigned long cores_per_node, sg_size_t ram_per_node) { + if (ram_per_node == ComputeService::ALL_RAM) { + ram_per_node = S4U_Simulation::getHostMemoryCapacity(cs->available_nodes_to_cores.begin()->first); + } + if (cores_per_node == ComputeService::ALL_CORES) { + cores_per_node = cs->available_nodes_to_cores.begin()->first->get_core_count(); + } + + if (ram_per_node > S4U_Simulation::getHostMemoryCapacity(cs->available_nodes_to_cores.begin()->first)) { + throw std::runtime_error("EasyBackfillingBatchScheduler::scheduleOnHosts(): Asking for too much RAM per host"); + } + if (num_nodes > cs->available_nodes_to_cores.size()) { + throw std::runtime_error("EasyBackfillingBatchScheduler::scheduleOnHosts(): Asking for too many hosts"); + } + if (cores_per_node > (unsigned long) cs->available_nodes_to_cores.begin()->first->get_core_count()) { + throw std::runtime_error("EasyBackfillingBatchScheduler::scheduleOnHosts(): Asking for too many cores per host (asking for " + + std::to_string(cores_per_node) + " but hosts have " + + std::to_string(cs->available_nodes_to_cores.begin()->first->get_core_count()) + " cores)"); + } + + // IMPORTANT: We always give all cores to a job on a node! + cores_per_node = cs->available_nodes_to_cores.begin()->first->get_core_count(); + + return HomegrownBatchScheduler::selectHostsFirstFit(cs, num_nodes, cores_per_node, ram_per_node); + } + + /** + * @brief Method to obtain start time estimates + * @param set_of_jobs: a set of job specs + * @return map of estimates + */ + std::map EasyBackfillingBatchScheduler::getStartTimeEstimates( + std::set> set_of_jobs) { + std::map to_return; + + throw std::runtime_error("EasyBackfillingBatchScheduler::getStartTimeEstimates(): Method not implemented (ever?)
for EASY backfilling"); + } + +}// namespace wrench diff --git a/src/wrench/services/storage/StorageService.cpp b/src/wrench/services/storage/StorageService.cpp index 4af9bb21b3..50f5bef113 100755 --- a/src/wrench/services/storage/StorageService.cpp +++ b/src/wrench/services/storage/StorageService.cpp @@ -110,6 +110,10 @@ namespace wrench { throw std::invalid_argument("StorageService::writeFile(): Invalid arguments"); } + if (location->getStorageService() != this->getSharedPtr()) { + throw std::invalid_argument("StorageService::writeFile(): Can only read from a location at that same storage service"); + } + this->assertServiceIsUp(); this->commport->putMessage( @@ -249,6 +253,10 @@ namespace wrench { throw std::invalid_argument("StorageService::readFile(): Invalid nullptr/0 arguments"); } + if (location->getStorageService() != this->getSharedPtr()) { + throw std::invalid_argument("StorageService::readFile(): Can only read from a location at that same storage service"); + } + assertServiceIsUp(this->getSharedPtr()); this->commport->putMessage( diff --git a/src/wrench/services/storage/compound/CompoundStorageService.cpp b/src/wrench/services/storage/compound/CompoundStorageService.cpp index ce61c169db..5206700a23 100644 --- a/src/wrench/services/storage/compound/CompoundStorageService.cpp +++ b/src/wrench/services/storage/compound/CompoundStorageService.cpp @@ -60,7 +60,6 @@ namespace wrench { StorageSelectionStrategyCallback &allocate, WRENCH_PROPERTY_COLLECTION_TYPE property_list, const WRENCH_MESSAGE_PAYLOAD_COLLECTION_TYPE& messagepayload_list) : StorageService(hostname, "compound_storage_" + std::to_string(getNewUniqueNumber())), allocate(allocate) { - this->setProperties(this->default_property_values, std::move(property_list)); this->setMessagePayloads(this->default_messagepayload_values, std::move(messagepayload_list)); @@ -168,12 +167,10 @@ namespace wrench { } bool CompoundStorageService::processStorageSelectionMessage(const CompoundStorageAllocationRequestMessage *msg) { - auto file = msg->file; WRENCH_INFO("CSS::processStorageSelectionMessage(): For file %s", file->getID().c_str()); if (this->file_location_mapping.find(file) != this->file_location_mapping.end()) { - WRENCH_INFO("CSS::processStorageSelectionMessage: File %s already known by CSS", file->getID().c_str()); msg->answer_commport->dputMessage( @@ -252,7 +249,6 @@ namespace wrench { } bool CompoundStorageService::processStorageLookupMessage(const CompoundStorageLookupRequestMessage *msg) { - auto file = msg->file; WRENCH_INFO("CSS::processStorageLookupMessage(): For file %s", file->getID().c_str()); @@ -362,7 +358,6 @@ namespace wrench { * or nullptr if it's not. */ std::vector> CompoundStorageService::lookupOrDesignateStorageService(const std::shared_ptr& location) { - auto temp_commport = S4U_CommPort::getTemporaryCommPort(); auto locations = this->lookupOrDesignateStorageService(location->getFile(), 0, temp_commport); @@ -384,7 +379,6 @@ namespace wrench { * or nullptr if it's not. 
*/ std::vector> CompoundStorageService::lookupOrDesignateStorageService(const std::shared_ptr& location, unsigned int stripe_count) { - auto temp_commport = S4U_CommPort::getTemporaryCommPort(); auto locations = this->lookupOrDesignateStorageService(location->getFile(), stripe_count, temp_commport); @@ -422,7 +416,6 @@ namespace wrench { // Send a message to the storage service's daemon for (const auto &loc: designated_locations) { - WRENCH_DEBUG("CSS:deleteFile Issuing delete message to SSS %s", loc->getStorageService()->getName().c_str()); // assertServiceIsUp(loc->getStorageService()); @@ -433,7 +426,6 @@ namespace wrench { this->getMessagePayloadValue(StorageServiceMessagePayload::FILE_DELETE_REQUEST_MESSAGE_PAYLOAD))); if (wait_for_answer) { - std::unique_ptr message = nullptr; auto msg = answer_commport->getMessage(this->network_timeout, "StorageService::deleteFile():"); @@ -487,7 +479,6 @@ namespace wrench { // Send a message to the storage service's daemon for (const auto &loc: file_parts) { - assertServiceIsUp(loc->getStorageService()); loc->getStorageService()->commport->putMessage(new StorageServiceFileLookupRequestMessage( @@ -818,7 +809,6 @@ namespace wrench { const std::shared_ptr &location, sg_size_t num_bytes_to_write, bool wait_for_answer) { - WRENCH_INFO("CSS::writeFile(): Writing %llu to file %s - starting at %f", num_bytes_to_write, location->getFile()->getID().c_str(), S4U_Simulation::getClock()); if (location == nullptr) { @@ -924,7 +914,6 @@ namespace wrench { auto buffer_size = msg->buffer_size; if (buffer_size >= 1) { - auto file = location->getFile(); for (auto const &dwmb: msg->data_write_commport_and_bytes) { // Bufferized @@ -1058,7 +1047,6 @@ namespace wrench { WRENCH_DEBUG("CSS::readFile(): %u FileReadRequests sent and validated", request_count); for (const auto &msg: messages) { - if (msg->buffer_size < 1) { // Non-Bufferized ; just wait for an ack for this message (note this may not be THE ack to this precise message, but it doesn't matter) recv_commport->getMessage("CSS::readFile(): "); @@ -1256,7 +1244,6 @@ namespace wrench { trace.parts_count = size(locations); if (locations.empty()) { - trace.file_name = "nofile"; for (const auto &storage: this->storage_services) { @@ -1270,13 +1257,11 @@ namespace wrench { } } else { - trace.file_name = locations.begin()->get()->getFile()->getID(); std::set known_services; for (const auto &location: locations) { - auto storage_service = location->getStorageService(); if (known_services.find(storage_service->getName()) == known_services.end()) { DiskUsage disk_usage; diff --git a/src/wrench/simgrid_S4U_util/S4U_Simulation.cpp b/src/wrench/simgrid_S4U_util/S4U_Simulation.cpp index 6208dada14..8a1ee38849 100755 --- a/src/wrench/simgrid_S4U_util/S4U_Simulation.cpp +++ b/src/wrench/simgrid_S4U_util/S4U_Simulation.cpp @@ -1111,7 +1111,7 @@ namespace wrench { " at host " + hostname + " has invalid size"); } } else { - capacity = LONG_LONG_MAX;// Default size if no size property specified + capacity = LLONG_MAX;// Default size if no size property specified } return capacity; diff --git a/src/wrench/util/UnitParser.cpp b/src/wrench/util/UnitParser.cpp index 6a3beb2486..b998e9352b 100755 --- a/src/wrench/util/UnitParser.cpp +++ b/src/wrench/util/UnitParser.cpp @@ -112,7 +112,7 @@ namespace wrench { try { double value = parseValueWithUnit(string, units, "B"); // default: bytes if (value == DBL_MAX) { - return LONG_LONG_MAX; + return LLONG_MAX; } else { return (sg_size_t) value; } diff --git 
a/test/energy_consumption/EnergyConsumptionTest.cpp b/test/energy_consumption/EnergyConsumptionTest.cpp index 2de779004d..fc2aa5fa5c 100644 --- a/test/energy_consumption/EnergyConsumptionTest.cpp +++ b/test/energy_consumption/EnergyConsumptionTest.cpp @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include @@ -780,7 +779,7 @@ class EnergyConsumptionPStateChangeTestWMS : public wrench::ExecutionController double after_current_energy_consumed_by_host1 = this->getSimulation()->getEnergyConsumed(simulation_hosts[1]); double energy_consumed_while_running_with_higher_speed = after_current_energy_consumed_by_host1 - before_current_energy_consumed_by_host1; - double higher_speed_compuation_time = wrench::S4U_Simulation::getClock(); + double higher_speed_computation_time = wrench::S4U_Simulation::getClock(); if (energy_consumed_while_running_with_higher_speed <= 0) { @@ -804,7 +803,7 @@ class EnergyConsumptionPStateChangeTestWMS : public wrench::ExecutionController double after_current_energy_consumed_by_host2 = this->getSimulation()->getEnergyConsumed(simulation_hosts[1]); double energy_consumed_while_running_with_lower_speed = after_current_energy_consumed_by_host2 - before_current_energy_consumed_by_host2; - double lower_speed_computation_time = wrench::S4U_Simulation::getClock() - higher_speed_compuation_time; + double lower_speed_computation_time = wrench::S4U_Simulation::getClock() - higher_speed_computation_time; if (energy_consumed_while_running_with_lower_speed <= 0) { throw std::runtime_error("Unexpectedly the energy consumed is less than 0 for a lower speed ??"); @@ -820,7 +819,7 @@ class EnergyConsumptionPStateChangeTestWMS : public wrench::ExecutionController double exact_max_wattage_power_1 = wrench::Simulation::getMaxPowerConsumption(simulation_hosts[1]); double exact_max_wattage_power_2 = wrench::Simulation::getMaxPowerConsumption(simulation_hosts[1]); double EPSILON = 1.0; - double computed_wattage_power_1 = energy_consumed_while_running_with_higher_speed / higher_speed_compuation_time; + double computed_wattage_power_1 = energy_consumed_while_running_with_higher_speed / higher_speed_computation_time; double computed_wattage_power_2 = energy_consumed_while_running_with_lower_speed / lower_speed_computation_time; if (std::abs(exact_max_wattage_power_1 - computed_wattage_power_1) > EPSILON && std::abs(exact_max_wattage_power_2 - computed_wattage_power_2) > EPSILON) { diff --git a/test/services/compute_services/bare_metal_standard_jobs/BareMetalComputeServiceOneTaskTest.cpp b/test/services/compute_services/bare_metal_standard_jobs/BareMetalComputeServiceOneTaskTest.cpp index 745676b754..7f772181e9 100644 --- a/test/services/compute_services/bare_metal_standard_jobs/BareMetalComputeServiceOneTaskTest.cpp +++ b/test/services/compute_services/bare_metal_standard_jobs/BareMetalComputeServiceOneTaskTest.cpp @@ -7,7 +7,6 @@ * (at your option) any later version. 
*/ -#include #include #include @@ -62,7 +61,7 @@ class BareMetalComputeServiceOneTaskTest : public ::testing::Test { protected: - ~BareMetalComputeServiceOneTaskTest() { + ~BareMetalComputeServiceOneTaskTest() override { workflow->clear(); wrench::Simulation::removeAllFiles(); } @@ -654,14 +653,14 @@ void BareMetalComputeServiceOneTaskTest::do_StandardJobConstructor_test() { class HostMemoryTestWMS : public wrench::ExecutionController { public: - HostMemoryTestWMS(BareMetalComputeServiceOneTaskTest *test, std::string hostname) : wrench::ExecutionController(hostname, "test"), test(test) { + HostMemoryTestWMS(BareMetalComputeServiceOneTaskTest *test, const std::string& hostname) : wrench::ExecutionController(hostname, "test"), test(test) { } private: BareMetalComputeServiceOneTaskTest *test; int main() override { - double ram_capacity; + sg_size_t ram_capacity; ram_capacity = wrench::Simulation::getHostMemoryCapacity("TwoCoreHost"); if (ram_capacity != wrench::S4U_Simulation::DEFAULT_RAM) { @@ -677,7 +676,7 @@ class HostMemoryTestWMS : public wrench::ExecutionController { if (ram_capacity == wrench::S4U_Simulation::DEFAULT_RAM) { throw std::runtime_error("RAM Capacity of RAMHost should not be the default"); } - if (std::abs(ram_capacity - 1024) > 0.01) { + if (ram_capacity != 1024) { throw std::runtime_error("RAM Capacity of RAMHost should be 1024"); } diff --git a/test/services/compute_services/bare_metal_standard_jobs/BareMetalComputeServiceResourceInformationTest.cpp b/test/services/compute_services/bare_metal_standard_jobs/BareMetalComputeServiceResourceInformationTest.cpp index 1b6157fdef..8aea78be5f 100755 --- a/test/services/compute_services/bare_metal_standard_jobs/BareMetalComputeServiceResourceInformationTest.cpp +++ b/test/services/compute_services/bare_metal_standard_jobs/BareMetalComputeServiceResourceInformationTest.cpp @@ -7,8 +7,6 @@ * (at your option) any later version. */ -#include - #include #include @@ -32,7 +30,7 @@ class BareMetalComputeServiceTestResourceInformation : public ::testing::Test { void do_ResourceInformation_test(); protected: - ~BareMetalComputeServiceTestResourceInformation() { + ~BareMetalComputeServiceTestResourceInformation() override { workflow->clear(); wrench::Simulation::removeAllFiles(); } @@ -81,7 +79,7 @@ class ResourceInformationTestWMS : public wrench::ExecutionController { public: ResourceInformationTestWMS(BareMetalComputeServiceTestResourceInformation *test, - std::string hostname) : wrench::ExecutionController(hostname, "test") { + const std::string& hostname) : wrench::ExecutionController(hostname, "test") { this->test = test; } diff --git a/test/services/compute_services/bare_metal_standard_jobs/BareMetalComputeServiceTestPilotJobs.cpp b/test/services/compute_services/bare_metal_standard_jobs/BareMetalComputeServiceTestPilotJobs.cpp index 25e19900f8..4ac31b74f7 100755 --- a/test/services/compute_services/bare_metal_standard_jobs/BareMetalComputeServiceTestPilotJobs.cpp +++ b/test/services/compute_services/bare_metal_standard_jobs/BareMetalComputeServiceTestPilotJobs.cpp @@ -7,8 +7,6 @@ * (at your option) any later version. 
*/ -#include - #include #include @@ -36,7 +34,7 @@ class BareMetalComputeServiceTestPilotJobs : public ::testing::Test { void do_UnsupportedPilotJobs_test(); protected: - ~BareMetalComputeServiceTestPilotJobs() { + ~BareMetalComputeServiceTestPilotJobs() override { workflow->clear(); wrench::Simulation::removeAllFiles(); } @@ -90,7 +88,7 @@ class BareMetalComputeServiceUnsupportedPilotJobsTestWMS : public wrench::Execut public: BareMetalComputeServiceUnsupportedPilotJobsTestWMS(BareMetalComputeServiceTestPilotJobs *test, - std::string hostname) : wrench::ExecutionController(hostname, "test"), test(test) { + const std::string& hostname) : wrench::ExecutionController(hostname, "test"), test(test) { } private: diff --git a/test/services/compute_services/batch_standard_and_pilot_jobs/BatchServiceEASYBFTest.cpp b/test/services/compute_services/batch_standard_and_pilot_jobs/BatchServiceEASYBFTest.cpp new file mode 100644 index 0000000000..328bde047d --- /dev/null +++ b/test/services/compute_services/batch_standard_and_pilot_jobs/BatchServiceEASYBFTest.cpp @@ -0,0 +1,297 @@ +/** + * Copyright (c) 2017. The WRENCH Team. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include + +#include "../../../include/TestWithFork.h" +#include "../../../include/UniqueTmpPathPrefix.h" + +#define EPSILON 0.05 + +WRENCH_LOG_CATEGORY(batch_service_easy_bf_test, "Log category for BatchServiceEASYBFTest"); + +class BatchServiceEASY_BFTest : public ::testing::Test { + +public: + std::shared_ptr workflow; + + std::shared_ptr compute_service = nullptr; + + void do_EASY_BF_test(int num_compute_nodes, + std::vector> spec, + bool print_completion_times); + +protected: + ~BatchServiceEASY_BFTest() override { + workflow->clear(); + wrench::Simulation::removeAllFiles(); + } + + BatchServiceEASY_BFTest() { + + // Create the simplest workflow + workflow = wrench::Workflow::createWorkflow(); + + // Create a four-host 10-core platform file + std::string xml = "" + "" + " " + " " + " " + " " + " " + " " + " " + " " + " " + " " + " " + " " + " " + " " + " " + " " + " " + " " + " " + " " + " " + " " + " " + " " + " " + " " + " " + ""; + FILE *platform_file = fopen(platform_file_path.c_str(), "w"); + fprintf(platform_file, "%s", xml.c_str()); + fclose(platform_file); + } + + std::string platform_file_path = UNIQUE_TMP_PATH_PREFIX + "platform.xml"; +}; + + +/**********************************************************************/ +/** SIMPLE EASY_BF GENERIC (DEPTH=0) **/ +/**********************************************************************/ + +class EASY_BFTest_WMS : public wrench::ExecutionController { + +public: + EASY_BFTest_WMS(BatchServiceEASY_BFTest *test, + const std::string& hostname, + std::vector> &spec, + bool print_completion_times) : wrench::ExecutionController(hostname, "test") { + this->test = test; + this->spec = spec; + this->print_completion_times = print_completion_times; + } + +private: + BatchServiceEASY_BFTest *test; + std::vector> spec; + bool print_completion_times; + + int main() override { + // Create a job manager + auto job_manager = this->createJobManager(); + + std::vector> jobs; + std::map> completion_times; + std::map expected_completion_times; + + // Create and submit all jobs + for (auto const &job_spec : spec) { + std::string 
job_name = std::get<0>(job_spec); + auto num_nodes = std::get<1>(job_spec); + auto duration = std::get<2>(job_spec); + auto sleep_after = std::get<3>(job_spec); + expected_completion_times[job_name] = std::get<4>(job_spec); + + auto job = job_manager->createCompoundJob(job_name); + job->addSleepAction("sleep" + std::to_string(duration), duration); + std::map args = {{"-N", std::to_string(num_nodes)}, {"-t", std::to_string(duration)}, {"-c", "10"}}; + job_manager->submitJob(job, this->test->compute_service, args); + jobs.push_back(job); + wrench::Simulation::sleep(sleep_after); + } + + for (unsigned int i=0; i < jobs.size(); i++) { + auto event = this->waitForNextEvent(); + if (auto real_event = std::dynamic_pointer_cast(event)) { + auto job = real_event->job; + auto sleep_action = *(job->getActions().begin()); + completion_times[real_event->job->getName()] = + std::make_tuple(job->getSubmitDate(), + job->getServiceSpecificArguments()["-N"], + job->getServiceSpecificArguments()["-t"], + sleep_action->getStartDate(), + wrench::Simulation::getCurrentSimulatedDate()); + } else { + throw std::runtime_error("Unexpected workflow execution event: " + event->toString()); + } + } + + if (print_completion_times) { + for (auto const &item: completion_times) { + std::cerr << " " << item.first << + " (arr=" << std::get<0>(item.second) << + ", N=" << std::get<1>(item.second) << + ", t=" << std::get<2>(item.second) << "): " << + std::get<3>(item.second) << " -> " << + std::get<4>(item.second) << std::endl; + } + } + for (auto const &item: completion_times) { + if ((expected_completion_times[item.first] > 0) and (std::abs(std::get<4>(item.second) - expected_completion_times[item.first]) > 0.001)) { + throw std::runtime_error("Invalid job completion time for " + item.first + ": " + + std::to_string(std::get<4>(item.second)) + " (expected: " + + std::to_string(expected_completion_times[item.first]) + ")"); + } + } + + return 0; + } +}; + +void BatchServiceEASY_BFTest::do_EASY_BF_test(int num_compute_nodes, + std::vector> spec, + bool print_completion_times) { + + // Create and initialize a simulation + auto simulation = wrench::Simulation::createSimulation(); + int argc = 2; + auto argv = (char **) calloc(argc, sizeof(char *)); + argv[0] = strdup("unit_test"); + argv[1] = strdup("--wrench-commport-pool-size=50000"); +// argv[2] = strdup("--wrench-full-log"); + + ASSERT_NO_THROW(simulation->init(&argc, argv)); + + // Setting up the platform + ASSERT_NO_THROW(simulation->instantiatePlatform(platform_file_path)); + + // Get a hostname + std::string hostname = "Host1"; + +#ifdef ENABLE_BATSCHED + std::string scheduling_algorithm = "easy_bf"; +#else + std::string scheduling_algorithm = "easy_bf_depth1"; +#endif + + std::vector compute_hosts; + for (int i=1; i <= num_compute_nodes; i++) { + compute_hosts.push_back("Host" + std::to_string(i)); + } + + // Create a Batch Service with an EASY backfilling scheduling algorithm + ASSERT_NO_THROW(compute_service = simulation->add( + new wrench::BatchComputeService(hostname, compute_hosts, "", + {{wrench::BatchComputeServiceProperty::BATCH_SCHEDULING_ALGORITHM, scheduling_algorithm}, + {wrench::BatchComputeServiceProperty::BATCH_RJMS_PADDING_DELAY, "0"}}))); + + simulation->add(new wrench::FileRegistryService(hostname)); + + // Create a WMS + ASSERT_NO_THROW(simulation->add(new EASY_BFTest_WMS(this, hostname, spec, print_completion_times))); + + ASSERT_NO_THROW(simulation->launch()); + + for (int i = 0; i < argc; i++) + free(argv[i]); + free(argv); +} + + + +#ifdef ENABLE_BATSCHED 
+TEST_F(BatchServiceEASY_BFTest, DISABLED_SimpleEASY_BFTest_1) +#else +TEST_F(BatchServiceEASY_BFTest, SimpleEASY_BFTest_1) +#endif +{ + // job_name, num_nodes, duration, sleep_after, expected CT + std::vector> spec = { + {"job1", 2, 60, 0, 60}, + {"job2", 4, 30, 0, 90}, + {"job3", 2, 30, 0, 30}, + {"job4", 2, 50, 0, 140} + }; + + DO_TEST_WITH_FORK_THREE_ARGS(do_EASY_BF_test, 4, spec, false); +} + + +#ifdef ENABLE_BATSCHED +TEST_F(BatchServiceEASY_BFTest, DISABLED_SimpleEASY_BFTest_2) +#else +TEST_F(BatchServiceEASY_BFTest, SimpleEASY_BFTest_2) +#endif +{ + // job_name, num_nodes, duration, sleep_after, expected CT + std::vector> spec = { + {"job1", 1, 6000, 0, 6000}, + {"job2", 2, 70, 0, 70}, + {"job3", 4, 20, 0, 90}, + {"job4", 5, 20, 0, 6020}, + {"job5", 1, 6000, 0, 6000}, + }; + + DO_TEST_WITH_FORK_THREE_ARGS(do_EASY_BF_test, 6, spec, false); +} + +#ifdef ENABLE_BATSCHED +TEST_F(BatchServiceEASY_BFTest, DISABLED_SimpleEASY_BFTest_3) +#else +TEST_F(BatchServiceEASY_BFTest, SimpleEASY_BFTest_3) +#endif +{ + // job_name, num_nodes, duration, sleep_after, expected CT + std::vector> spec = { + {"job1", 3, 660, 1, 660}, + {"job2", 1, 120, 1, 121}, + {"job3", 3, 1740, 1, 121 + 1740}, + {"job4", 1, 1080, 1, 660 + 1080}, + }; + + DO_TEST_WITH_FORK_THREE_ARGS(do_EASY_BF_test, 6, spec, false); +} + +#ifdef ENABLE_BATSCHED +TEST_F(BatchServiceEASY_BFTest, SimpleEASY_BFTest_RANDOM) +#else +TEST_F(BatchServiceEASY_BFTest, SimpleEASY_BFTest_RANDOM) +#endif +{ + int num_jobs = 1000; + for (int seed = 0; seed < 10; seed++) { + std::vector> spec; +// std::cerr << "SEED= " << seed << "\n"; + unsigned int random = seed; + for (int i = 1; i <= num_jobs; i++) { + std::string job_name = "job" + std::to_string(i); + random = random * 17 + 4123451; + unsigned int num_nodes = 1 + random % 4; + random = random * 17 + 4123451; + unsigned int duration = 60 + 60 * (random % 30); + int expected_ct = -1; + spec.emplace_back(job_name, num_nodes, duration, 0, expected_ct); + } + DO_TEST_WITH_FORK_THREE_ARGS(do_EASY_BF_test, 6, spec, false); + } +} + + diff --git a/test/services/storage_services/LogicalFileSystem/LogicalFileSystemTest.cpp b/test/services/storage_services/LogicalFileSystem/LogicalFileSystemTest.cpp deleted file mode 100755 index 80fe3b1a25..0000000000 --- a/test/services/storage_services/LogicalFileSystem/LogicalFileSystemTest.cpp +++ /dev/null @@ -1,276 +0,0 @@ -//#include -//#include -// -//#include "../../../include/TestWithFork.h" -//#include "../../../include/UniqueTmpPathPrefix.h" -// -//WRENCH_LOG_CATEGORY(logical_file_system_test, "Log category for LogicalFileSystemTest"); -// -// -//class LogicalFileSystemTest : public ::testing::Test { -// -//public: -// void do_BasicTests(); -// void do_DevNullTests(); -// void do_LRUTests(); -// -//protected: -// LogicalFileSystemTest() { -// -// // Create a 2-host platform file -// // [WMSHost]-----[StorageHost] -// std::string xml = "" -// "" -// " " -// " " -// " " -// " " -// " " -// " " -// " " -// " " -// " " -// " " -// " " -// " " -// " " -// ""; -// FILE *platform_file = fopen(platform_file_path.c_str(), "w"); -// fprintf(platform_file, "%s", xml.c_str()); -// fclose(platform_file); -// } -// -// std::string platform_file_path = UNIQUE_TMP_PATH_PREFIX + "platform.xml"; -//}; -// -// -//TEST_F(LogicalFileSystemTest, BasicTests) { -// DO_TEST_WITH_FORK(do_BasicTests); -//} -// -//void LogicalFileSystemTest::do_BasicTests() { -// // Create and initialize the simulation -// auto simulation = wrench::Simulation::createSimulation(); -// -// int argc = 1; 
-// char **argv = (char **) calloc(argc, sizeof(char *)); -// argv[0] = strdup("unit_test"); -// // argv[1] = strdup("--wrench-full-log"); -// -// ASSERT_NO_THROW(simulation->init(&argc, argv)); -// auto workflow = wrench::Workflow::createWorkflow(); -// -// // set up the platform -// ASSERT_NO_THROW(simulation->instantiatePlatform(platform_file_path)); -// -// // Create two Storage Services -// std::shared_ptr storage_service1, storage_service2; -// ASSERT_NO_THROW(storage_service1 = simulation->add( -// wrench::SimpleStorageService::createSimpleStorageService("Host", {"/"}))); -// -// ASSERT_THROW(wrench::LogicalFileSystem::createLogicalFileSystem("Host", nullptr, "/tmp", "NONE"), std::invalid_argument); -// ASSERT_THROW(wrench::LogicalFileSystem::createLogicalFileSystem("Host", storage_service1.get(), "/bogus"), std::invalid_argument); -// -// // Create a Logical File System -// try { -// wrench::LogicalFileSystem::createLogicalFileSystem("Host", storage_service1.get(), "/tmp", "BOGUS"); -// throw std::runtime_error("Should not be able to create a logical file system with a bogus caching policy"); -// } catch (std::invalid_argument &ignore) {} -// -// auto fs1 = wrench::LogicalFileSystem::createLogicalFileSystem("Host", storage_service1.get(), "/tmp", "NONE"); -// -// // Attempt to create a redundant Logical File System -// try { -// wrench::LogicalFileSystem::createLogicalFileSystem("Host", storage_service1.get(), "/tmp"); -// throw std::runtime_error("Initializing a redundant file system should have thrown"); -// } catch (std::invalid_argument &ignore) { -// } -// -// fs1->createDirectory(("/foo")); -// fs1->removeAllFilesInDirectory("/foo"); -// fs1->listFilesInDirectory("/foo"); -// fs1->removeEmptyDirectory("/foo"); -// -// ASSERT_DOUBLE_EQ(100, fs1->getTotalCapacity()); -// auto file_80 = wrench::Simulation::addFile("file_80", 80); -// ASSERT_TRUE(fs1->reserveSpace(file_80, "/files/")); -// fs1->unreserveSpace(file_80, "/files/"); -// ASSERT_DOUBLE_EQ(100, fs1->getFreeSpace()); -// ASSERT_TRUE(fs1->reserveSpace(file_80, "/files/")); -// fs1->storeFileInDirectory(file_80, "/files/"); -// ASSERT_DOUBLE_EQ(20, fs1->getFreeSpace()); -// fs1->incrementNumRunningTransactionsForFileInDirectory(file_80, "/files");// coverage -// fs1->decrementNumRunningTransactionsForFileInDirectory(file_80, "/files");// coverage -// -// auto file_50 = wrench::Simulation::addFile("file_50", 50); -// ASSERT_FALSE(fs1->reserveSpace(file_50, "/files/")); -// ASSERT_DOUBLE_EQ(20, fs1->getFreeSpace()); -// fs1->removeFileFromDirectory(file_80, "/files/"); -// ASSERT_DOUBLE_EQ(100, fs1->getFreeSpace()); -// -// fs1->storeFileInDirectory(file_50, "/faa"); -// fs1->removeAllFilesInDirectory("/faa/");// coverage -// -// -// workflow->clear(); -// wrench::Simulation::removeAllFiles(); -// -// for (int i = 0; i < argc; i++) -// free(argv[i]); -// free(argv); -//} -// -// -//TEST_F(LogicalFileSystemTest, DevNullTests) { -// DO_TEST_WITH_FORK(do_DevNullTests); -//} -// -//void LogicalFileSystemTest::do_DevNullTests() { -// // Create and initialize the simulation -// auto simulation = wrench::Simulation::createSimulation(); -// -// int argc = 1; -// char **argv = (char **) calloc(argc, sizeof(char *)); -// argv[0] = strdup("unit_test"); -// // argv[1] = strdup("--wrench-full-log"); -// -// ASSERT_NO_THROW(simulation->init(&argc, argv)); -// auto workflow = wrench::Workflow::createWorkflow(); -// -// // set up the platform -// ASSERT_NO_THROW(simulation->instantiatePlatform(platform_file_path)); -// -// // Create a 
Storage Services -// std::shared_ptr storage_service; -// ASSERT_NO_THROW(storage_service = simulation->add( -// wrench::SimpleStorageService::createSimpleStorageService("Host", {"/"}))); -// -// // Create a Logical File System -// auto fs1 = wrench::LogicalFileSystem::createLogicalFileSystem("Host", storage_service.get(), "/dev/null"); -// -// auto file = wrench::Simulation::addFile("file", 1); -// -// fs1->createDirectory(("/foo")); -// fs1->createFile(file, "/foo"); -// ASSERT_FALSE(fs1->doesDirectoryExist(("/foo"))); -// ASSERT_TRUE(fs1->isDirectoryEmpty(("/foo"))); -// ASSERT_FALSE(fs1->isFileInDirectory(file, "/foo")); -// fs1->removeEmptyDirectory("/foo"); -// fs1->storeFileInDirectory(file, "/foo"); -// fs1->removeFileFromDirectory(file, "/foo"); -// fs1->removeAllFilesInDirectory("/foo"); -// ASSERT_TRUE(fs1->listFilesInDirectory("/foo").empty()); -// fs1->reserveSpace(file, "/foo"); -// fs1->unreserveSpace(file, "/foo"); -// fs1->getFileLastWriteDate(file, "/foo"); -// -// // Create a Logical File System -// auto fs2 = wrench::LogicalFileSystem::createLogicalFileSystem("Host", storage_service.get(), "/dev/null", "LRU"); -// -// fs2->createDirectory(("/foo")); -// ASSERT_FALSE(fs2->doesDirectoryExist(("/foo"))); -// ASSERT_TRUE(fs2->isDirectoryEmpty(("/foo"))); -// ASSERT_FALSE(fs2->isFileInDirectory(file, "/foo")); -// fs2->removeEmptyDirectory("/foo"); -// fs2->storeFileInDirectory(file, "/foo"); -// fs2->removeFileFromDirectory(file, "/foo"); -// fs2->removeAllFilesInDirectory("/foo"); -// ASSERT_TRUE(fs2->listFilesInDirectory("/foo").empty()); -// fs2->reserveSpace(file, "/foo"); -// fs2->unreserveSpace(file, "/foo"); -// fs2->getFileLastWriteDate(file, "/foo"); -// -// workflow->clear(); -// wrench::Simulation::removeAllFiles(); -// -// for (int i = 0; i < argc; i++) -// free(argv[i]); -// free(argv); -//} -// -// -//TEST_F(LogicalFileSystemTest, LRUTests) { -// DO_TEST_WITH_FORK(do_LRUTests); -//} -// -//void LogicalFileSystemTest::do_LRUTests() { -// // Create and initialize the simulation -// auto simulation = wrench::Simulation::createSimulation(); -// -// int argc = 1; -// char **argv = (char **) calloc(argc, sizeof(char *)); -// argv[0] = strdup("unit_test"); -// // argv[1] = strdup("--wrench-full-log"); -// -// ASSERT_NO_THROW(simulation->init(&argc, argv)); -// -// // set up the platform -// ASSERT_NO_THROW(simulation->instantiatePlatform(platform_file_path)); -// -// // Create a Storage Services -// std::shared_ptr storage_service; -// ASSERT_NO_THROW(storage_service = simulation->add( -// wrench::SimpleStorageService::createSimpleStorageService("Host", {"/"}))); -// -// // Create a Logical File System with LRU eviction -// auto fs1 = wrench::LogicalFileSystem::createLogicalFileSystem("Host", storage_service.get(), "/tmp", "LRU"); -// -// auto file_60 = wrench::Simulation::addFile("file_60", 60); -// auto file_50 = wrench::Simulation::addFile("file_50", 50); -// auto file_30 = wrench::Simulation::addFile("file_30", 30); -// auto file_20 = wrench::Simulation::addFile("file_20", 20); -// auto file_10 = wrench::Simulation::addFile("file_10", 10); -// -// -// fs1->createDirectory(("/foo")); -// ASSERT_TRUE(fs1->reserveSpace(file_60, "/foo")); -// ASSERT_FALSE(fs1->reserveSpace(file_50, "/foo")); -// fs1->storeFileInDirectory(file_60, "/foo"); -// ASSERT_DOUBLE_EQ(40, fs1->getFreeSpace()); -// fs1->storeFileInDirectory(file_10, "/foo"); -// ASSERT_DOUBLE_EQ(30, fs1->getFreeSpace()); -// -// ASSERT_TRUE(fs1->reserveSpace(file_50, "/foo")); -// // Check that file_60 has 
been evicted -// ASSERT_FALSE(fs1->isFileInDirectory(file_60, "/foo")); -// // Check that file_10 is still there evicted -// ASSERT_TRUE(fs1->isFileInDirectory(file_10, "/foo")); -// fs1->storeFileInDirectory(file_50, "/foo"); -// ASSERT_DOUBLE_EQ(40, fs1->getFreeSpace()); -// -// // At this point the content is: -// // If I store another file that requires 50 bytes, but make file_10 unevictable, file_50 should be evicted -// auto other_file_50 = wrench::Simulation::addFile("other_file_50", 50); -// fs1->incrementNumRunningTransactionsForFileInDirectory(file_10, "/foo"); -// ASSERT_TRUE(fs1->reserveSpace(other_file_50, "/foo")); -// ASSERT_TRUE(fs1->isFileInDirectory(file_10, "/foo")); -// ASSERT_FALSE(fs1->isFileInDirectory(file_50, "/foo")); -// fs1->storeFileInDirectory(other_file_50, "/foo"); -// fs1->updateReadDate(other_file_50, "/foo"); -// fs1->updateReadDate(other_file_50, "/faa");// coverage -// -// // At this point the content is; -// // LRU: file_10 (UNEVICTABLE), other_file_50 (EVICTABLE) -// fs1->incrementNumRunningTransactionsForFileInDirectory(other_file_50, "/foo"); -// // At this point the content is; -// // LRU: file_10 (UNEVICTABLE), other_file_50 (UNEVICTABLE) -// // I should not be able to store/reserve space for file_50 -// ASSERT_FALSE(fs1->reserveSpace(file_50, "/foo")); -// ASSERT_DOUBLE_EQ(fs1->getFreeSpace(), 40); -// // Make other_file_50 EVICTABLE again -// fs1->decrementNumRunningTransactionsForFileInDirectory(other_file_50, "/foo"); -// ASSERT_TRUE(fs1->reserveSpace(file_50, "/foo")); -// ASSERT_FALSE(fs1->isFileInDirectory(file_50, "/foo")); -// ASSERT_DOUBLE_EQ(fs1->getFreeSpace(), 40); -// -// -// fs1->removeFileFromDirectory(file_10, "/foo");// coverage -// fs1->storeFileInDirectory(file_10, "/foo"); // coverage -// fs1->removeAllFilesInDirectory("/foo"); // coverage -// -// fs1->storeFileInDirectory(file_10, "/faa");// coverage -// -// for (int i = 0; i < argc; i++) -// free(argv[i]); -// free(argv); -//} diff --git a/test/services/storage_services/SimpleStorageService/SimpleStorageServiceFunctionalTest.cpp b/test/services/storage_services/SimpleStorageService/SimpleStorageServiceFunctionalTest.cpp index c838d6211b..18add7f1c1 100755 --- a/test/services/storage_services/SimpleStorageService/SimpleStorageServiceFunctionalTest.cpp +++ b/test/services/storage_services/SimpleStorageService/SimpleStorageServiceFunctionalTest.cpp @@ -35,23 +35,23 @@ class SimpleStorageServiceFunctionalTest : public ::testing::Test { std::shared_ptr compute_service = nullptr; - void do_BasicFunctionality_test(double buffer_size); + void do_BasicFunctionality_test(sg_size_t buffer_size); - void do_SynchronousFileCopy_test(double buffer_size); + void do_SynchronousFileCopy_test(sg_size_t buffer_size); - void do_AsynchronousFileCopy_test(double buffer_size); + void do_AsynchronousFileCopy_test(sg_size_t buffer_size); - void do_SynchronousFileCopyFailures_test(double buffer_size); + void do_SynchronousFileCopyFailures_test(sg_size_t buffer_size); - void do_AsynchronousFileCopyFailures_test(double buffer_size); + void do_AsynchronousFileCopyFailures_test(sg_size_t buffer_size); - void do_Partitions_test(double buffer_size); + void do_Partitions_test(sg_size_t buffer_size); - void do_FileWrite_test(double buffer_size); + void do_FileWrite_test(sg_size_t buffer_size); protected: - ~SimpleStorageServiceFunctionalTest() { + ~SimpleStorageServiceFunctionalTest() override { workflow->clear(); wrench::Simulation::removeAllFiles(); } @@ -113,7 +113,7 @@ class 
SimpleStorageServiceBasicFunctionalityTestWMS : public wrench::ExecutionCo public: SimpleStorageServiceBasicFunctionalityTestWMS(SimpleStorageServiceFunctionalTest *test, - std::string hostname) : wrench::ExecutionController(hostname, "test"), test(test) { + const std::string& hostname) : wrench::ExecutionController(hostname, "test"), test(test) { } private: @@ -237,14 +237,14 @@ class SimpleStorageServiceBasicFunctionalityTestWMS : public wrench::ExecutionCo // Send a free space request - double free_space; + sg_size_t free_space; try { free_space = this->test->storage_service_100->getTotalFreeSpace(); } catch (wrench::ExecutionException &e) { throw std::runtime_error("Should be able to get a storage's service free space"); } - if (free_space != 90.0) { + if (free_space != 90) { throw std::runtime_error( "Free space on storage service is wrong (" + std::to_string(free_space) + ") instead of 90.0"); } @@ -255,7 +255,7 @@ class SimpleStorageServiceBasicFunctionalityTestWMS : public wrench::ExecutionCo } catch (wrench::ExecutionException &e) { throw std::runtime_error("Should be able to get a storage's service free space at a path"); } - if (free_space != 90.0) { + if (free_space != 90) { throw std::runtime_error( "Free space on storage service is wrong (" + std::to_string(free_space) + ") instead of 90.0"); } @@ -266,7 +266,7 @@ class SimpleStorageServiceBasicFunctionalityTestWMS : public wrench::ExecutionCo } catch (wrench::ExecutionException &e) { throw std::runtime_error("Should be able to get a storage's service free space at a path"); } - if (free_space != 90.0) { + if (free_space != 90) { throw std::runtime_error( "Free space on storage service is wrong (" + std::to_string(free_space) + ") instead of 90.0"); } @@ -277,7 +277,7 @@ class SimpleStorageServiceBasicFunctionalityTestWMS : public wrench::ExecutionCo } catch (wrench::ExecutionException &ignore) { throw std::runtime_error("Should be able to get a storage's service free space, even at a bogus path"); } - if (free_space != 0.0) { + if (free_space != 0) { throw std::runtime_error( "Free space on storage service at a bogus path should be 0.0 (" + std::to_string(free_space) + ")"); } @@ -402,7 +402,7 @@ class SimpleStorageServiceBasicFunctionalityTestWMS : public wrench::ExecutionCo throw std::runtime_error("Should be able to get a storage's service free space"); } - if (free_space != 100.0) { + if (free_space != 100) { throw std::runtime_error( "Free space on storage service is wrong (" + std::to_string(free_space) + ") instead of 100.0"); } @@ -468,7 +468,7 @@ class SimpleStorageServiceBasicFunctionalityTestWMS : public wrench::ExecutionCo } catch (wrench::ExecutionException &e) { throw std::runtime_error("Should be able to get a storage's service free space"); } - if (free_space != 99.0) { + if (free_space != 99) { throw std::runtime_error( "Free space on storage service is wrong (" + std::to_string(free_space) + ") instead of 99.0"); } @@ -644,7 +644,7 @@ TEST_F(SimpleStorageServiceFunctionalTest, BasicFunctionality) { DO_TEST_WITH_FORK_ONE_ARG(do_BasicFunctionality_test, 0); } -void SimpleStorageServiceFunctionalTest::do_BasicFunctionality_test(double buffer_size) { +void SimpleStorageServiceFunctionalTest::do_BasicFunctionality_test(sg_size_t buffer_size) { // Create and initialize a simulation auto simulation = wrench::Simulation::createSimulation(); @@ -724,7 +724,6 @@ void SimpleStorageServiceFunctionalTest::do_BasicFunctionality_test(double buffe free(argv); } - 
/**********************************************************************/ /** SYNCHRONOUS FILE COPY TEST **/ /**********************************************************************/ @@ -784,13 +783,13 @@ class SimpleStorageServiceSynchronousFileCopyTestWMS : public wrench::ExecutionC throw std::runtime_error("Should not be able to do a file copy with a bogus path"); } catch (wrench::ExecutionException &e) { auto cause = e.getCause(); - if (auto real_cause = std::dynamic_pointer_cast(e.getCause())) { - real_cause->toString(); // Coverage - real_cause->getLocation();// Coverage - } else if (auto real_cause = std::dynamic_pointer_cast(e.getCause())) { - real_cause->toString(); // Coverage - real_cause->getFile(); // Coverage - real_cause->getLocation();// Coverage + if (auto real_cause_1 = std::dynamic_pointer_cast(e.getCause())) { + real_cause_1->toString(); // Coverage + real_cause_1->getLocation();// Coverage + } else if (auto real_cause_2 = std::dynamic_pointer_cast(e.getCause())) { + real_cause_2->toString(); // Coverage + real_cause_2->getFile(); // Coverage + real_cause_2->getLocation();// Coverage } else { throw std::runtime_error("Got the expected exception, but the failure cause is not InvalidDirectoryPath or FileNotFound (it's " + cause->toString() + ")"); } @@ -832,7 +831,7 @@ TEST_F(SimpleStorageServiceFunctionalTest, SynchronousFileCopy) { DO_TEST_WITH_FORK_ONE_ARG(do_SynchronousFileCopy_test, 0); } -void SimpleStorageServiceFunctionalTest::do_SynchronousFileCopy_test(double buffer_size) { +void SimpleStorageServiceFunctionalTest::do_SynchronousFileCopy_test(sg_size_t buffer_size) { // Create and initialize a simulation auto simulation = wrench::Simulation::createSimulation(); @@ -898,7 +897,7 @@ class SimpleStorageServiceAsynchronousFileCopyTestWMS : public wrench::Execution public: SimpleStorageServiceAsynchronousFileCopyTestWMS(SimpleStorageServiceFunctionalTest *test, - std::string hostname) : wrench::ExecutionController(hostname, "test"), test(test) { + const std::string& hostname) : wrench::ExecutionController(hostname, "test"), test(test) { } private: @@ -978,7 +977,7 @@ TEST_F(SimpleStorageServiceFunctionalTest, AsynchronousFileCopy) { DO_TEST_WITH_FORK_ONE_ARG(do_AsynchronousFileCopy_test, 0); } -void SimpleStorageServiceFunctionalTest::do_AsynchronousFileCopy_test(double buffer_size) { +void SimpleStorageServiceFunctionalTest::do_AsynchronousFileCopy_test(sg_size_t buffer_size) { // Create and initialize a simulation auto simulation = wrench::Simulation::createSimulation(); @@ -1043,7 +1042,7 @@ class SimpleStorageServiceSynchronousFileCopyFailuresTestWMS : public wrench::Ex public: SimpleStorageServiceSynchronousFileCopyFailuresTestWMS(SimpleStorageServiceFunctionalTest *test, - std::string hostname) : wrench::ExecutionController(hostname, "test"), test(test) { + const std::string& hostname) : wrench::ExecutionController(hostname, "test"), test(test) { } private: @@ -1175,7 +1174,7 @@ TEST_F(SimpleStorageServiceFunctionalTest, SynchronousFileCopyFailures) { DO_TEST_WITH_FORK_ONE_ARG(do_SynchronousFileCopyFailures_test, 0); } -void SimpleStorageServiceFunctionalTest::do_SynchronousFileCopyFailures_test(double buffer_size) { +void SimpleStorageServiceFunctionalTest::do_SynchronousFileCopyFailures_test(sg_size_t buffer_size) { // Create and initialize a simulation auto simulation = wrench::Simulation::createSimulation(); @@ -1246,7 +1245,7 @@ class SimpleStorageServiceAsynchronousFileCopyFailuresTestWMS : public wrench::E public: 
SimpleStorageServiceAsynchronousFileCopyFailuresTestWMS(SimpleStorageServiceFunctionalTest *test, - std::string hostname) : wrench::ExecutionController(hostname, "test"), test(test) { + const std::string& hostname) : wrench::ExecutionController(hostname, "test"), test(test) { } private: @@ -1382,7 +1381,7 @@ TEST_F(SimpleStorageServiceFunctionalTest, AsynchronousFileCopyFailures) { DO_TEST_WITH_FORK_ONE_ARG(do_AsynchronousFileCopyFailures_test, 0); } -void SimpleStorageServiceFunctionalTest::do_AsynchronousFileCopyFailures_test(double buffer_size) { +void SimpleStorageServiceFunctionalTest::do_AsynchronousFileCopyFailures_test(sg_size_t buffer_size) { // Create and initialize a simulation auto simulation = wrench::Simulation::createSimulation(); @@ -1449,7 +1448,7 @@ class PartitionsTestWMS : public wrench::ExecutionController { public: PartitionsTestWMS(SimpleStorageServiceFunctionalTest *test, - std::string hostname) : wrench::ExecutionController(hostname, "test"), test(test) { + const std::string& hostname) : wrench::ExecutionController(hostname, "test"), test(test) { } private: @@ -1642,7 +1641,7 @@ TEST_F(SimpleStorageServiceFunctionalTest, Partitions) { DO_TEST_WITH_FORK_ONE_ARG(do_Partitions_test, 0); } -void SimpleStorageServiceFunctionalTest::do_Partitions_test(double buffer_size) { +void SimpleStorageServiceFunctionalTest::do_Partitions_test(sg_size_t buffer_size) { // Create and initialize a simulation auto simulation = wrench::Simulation::createSimulation(); @@ -1700,7 +1699,7 @@ class FileWriteTestWMS : public wrench::ExecutionController { public: FileWriteTestWMS(SimpleStorageServiceFunctionalTest *test, - std::string hostname) : wrench::ExecutionController(hostname, "test"), test(test) { + const std::string& hostname) : wrench::ExecutionController(hostname, "test"), test(test) { } private: @@ -1747,7 +1746,7 @@ TEST_F(SimpleStorageServiceFunctionalTest, FileWrite) { DO_TEST_WITH_FORK_ONE_ARG(do_FileWrite_test, 0); } -void SimpleStorageServiceFunctionalTest::do_FileWrite_test(double buffer_size) { +void SimpleStorageServiceFunctionalTest::do_FileWrite_test(sg_size_t buffer_size) { // Create and initialize a simulation auto simulation = wrench::Simulation::createSimulation(); diff --git a/test/services/storage_services/SimpleStorageService/SimpleStorageServiceWrongServiceTest.cpp b/test/services/storage_services/SimpleStorageService/SimpleStorageServiceWrongServiceTest.cpp new file mode 100755 index 0000000000..7e1d680f12 --- /dev/null +++ b/test/services/storage_services/SimpleStorageService/SimpleStorageServiceWrongServiceTest.cpp @@ -0,0 +1,162 @@ +/** + * Copyright (c) 2017. The WRENCH Team. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ */ + + +#include + +#include +#include "../../../include/TestWithFork.h" +#include "../../../include/UniqueTmpPathPrefix.h" + +WRENCH_LOG_CATEGORY(simple_storage_service_wrong_service_test, "Log category for SimpleStorageServiceWrongServiceTest"); + + +class SimpleStorageServiceWrongServiceTest : public ::testing::Test { + +public: + std::shared_ptr workflow; + + std::shared_ptr file_1; + std::shared_ptr file_10; + std::shared_ptr file_100; + std::shared_ptr storage_service_1 = nullptr; + std::shared_ptr storage_service_2 = nullptr; + + void do_WrongService_test(double buffer_size); + + +protected: + ~SimpleStorageServiceWrongServiceTest() override { + workflow->clear(); + wrench::Simulation::removeAllFiles(); + } + + SimpleStorageServiceWrongServiceTest() { + + // Create the simplest workflow + workflow = wrench::Workflow::createWorkflow(); + + // Create the files + file_1 = wrench::Simulation::addFile("file_1", 1); + file_10 = wrench::Simulation::addFile("file_10", 10); + file_100 = wrench::Simulation::addFile("file_100", 100); + + // Create a one-host platform file + std::string xml = "" + "" + " " + " " + " " + " " + " " + " " + " " + " " + " " + " " + " " + " " + " " + " " + " " + ""; + FILE *platform_file = fopen(platform_file_path.c_str(), "w"); + fprintf(platform_file, "%s", xml.c_str()); + fclose(platform_file); + } + + std::string platform_file_path = UNIQUE_TMP_PATH_PREFIX + "platform.xml"; +}; + + +/**********************************************************************/ +/** WRONG SERVICE TEST **/ +/**********************************************************************/ + +class SimpleStorageServiceWrongServiceTestWMS : public wrench::ExecutionController { + +public: + SimpleStorageServiceWrongServiceTestWMS(SimpleStorageServiceWrongServiceTest *test, + const std::string& hostname) : wrench::ExecutionController(hostname, "test"), test(test) { + } + +private: + SimpleStorageServiceWrongServiceTest *test; + + int main() override { + + // Create a data movement manager + auto data_movement_manager = this->createDataMovementManager(); + + // Wrong-service read + { + try { + this->test->storage_service_1->readFile(wrench::FileLocation::LOCATION(this->test->storage_service_2, this->test->file_1)); + throw std::runtime_error("Shouldn't be able to read with a wrong-service location"); + } catch (std::invalid_argument &ignore) { + } + } + + // Wrong-service write + { + try { + this->test->storage_service_1->writeFile(wrench::FileLocation::LOCATION(this->test->storage_service_2, this->test->file_10)); + throw std::runtime_error("Shouldn't be able to write with a wrong-service location"); + } catch (std::invalid_argument &ignore) { + } + } + + return 0; + } +}; + +TEST_F(SimpleStorageServiceWrongServiceTest, WrongService) { + DO_TEST_WITH_FORK_ONE_ARG(do_WrongService_test, 1000000); +} + +void SimpleStorageServiceWrongServiceTest::do_WrongService_test(double buffer_size) { + + // Create and initialize a simulation + auto simulation = wrench::Simulation::createSimulation(); + + int argc = 1; + char **argv = (char **) calloc(argc, sizeof(char *)); + argv[0] = strdup("unit_test"); +// argv[1] = strdup("--wrench-full-log"); + + ASSERT_NO_THROW(simulation->init(&argc, argv)); + + // Setting up the platform + ASSERT_NO_THROW(simulation->instantiatePlatform(platform_file_path)); + + // Get a hostname + std::string hostname = wrench::Simulation::getHostnameList()[0]; + + // Create Three Storage Services + ASSERT_NO_THROW(storage_service_1 = simulation->add( + 
wrench::SimpleStorageService::createSimpleStorageService("Host1", {"/disk100"}, + {{wrench::SimpleStorageServiceProperty::BUFFER_SIZE, std::to_string(buffer_size)}}, {}))); + ASSERT_NO_THROW(storage_service_2 = simulation->add( + wrench::SimpleStorageService::createSimpleStorageService("Host2", {"/disk100"}, + {{wrench::SimpleStorageServiceProperty::BUFFER_SIZE, std::to_string(buffer_size)}}, {}))); + + // Create a WMS + ASSERT_NO_THROW(simulation->add(new SimpleStorageServiceWrongServiceTestWMS(this, hostname))); + + // Staging file_1 on both storage services + ASSERT_NO_THROW(storage_service_1->createFile(file_1)); + ASSERT_NO_THROW(storage_service_2->createFile(file_1)); + + ASSERT_NO_THROW(simulation->launch()); + + for (int i = 0; i < argc; i++) + free(argv[i]); + free(argv); +} + diff --git a/test/simulated_failures/link_failures/FileRegistryLinkFailuresTest.cpp b/test/simulated_failures/link_failures/FileRegistryLinkFailuresTest.cpp index 98f6fe0a66..9982e22ad1 100755 --- a/test/simulated_failures/link_failures/FileRegistryLinkFailuresTest.cpp +++ b/test/simulated_failures/link_failures/FileRegistryLinkFailuresTest.cpp @@ -181,7 +181,7 @@ void FileRegistryLinkFailuresTest::do_FileRegistryLinkFailureSimpleRandom_Test() } // Create a file registry service - double message_payload = 2; + sg_size_t message_payload = 2; wrench::WRENCH_MESSAGE_PAYLOAD_COLLECTION_TYPE payloads = { {wrench::FileRegistryServiceMessagePayload::ADD_ENTRY_REQUEST_MESSAGE_PAYLOAD, message_payload}, diff --git a/test/simulation/S4U_VirtualMachineTest.cpp b/test/simulation/S4U_VirtualMachineTest.cpp index 84206bbce9..63b5ccdb40 100755 --- a/test/simulation/S4U_VirtualMachineTest.cpp +++ b/test/simulation/S4U_VirtualMachineTest.cpp @@ -21,7 +21,7 @@ class S4U_VirtualMachineTest : public ::testing::Test { void do_basic_Test(); protected: - ~S4U_VirtualMachineTest() { + ~S4U_VirtualMachineTest() override { workflow->clear(); wrench::Simulation::removeAllFiles(); } @@ -73,7 +73,7 @@ class S4U_VirtualMachineTest : public ::testing::Test { class Sleep100Daemon : public wrench::S4U_Daemon { public: - Sleep100Daemon(std::string hostname) : S4U_Daemon(hostname, "sleep100daemon") {} + Sleep100Daemon(const std::string& hostname) : S4U_Daemon(hostname, "sleep100daemon") {} int main() override { simgrid::s4u::this_actor::execute(100); @@ -89,14 +89,10 @@ class Sleep100Daemon : public wrench::S4U_Daemon { class S4U_VirtualMachineTestWMS : public wrench::ExecutionController { public: - S4U_VirtualMachineTestWMS(S4U_VirtualMachineTest *test, - std::string hostname) : wrench::ExecutionController(hostname, "test") { - this->test = test; + S4U_VirtualMachineTestWMS(const std::string& hostname) : wrench::ExecutionController(hostname, "test") { } private: - S4U_VirtualMachineTest *test; - int main() override { auto vm = new wrench::S4U_VirtualMachine("vm", 1, 1, {}, {}); @@ -176,12 +172,7 @@ void S4U_VirtualMachineTest::do_basic_Test() { // Create a WMS - std::shared_ptr wms = nullptr; - - ASSERT_NO_THROW(wms = simulation->add( - new S4U_VirtualMachineTestWMS( - this, hostname))); - + ASSERT_NO_THROW(simulation->add(new S4U_VirtualMachineTestWMS(hostname))); // Running a "run a single task" simulation ASSERT_NO_THROW(simulation->launch()); diff --git a/tools/wrench/wrench-daemon/doc/wrench-openapi.json b/tools/wrench/wrench-daemon/doc/wrench-openapi.json index 8f452e1ea5..a18dd97475 100755 --- a/tools/wrench/wrench-daemon/doc/wrench-openapi.json +++ b/tools/wrench/wrench-daemon/doc/wrench-openapi.json @@ -62,10 +62,8 @@ } } }, - 
"405": { - "description": "Invalid input", - "content": {} - } + "404": { "$ref": "#/components/responses/NotFound" }, + "405": { "$ref": "#/components/responses/MethodNotAllowed" } } } }, @@ -98,10 +96,8 @@ } } }, - "405": { - "description": "Invalid input", - "content": {} - } + "404": { "$ref": "#/components/responses/NotFound" }, + "405": { "$ref": "#/components/responses/MethodNotAllowed" } } } }, @@ -151,10 +147,8 @@ } } }, - "405": { - "description": "Invalid input", - "content": {} - } + "404": { "$ref": "#/components/responses/NotFound" }, + "405": { "$ref": "#/components/responses/MethodNotAllowed" } } } }, @@ -207,10 +201,8 @@ } } }, - "405": { - "description": "Invalid input", - "content": {} - } + "404": { "$ref": "#/components/responses/NotFound" }, + "405": { "$ref": "#/components/responses/MethodNotAllowed" } } } }, @@ -243,10 +235,8 @@ } } }, - "405": { - "description": "Invalid input", - "content": {} - } + "404": { "$ref": "#/components/responses/NotFound" }, + "405": { "$ref": "#/components/responses/MethodNotAllowed" } } } }, @@ -279,10 +269,8 @@ } } }, - "405": { - "description": "Invalid input", - "content": {} - } + "404": { "$ref": "#/components/responses/NotFound" }, + "405": { "$ref": "#/components/responses/MethodNotAllowed" } } } }, @@ -315,10 +303,8 @@ } } }, - "405": { - "description": "Invalid input", - "content": {} - } + "404": { "$ref": "#/components/responses/NotFound" }, + "405": { "$ref": "#/components/responses/MethodNotAllowed" } } } }, @@ -1368,10 +1354,8 @@ } } }, - "405": { - "description": "Invalid input", - "content": {} - } + "404": { "$ref": "#/components/responses/NotFound" }, + "405": { "$ref": "#/components/responses/MethodNotAllowed" } } } }, @@ -1441,10 +1425,8 @@ } } }, - "405": { - "description": "Invalid input", - "content": {} - } + "404": { "$ref": "#/components/responses/NotFound" }, + "405": { "$ref": "#/components/responses/MethodNotAllowed" } } } }, @@ -1493,10 +1475,8 @@ } } }, - "405": { - "description": "Invalid input", - "content": {} - } + "404": { "$ref": "#/components/responses/NotFound" }, + "405": { "$ref": "#/components/responses/MethodNotAllowed" } } } }, @@ -1578,10 +1558,8 @@ } } }, - "405": { - "description": "Invalid input", - "content": {} - } + "404": { "$ref": "#/components/responses/NotFound" }, + "405": { "$ref": "#/components/responses/MethodNotAllowed" } } } }, @@ -1662,11 +1640,9 @@ } } } - }, - "405": { - "description": "Invalid input", - "content": {} - } + }, + "404": { "$ref": "#/components/responses/NotFound" }, + "405": { "$ref": "#/components/responses/MethodNotAllowed" } } } }, @@ -1748,10 +1724,8 @@ } } }, - "405": { - "description": "Invalid input", - "content": {} - } + "404": { "$ref": "#/components/responses/NotFound" }, + "405": { "$ref": "#/components/responses/MethodNotAllowed" } } } }, @@ -1793,10 +1767,8 @@ } } }, - "405": { - "description": "Invalid input", - "content": {} - } + "404": { "$ref": "#/components/responses/NotFound" }, + "405": { "$ref": "#/components/responses/MethodNotAllowed" } } } }, @@ -1838,10 +1810,8 @@ } } }, - "405": { - "description": "Invalid input", - "content": {} - } + "404": { "$ref": "#/components/responses/NotFound" }, + "405": { "$ref": "#/components/responses/MethodNotAllowed" } } } }, @@ -1883,10 +1853,8 @@ } } }, - "405": { - "description": "Invalid input", - "content": {} - } + "404": { "$ref": "#/components/responses/NotFound" }, + "405": { "$ref": "#/components/responses/MethodNotAllowed" } } } }, @@ -1944,10 +1912,8 @@ } } }, - "405": { - 
"description": "Invalid input", - "content": {} - } + "404": { "$ref": "#/components/responses/NotFound" }, + "405": { "$ref": "#/components/responses/MethodNotAllowed" } } } }, @@ -2005,10 +1971,8 @@ } } }, - "405": { - "description": "Invalid input", - "content": {} - } + "404": { "$ref": "#/components/responses/NotFound" }, + "405": { "$ref": "#/components/responses/MethodNotAllowed" } } } }, @@ -2064,10 +2028,8 @@ } } }, - "405": { - "description": "Invalid input", - "content": {} - } + "404": { "$ref": "#/components/responses/NotFound" }, + "405": { "$ref": "#/components/responses/MethodNotAllowed" } } } }, @@ -2116,10 +2078,8 @@ } } }, - "405": { - "description": "Invalid input", - "content": {} - } + "404": { "$ref": "#/components/responses/NotFound" }, + "405": { "$ref": "#/components/responses/MethodNotAllowed" } } } }, @@ -2172,10 +2132,8 @@ } } }, - "405": { - "description": "Invalid input", - "content": {} - } + "404": { "$ref": "#/components/responses/NotFound" }, + "405": { "$ref": "#/components/responses/MethodNotAllowed" } } } }, @@ -2287,10 +2245,8 @@ } } }, - "405": { - "description": "Invalid input", - "content": {} - } + "404": { "$ref": "#/components/responses/NotFound" }, + "405": { "$ref": "#/components/responses/MethodNotAllowed" } } } }, @@ -2478,10 +2434,8 @@ } } }, - "405": { - "description": "Invalid input", - "content": {} - } + "404": { "$ref": "#/components/responses/NotFound" }, + "405": { "$ref": "#/components/responses/MethodNotAllowed" } } } }, diff --git a/tools/wrench/wrench-daemon/include/SimulationLauncher.h b/tools/wrench/wrench-daemon/include/SimulationLauncher.h index 100e06b1a8..27fe8110a0 100755 --- a/tools/wrench/wrench-daemon/include/SimulationLauncher.h +++ b/tools/wrench/wrench-daemon/include/SimulationLauncher.h @@ -23,6 +23,7 @@ class SimulationLauncher { ~SimulationLauncher() = default; void createSimulation(bool full_log, + unsigned long num_commports, const std::string &platform_xml, const std::string &controller_host, int sleep_us); diff --git a/tools/wrench/wrench-daemon/include/WRENCHDaemon.h b/tools/wrench/wrench-daemon/include/WRENCHDaemon.h index b88bdf706b..987af81536 100644 --- a/tools/wrench/wrench-daemon/include/WRENCHDaemon.h +++ b/tools/wrench/wrench-daemon/include/WRENCHDaemon.h @@ -34,6 +34,7 @@ class WRENCHDaemon { public: WRENCHDaemon(bool simulation_logging, bool daemon_logging, + unsigned long num_commports, int port_number, int fixed_simulation_port_number, const std::string &allowed_origin, @@ -64,6 +65,7 @@ class WRENCHDaemon { bool simulation_logging; bool daemon_logging; + unsigned long num_commports; int port_number; int fixed_simulation_port_number; std::string allowed_origin; diff --git a/tools/wrench/wrench-daemon/src/SimulationController.cpp b/tools/wrench/wrench-daemon/src/SimulationController.cpp index 6ef8e6feba..554356f30e 100644 --- a/tools/wrench/wrench-daemon/src/SimulationController.cpp +++ b/tools/wrench/wrench-daemon/src/SimulationController.cpp @@ -107,7 +107,6 @@ namespace wrench { // Main control loop while (keep_going) { - // Starting compute and storage services that should be started, if any while (true) { std::function thing_to_do; @@ -375,7 +374,6 @@ namespace wrench { * @return JSON output */ json SimulationController::createVM(json data) { - std::string cs_name = data["service_name"]; unsigned long num_cores = data["num_cores"]; sg_size_t ram_memory = data["ram_memory"]; @@ -831,7 +829,6 @@ namespace wrench { * @return JSON output */ json SimulationController::createStandardJob(json data) { 
- std::vector> tasks; std::string workflow_name = data["workflow_name"]; std::shared_ptr workflow; @@ -1377,7 +1374,6 @@ namespace wrench { * @return JSON output */ json SimulationController::createTask(json data) { - std::string workflow_name = data["workflow_name"]; std::shared_ptr workflow; if (not this->workflow_registry.lookup(workflow_name, workflow)) { @@ -1594,7 +1590,6 @@ namespace wrench { * @return JSON output */ json SimulationController::getTaskOutputFiles(json data) { - std::string workflow_name = data["workflow_name"]; std::shared_ptr workflow; if (not this->workflow_registry.lookup(workflow_name, workflow)) { @@ -2011,7 +2006,6 @@ namespace wrench { * @return JSON output */ json SimulationController::createWorkflow(const json &data) { - auto wf = wrench::Workflow::createWorkflow(); json answer; answer["workflow_name"] = wf->getName(); diff --git a/tools/wrench/wrench-daemon/src/SimulationLauncher.cpp b/tools/wrench/wrench-daemon/src/SimulationLauncher.cpp index 95e8d28a86..015c76069a 100755 --- a/tools/wrench/wrench-daemon/src/SimulationLauncher.cpp +++ b/tools/wrench/wrench-daemon/src/SimulationLauncher.cpp @@ -22,11 +22,13 @@ * simple sets an error message variable and returns * * @param full_log: whether to show all simulation log + * @param num_commports: the number of comm ports to use * @param platform_xml: XML platform description (an XML string - not a file path) * @param controller_host: hostname of the host that will run the execution_controller * @param sleep_us: number of microseconds to sleep at each iteration of the main loop */ void SimulationLauncher::createSimulation(bool full_log, + unsigned long num_commports, const std::string &platform_xml, const std::string &controller_host, int sleep_us) { @@ -35,11 +37,12 @@ void SimulationLauncher::createSimulation(bool full_log, try { // Set up command-line arguments - int argc = (full_log ? 2 : 1); + int argc = (full_log ? 
3 : 2); char **argv = (char **) calloc((size_t) argc, sizeof(char *)); argv[0] = strdup("wrench-daemon-simulation"); - if (argc > 1) { - argv[1] = strdup("--wrench-full-log"); + argv[1] = strdup(("--wrench-commport-pool-size=" + std::to_string(num_commports)).c_str()); + if (argc > 2) { + argv[2] = strdup("--wrench-full-log"); } simulation = wrench::Simulation::createSimulation(); diff --git a/tools/wrench/wrench-daemon/src/WRENCHDaemon.cpp b/tools/wrench/wrench-daemon/src/WRENCHDaemon.cpp index 67874c78d3..05b84dda2f 100644 --- a/tools/wrench/wrench-daemon/src/WRENCHDaemon.cpp +++ b/tools/wrench/wrench-daemon/src/WRENCHDaemon.cpp @@ -36,6 +36,7 @@ std::vector WRENCHDaemon::allowed_origins; * @brief Constructor * @param simulation_logging true if simulation logging should be printed * @param daemon_logging true if daemon logging should be printed +* @param num_commports the number of commports to use * @param port_number port number on which to listen for 'start simulation' requests * @param simulation_port_number port number on which to listen for a new simulation (0 means: use a random port each time) * @param allowed_origin allowed origin for http connection @@ -44,11 +45,13 @@ std::vector WRENCHDaemon::allowed_origins; */ WRENCHDaemon::WRENCHDaemon(bool simulation_logging, bool daemon_logging, + unsigned long num_commports, int port_number, int simulation_port_number, const std::string &allowed_origin, int sleep_us) : simulation_logging(simulation_logging), daemon_logging(daemon_logging), + num_commports(num_commports), port_number(port_number), fixed_simulation_port_number(simulation_port_number), sleep_us(sleep_us) { @@ -240,6 +243,7 @@ void WRENCHDaemon::startSimulation(const Request &req, Response &res) { auto simulation_thread = std::thread([simulation_launcher, this, body, &guard, &signal]() { // Create simulation simulation_launcher->createSimulation(this->simulation_logging, + this->num_commports, body["platform_xml"], body["controller_hostname"], this->sleep_us); diff --git a/tools/wrench/wrench-daemon/src/main.cpp b/tools/wrench/wrench-daemon/src/main.cpp index 16ba67be48..7feedb524e 100644 --- a/tools/wrench/wrench-daemon/src/main.cpp +++ b/tools/wrench/wrench-daemon/src/main.cpp @@ -43,6 +43,8 @@ int main(int argc, char **argv) { "Show full simulation log during execution") ("daemon-logging", po::bool_switch()->default_value(false), "Show full daemon log during execution") + ("num-commports", po::value()->default_value(5000)->notifier(in(1, 100000, "port")), + "The number of commports that the simulation can use") ("port", po::value()->default_value(8101)->notifier(in(1024, 49151, "port")), "A port number, between 1024 and 4951, on which this daemon will listen for 'start simulation' requests") ("allow-origin", po::value()->default_value(""), @@ -69,6 +71,11 @@ int main(int argc, char **argv) { cerr << "Error: " << e.what() << "\n"; exit(1); } + + unsigned long num_commports = 5000; + if (vm.count("num-commports")) { + num_commports = vm["num-commports"].as(); + } int simulation_port = 0; if (vm.count("simulation-port")) { @@ -78,6 +85,7 @@ int main(int argc, char **argv) { // Create and run the WRENCH daemon WRENCHDaemon daemon(vm["simulation-logging"].as(), vm["daemon-logging"].as(), + num_commports, vm["port"].as(), simulation_port, vm["allow-origin"].as(), diff --git a/tools/wrench/wrench-init/base_code/Controller_ACTION.h b/tools/wrench/wrench-init/base_code/Controller_ACTION.h index 043beefbdc..6844235224 100755 --- 
a/tools/wrench/wrench-init/base_code/Controller_ACTION.h +++ b/tools/wrench/wrench-init/base_code/Controller_ACTION.h @@ -29,7 +29,7 @@ namespace wrench { protected: // Overridden method - void processEventCompoundJobCompletion(std::shared_ptr) override; + void processEventCompoundJobCompletion(std::shared_ptr); private: // main() method of the WMS diff --git a/tools/wrench/wrench-init/base_code/Controller_WORKFLOW.h b/tools/wrench/wrench-init/base_code/Controller_WORKFLOW.h index 7114a0a454..13d4ae99b4 100755 --- a/tools/wrench/wrench-init/base_code/Controller_WORKFLOW.h +++ b/tools/wrench/wrench-init/base_code/Controller_WORKFLOW.h @@ -30,7 +30,7 @@ namespace wrench { protected: // Overridden method - void processEventStandardJobCompletion(std::shared_ptr) override; + void processEventStandardJobCompletion(std::shared_ptr); private: // main() method of the WMS