Implement code coverage for python and change provider to codecov (#326)
* fix(documentation) remove space between badges

* feature(pytests) add steadystate testcases

* feature(coverage) create merged report for cpp and py and change to codecov

* fix(codecov) remove codecov yaml

* fix(codecov) fix lcov command

* feature(codecov) simplify report upload

* fix(codecov) fix python coverage

* doc(codecov) add documentation to testCoverage.py + bugfixes

* fix(ci) add python coverage package in travis
FFroehlich authored May 31, 2018
1 parent 331ad23 commit 17c07e8
Showing 9 changed files with 212 additions and 126 deletions.
3 changes: 3 additions & 0 deletions .gitignore
@@ -187,3 +187,6 @@ python/.idea/workspace.xml
 python/testSpeedy.py
 
 python/test.txt
+
+tests/test/*
+coverage_py.xml
10 changes: 5 additions & 5 deletions .travis.yml
@@ -1,5 +1,5 @@
 language: generic
-sudo: true
+sudo: false
 
 branches:
   only:
@@ -27,10 +27,10 @@ matrix:
            - swig3.0
            - libboost-serialization-dev
      env: ENABLE_GCOV_COVERAGE=TRUE
-     before_install:
-       - gem install coveralls-lcov
      after_success:
-       - ./scripts/run-coveralls.sh
+       - ./scripts/run-codecov.sh
+       - bash <(curl -s https://codecov.io/bash) -f coverage_py.xml -X fix -cF python
+       - bash <(curl -s https://codecov.io/bash) -f coverage.info -X fix -cF cpp
    - os: osx
      osx_image: xcode9.3
      compiler: clang
@@ -58,7 +58,7 @@ install:
  - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then pyenv versions; fi
  - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then pyenv shell 2.7 3.6; fi
  - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then export PATH=/Users/travis/Library/Python/3.6/bin:$PATH; fi
- - pip3 install --user --upgrade pip setuptools wheel pkgconfig doxypypy
+ - pip3 install --user --upgrade pip setuptools wheel pkgconfig doxypypy coverage
 - ./scripts/buildSuiteSparse.sh
 - ./scripts/buildSundials.sh
 - ./scripts/buildCpputest.sh
4 changes: 1 addition & 3 deletions README.md
@@ -8,8 +8,6 @@ The interface was designed to provide routines for efficient gradient computatio
 ## Current build status
 
 [![Build Status](https://travis-ci.org/ICB-DCM/AMICI.svg?branch=master)](https://travis-ci.org/ICB-DCM/AMICI)
-
-[![Coverage Status](https://coveralls.io/repos/github/ICB-DCM/AMICI/badge.svg?branch=master)](https://coveralls.io/github/ICB-DCM/AMICI?branch=master)
-
+[![CodeCov](https://codecov.io/gh/ICB-DCM/AMICI/branch/master/graph/badge.svg)](https://codecov.io/gh/ICB-DCM/AMICI)
 
 
2 changes: 1 addition & 1 deletion scripts/run-SBMLTestsuite.sh
@@ -14,7 +14,7 @@ if [ ! -d "tests/sbml-test-suite" ]; then
     mv -f ./sbml-test-suite ./tests/sbml-test-suite
 fi
 
-python3 ./tests/testSBML.py
+python3 ./tests/testSBMLSuite.py
 
 cat ./test.txt
 rm ./test.txt
5 changes: 3 additions & 2 deletions scripts/run-coveralls.sh → scripts/run-codecov.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Check code coverage via coveralls
+# Check code coverage via codecov
 
 AMICI_PATH="`dirname \"$0\"`"
 AMICI_PATH="`( cd \"$AMICI_PATH/..\" && pwd )`"
@@ -13,5 +13,6 @@ cd ${AMICI_PATH}
 
 lcov --compat-libtool --no-external --directory ${AMICI_PATH}/build/CMakeFiles/amici.dir/src --base-directory ${AMICI_PATH} --capture --output-file coverage.info
 
-coveralls-lcov coverage.info
+python3 ./tests/testCoverage.py
 
+rm -rf ./test
32 changes: 32 additions & 0 deletions tests/testCoverage.py
@@ -0,0 +1,32 @@
#!/usr/bin/env python3

"""
Generate coverage reports for the testModels and testSBML scripts
exported format is cobertura xml
"""

import coverage
import unittest
import os
import sys
import amici

import testModels
import testSBML

# only consider amici module and ignore the swig generated amici.py
cov = coverage.Coverage(source=['amici'],omit=['*/amici.py'])

# ignore code blocks containing import statements
cov.exclude('import')
cov.start()

# build the testSuite from testModels and testSBML
suite = unittest.TestSuite()
suite.addTest(testModels.TestAmiciPregeneratedModel())
suite.addTest(testSBML.TestAmiciSBMLModel())
testRunner = unittest.TextTestRunner(verbosity=0)
testRunner.run(suite)

cov.stop()
cov.xml_report(outfile='coverage_py.xml')
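
For reference, the coverage.py API used above follows a simple pattern: create a Coverage object scoped to the package of interest, start measurement, exercise the code, stop, and write a Cobertura-style XML report. A minimal sketch under assumed names (mypkg and run_workload are placeholders, not part of this commit):

    import coverage

    # measure only files belonging to the (hypothetical) package 'mypkg'
    cov = coverage.Coverage(source=['mypkg'])
    cov.start()

    import mypkg          # code imported and executed after start() is measured
    mypkg.run_workload()  # placeholder entry point that exercises the package

    cov.stop()
    # Cobertura-style XML, the format uploaded to codecov in .travis.yml
    cov.xml_report(outfile='coverage_py.xml')

In testCoverage.py itself, amici is imported before measurement starts, which is presumably why lines matching 'import' are additionally excluded from the report via cov.exclude('import').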
27 changes: 13 additions & 14 deletions tests/testModels.py
@@ -64,16 +64,16 @@ def runTest(self):
 
 
 
-def verifySimulationResults(rdata,expectedResults,atol=1e-8,rtol=1e-4):
+def verifySimulationResults(rdata, expectedResults, atol=1e-8, rtol=1e-4):
     '''
     compares all fields of the simulation results in rdata against the expectedResults using the provided
     tolerances
-    Args:
-    rdata: simulation results as returned by amici.runAmiciSimulation
-    expectedResults: stored test results
-    atol: absolute tolerance
-    rtol: relative tolerance
+    Arguments:
+    rdata: simulation results as returned by amici.runAmiciSimulation
+    expectedResults: stored test results
+    atol: absolute tolerance
+    rtol: relative tolerance
     '''
 
     if expectedResults.attrs['status'][0] != 0:
@@ -98,12 +98,12 @@ def checkResults(rdata, field, expected, atol, rtol):
     '''
     checks whether rdata[field] agrees with expected according to provided tolerances
-    Args:
-    rdata: simulation results as returned by amici.runAmiciSimulation
-    field: name of the field to check
-    expected: expected test results
-    atol: absolute tolerance
-    rtol: relative tolerance
+    Arguments:
+    rdata: simulation results as returned by amici.runAmiciSimulation
+    field: name of the field to check
+    expected: expected test results
+    atol: absolute tolerance
+    rtol: relative tolerance
     '''
 
     result = rdata[field]
@@ -126,8 +126,7 @@ def checkResults(rdata, field, expected, atol, rtol):
     adev = adev[~np.isinf(expected)]
     rdev = rdev[~np.isinf(expected)]
 
-    if not np.all(np.logical_or(rdev <= rtol,adev <= atol)):
-        assert np.all(np.logical_or(rdev <= rtol,adev <= atol))
+    assert np.all(np.logical_or(rdev <= rtol, adev <= atol))
 
 
 
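
The new single assert in checkResults encodes a mixed tolerance criterion: an entry passes if its absolute deviation is within atol or its relative deviation is within rtol, and the test fails as soon as any entry violates both. A self-contained illustration with made-up numbers (not data from this commit):

    import numpy as np

    atol, rtol = 1e-8, 1e-4

    expected = np.array([1e-12, 1.0, 100.0])
    result = np.array([2e-12, 1.00005, 100.5])

    adev = np.abs(result - expected)               # absolute deviation
    rdev = np.abs((result - expected) / expected)  # relative deviation

    # an entry passes if EITHER tolerance holds
    passes = np.logical_or(rdev <= rtol, adev <= atol)
    print(passes)  # [ True  True False] -> the assert in checkResults would fail

Note this differs from numpy.allclose, which combines the tolerances additively as atol + rtol * |expected| rather than taking a logical or.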
138 changes: 37 additions & 101 deletions tests/testSBML.py
@@ -1,117 +1,53 @@
 #!/usr/bin/env python3
-"""Run SBML Test Suite and verify simulation results [https://github.com/sbmlteam/sbml-test-suite/releases]"""
-import traceback
-import os
+
 import sys
+import h5py
+import amici
+import unittest
 import importlib
+import os
+import re
 import numpy as np
-import amici
 
-# directory with sbml semantic test cases
-test_path = os.path.join(os.path.dirname(__file__), 'sbml-test-suite', 'cases', 'semantic')
+class TestAmiciSBMLModel(unittest.TestCase):
+    '''
+    TestCase class for tests that were pregenerated using the matlab code generation routines and cmake
+    build routines
+    NOTE: requires having run scripts/buildTests.sh before to build the python modules for the test models
+    '''
+    expectedResultsFile = os.path.join(os.path.dirname(__file__), 'cpputest', 'expectedResults.h5')
 
-def runTest(testId, logfile):
-    try:
-        current_test_path = os.path.join(test_path, testId)
-
-        # results
-        resultsFile = os.path.join(current_test_path, testId + '-results.csv')
-        results = np.genfromtxt(resultsFile, delimiter=',')
+    def runTest(self):
+        '''
+        test runner routine that loads data expectedResults.h5 hdf file and runs individual models/settings
+        as subTests
+        '''
 
-        # model
-        sbmlFile = findModelFile(current_test_path, testId)
+        sbmlFile = os.path.join(os.path.dirname(__file__), '..', 'python', 'examples', 'example_steadystate', 'model_steadystate_scaled.sbml')
+        sbmlImporter = amici.SbmlImporter(sbmlFile)
+        sbml = sbmlImporter.sbml
 
-        wrapper = amici.SbmlImporter(sbmlFile)
-
-        modelDir = os.path.join(os.path.dirname(__file__),'SBMLTestModels',testId)
-        if not os.path.exists(modelDir):
-            os.makedirs(modelDir)
-        wrapper.sbml2amici('SBMLTest' + testId, output_dir=modelDir)
-
-        # settings
-        settings = readSettingsFile(current_test_path, testId)
-        ts = np.linspace(float(settings['start']), float(settings['start']) + float(settings['duration']),
-                         int(settings['steps']) + 1)
-        atol = float(settings['absolute'])
-        rtol = float(settings['relative'])
+        observables = amici.assignmentRules2observables(sbml, filter=lambda variableId:
+                                                        variableId.startswith('observable_') and not variableId.endswith('_sigma'))
 
+        sbmlImporter.sbml2amici('test', 'test',
+                                observables=observables,
+                                constantParameters=['k4'],
+                                sigmas={'observable_x1withsigma': 'observable_x1withsigma_sigma'})
 
-        sys.path.insert(0, wrapper.modelPath)
-        mod = importlib.import_module(wrapper.modelName)
+        sys.path.insert(0, 'test')
+        import test as modelModule
 
-        model = mod.getModel()
-        model.setTimepoints(mod.amici.DoubleVector(ts))
+        model = modelModule.getModel()
+        model.setTimepoints(amici.DoubleVector(np.linspace(0, 60, 60)))
         solver = model.getSolver()
-        solver.setMaxSteps(int(1e6))
-        solver.setRelativeTolerance(rtol / 1000.0)
-        solver.setAbsoluteTolerance(atol / 1000.0)
         rdata = amici.runAmiciSimulation(model, solver)
-        amountSpecies = settings['amount'].replace(' ', '').replace('\n', '').split(',')
-        simulated_x = rdata['x']
-        test_x = results[1:, [1 + wrapper.speciesIndex[variable] for variable in settings['variables'].replace(' ', '').replace('\n', '').split(',') if variable in wrapper.speciesIndex.keys()]]
-
-        for species in amountSpecies:
-            if not species == '':
-                volume = wrapper.speciesCompartment[wrapper.speciesIndex[species]].subs(wrapper.compartmentSymbols,
-                                                                                        wrapper.compartmentVolume)
-                simulated_x[:, wrapper.speciesIndex[species]] = simulated_x[:, wrapper.speciesIndex[species]] * volume
-                pass
-
-        adev = abs(simulated_x - test_x)
-        adev[np.isnan(adev)] = True
-        rdev = abs((simulated_x - test_x) / test_x)
-        rdev[np.isnan(rdev)] = True
-        if (not np.all(np.logical_or(adev < atol, rdev < rtol))):
-            if (not np.all(adev < atol)):
-                raise Exception('Absolute tolerance violated')
-
-            if (not np.all(rdev < rtol)):
-                raise Exception('Relative tolerance violated')
-
-    except amici.SBMLException as err:
-        print("Did not run test " + testId + ": {0}".format(err))
-        pass
-
-    except Exception as err:
-        str = "Failed test " + testId + ": {0}".format(err)
-        traceback.print_exc(10)
-        logfile.write(str + '\n')
-        return
-
-def findModelFile(current_test_path, testId):
-    """Find model file for the given test (guess filename extension)"""
-    sbmlFile = os.path.join(current_test_path, testId + '-sbml-l3v2.xml')
-
-    # fallback l3v1
-    if not os.path.isfile(sbmlFile):
-        sbmlFile = os.path.join(current_test_path, testId + '-sbml-l3v1.xml')
-
-    # fallback l2v5
-    if not os.path.isfile(sbmlFile):
-        sbmlFile = os.path.join(current_test_path, testId + '-sbml-l2v5.xml')
-
-    return sbmlFile
-
-def readSettingsFile(current_test_path, testId):
-    """Read settings for the given test"""
-    settingsFile = os.path.join(current_test_path, testId + '-settings.txt')
-    settings = {}
-    with open(settingsFile) as f:
-        for line in f:
-            if not line == '\n':
-                (key, val) = line.split(':')
-                settings[key] = val
-    return settings
-
-
-def getTestStr(testId):
-    testStr = str(testId)
-    testStr = '0'*(5-len(testStr)) + testStr
-    return testStr
-
-def main():
-    for testId in range(1,1781):
-        with open("test.txt", "w") as logfile:
-            runTest(getTestStr(testId), logfile)
-
 if __name__ == '__main__':
-    main()
+    suite = unittest.TestSuite()
+    suite.addTest(TestAmiciPregeneratedModel())
+    unittest.main()
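
Since added and removed lines interleave above, the new tests/testSBML.py is easier to read in one piece. The following is a reconstruction from the added lines, not an authoritative copy of the committed file: unused imports carried over from testModels.py (h5py, importlib, re) are dropped, and the __main__ block's reference to TestAmiciPregeneratedModel (a class from testModels.py, apparently a copy-paste slip; unittest.main() ignores the manually built suite either way) is corrected to TestAmiciSBMLModel:

    #!/usr/bin/env python3

    import sys
    import os
    import unittest
    import numpy as np
    import amici


    class TestAmiciSBMLModel(unittest.TestCase):
        '''
        Import the steady-state example model from SBML and run a simulation.
        '''

        def runTest(self):
            sbmlFile = os.path.join(os.path.dirname(__file__), '..', 'python',
                                    'examples', 'example_steadystate',
                                    'model_steadystate_scaled.sbml')
            sbmlImporter = amici.SbmlImporter(sbmlFile)
            sbml = sbmlImporter.sbml

            # turn the observable_* assignment rules into AMICI observables
            observables = amici.assignmentRules2observables(
                sbml,
                filter=lambda variableId:
                    variableId.startswith('observable_')
                    and not variableId.endswith('_sigma'))

            sbmlImporter.sbml2amici(
                'test', 'test',
                observables=observables,
                constantParameters=['k4'],
                sigmas={'observable_x1withsigma':
                        'observable_x1withsigma_sigma'})

            # import the freshly generated model module and simulate
            sys.path.insert(0, 'test')
            import test as modelModule

            model = modelModule.getModel()
            model.setTimepoints(amici.DoubleVector(np.linspace(0, 60, 60)))
            solver = model.getSolver()
            rdata = amici.runAmiciSimulation(model, solver)


    if __name__ == '__main__':
        suite = unittest.TestSuite()
        suite.addTest(TestAmiciSBMLModel())  # committed code adds TestAmiciPregeneratedModel() here
        unittest.main()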
