diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..49b0156 --- /dev/null +++ b/.gitignore @@ -0,0 +1,299 @@ + +# Created by https://www.gitignore.io/api/macos,python,pycharm,intellij +# Edit at https://www.gitignore.io/?templates=macos,python,pycharm,intellij + +### Intellij ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# Ignore directories +.idea +/**/__pycache__ + + + +# User-specific stuff +# .idea/**/workspace.xml +# .idea/**/tasks.xml +# .idea/**/usage.statistics.xml +# .idea/**/dictionaries +# .idea/**/shelf + +# Generated files +# .idea/**/contentModel.xml + +# Sensitive or high-churn files +# .idea/**/dataSources/ +# .idea/**/dataSources.ids +# .idea/**/dataSources.local.xml +# .idea/**/sqlDataSources.xml +# .idea/**/dynamic.xml +# .idea/**/uiDesigner.xml +# .idea/**/dbnavigator.xml + +# Gradle +# .idea/**/gradle.xml +# .idea/**/libraries + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. 
+# .idea/modules.xml +# .idea/*.iml +# .idea/modules + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +# .idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +# .idea/replstate.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +# .idea/httpRequests + +# Android studio 3.1+ serialized cache file +# .idea/caches/build_file_checksums.ser + +### Intellij Patch ### +# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 + +# *.iml +# modules.xml +# .idea/misc.xml +# *.ipr + +# Sonarlint plugin +.idea/sonarlint + +### macOS ### +# General +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +### PyCharm ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff + +# Generated files + +# Sensitive or high-churn files + +# Gradle + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. 
+# .idea/modules.xml +# .idea/*.iml +# .idea/modules + +# CMake + +# Mongo Explorer plugin + +# File-based project format + +# IntelliJ + +# mpeltonen/sbt-idea plugin + +# JIRA plugin + +# Cursive Clojure plugin + +# Crashlytics plugin (for Android Studio and IntelliJ) + +# Editor-based Rest Client + +# Android studio 3.1+ serialized cache file + +### PyCharm Patch ### +# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 + +# *.iml +# modules.xml +# .idea/misc.xml +# *.ipr + +# Sonarlint plugin + +### Python ### +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +### Python Patch ### +.venv/ + +### Python.VirtualEnv Stack ### +# Virtualenv +# http://iamzed.com/2009/05/07/a-primer-on-virtualenv/ +[Bb]in +[Ii]nclude +[Ll]ib +[Ll]ib64 +[Ll]ocal +[Ss]cripts +pyvenv.cfg +pip-selfcheck.json + +# End of https://www.gitignore.io/api/macos,python,pycharm,intellij + +*testfiles* diff --git a/README.md b/README.md new file mode 100644 index 0000000..d6aa61a --- /dev/null +++ b/README.md @@ -0,0 +1,5 @@ +# VRE Project Overview + +This project was originally maintained as a branch in Sqooba's `Insel` mono-repo, and therefore "packaged" into a sub-folder +named `spitalhygiene`. + diff --git a/spitalhygiene/Docs/Makefile b/spitalhygiene/Docs/Makefile new file mode 100644 index 0000000..69fe55e --- /dev/null +++ b/spitalhygiene/Docs/Makefile @@ -0,0 +1,19 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line. 
+SPHINXOPTS = +SPHINXBUILD = sphinx-build +SOURCEDIR = source +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) \ No newline at end of file diff --git a/spitalhygiene/Docs/build/doctrees/Patient_Test_Data.doctree b/spitalhygiene/Docs/build/doctrees/Patient_Test_Data.doctree new file mode 100644 index 0000000..9c04e48 Binary files /dev/null and b/spitalhygiene/Docs/build/doctrees/Patient_Test_Data.doctree differ diff --git a/spitalhygiene/Docs/build/doctrees/Unused.doctree b/spitalhygiene/Docs/build/doctrees/Unused.doctree new file mode 100644 index 0000000..9973267 Binary files /dev/null and b/spitalhygiene/Docs/build/doctrees/Unused.doctree differ diff --git a/spitalhygiene/Docs/build/doctrees/VRE_Model.doctree b/spitalhygiene/Docs/build/doctrees/VRE_Model.doctree new file mode 100644 index 0000000..e1a75f6 Binary files /dev/null and b/spitalhygiene/Docs/build/doctrees/VRE_Model.doctree differ diff --git a/spitalhygiene/Docs/build/doctrees/environment.pickle b/spitalhygiene/Docs/build/doctrees/environment.pickle new file mode 100644 index 0000000..4d70280 Binary files /dev/null and b/spitalhygiene/Docs/build/doctrees/environment.pickle differ diff --git a/spitalhygiene/Docs/build/doctrees/index.doctree b/spitalhygiene/Docs/build/doctrees/index.doctree new file mode 100644 index 0000000..e42e32a Binary files /dev/null and b/spitalhygiene/Docs/build/doctrees/index.doctree differ diff --git a/spitalhygiene/Docs/build/doctrees/model.doctree b/spitalhygiene/Docs/build/doctrees/model.doctree new file mode 100644 index 0000000..b485bab Binary files /dev/null and 
b/spitalhygiene/Docs/build/doctrees/model.doctree differ diff --git a/spitalhygiene/Docs/build/doctrees/resources.doctree b/spitalhygiene/Docs/build/doctrees/resources.doctree new file mode 100644 index 0000000..462a91c Binary files /dev/null and b/spitalhygiene/Docs/build/doctrees/resources.doctree differ diff --git a/spitalhygiene/Docs/build/doctrees/sql.doctree b/spitalhygiene/Docs/build/doctrees/sql.doctree new file mode 100644 index 0000000..93a3595 Binary files /dev/null and b/spitalhygiene/Docs/build/doctrees/sql.doctree differ diff --git a/spitalhygiene/Docs/build/doctrees/tests.doctree b/spitalhygiene/Docs/build/doctrees/tests.doctree new file mode 100644 index 0000000..a93bbf0 Binary files /dev/null and b/spitalhygiene/Docs/build/doctrees/tests.doctree differ diff --git a/spitalhygiene/Docs/build/doctrees/vre.doctree b/spitalhygiene/Docs/build/doctrees/vre.doctree new file mode 100644 index 0000000..14f7b73 Binary files /dev/null and b/spitalhygiene/Docs/build/doctrees/vre.doctree differ diff --git a/spitalhygiene/Docs/build/html/.buildinfo b/spitalhygiene/Docs/build/html/.buildinfo new file mode 100644 index 0000000..adf09cb --- /dev/null +++ b/spitalhygiene/Docs/build/html/.buildinfo @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. +config: 1800654d7c15071b98f1472f8661f81b +tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/spitalhygiene/Docs/build/html/.nojekyll b/spitalhygiene/Docs/build/html/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/spitalhygiene/Docs/build/html/Patient_Test_Data.html b/spitalhygiene/Docs/build/html/Patient_Test_Data.html new file mode 100644 index 0000000..c88da4b --- /dev/null +++ b/spitalhygiene/Docs/build/html/Patient_Test_Data.html @@ -0,0 +1,546 @@ + + + + + + + + + + + Patient Test Data — VRE Model 0.0.1 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

Patient Test Data

+

The test patient dataset for the VRE model consists of a small subset of the complete dataset. The SQL queries used to +extract the test patient data are identical to those used for the true dataset, with the exception that data are +restricted to a small subset in the form of a simple WHERE statement. This filter includes the following 3 patient +IDs:

+
    +
  • 00003067149

  • +
  • 00008301433

  • +
  • 00004348346

  • +
+

Associated with these patients are the following 40 case IDs:

+ ++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Patient ID

Case ID

00003067149

0006280483

00003067149

0006314210

00003067149

0006336438

00003067149

0005889802

00003067149

0005873082

00003067149

0006065973

00003067149

0006091736

00003067149

0006148746

00003067149

0006334066

00003067149

0006059391

00003067149

0005976205

00003067149

0006057834

00003067149

0005983693

00003067149

0006520444

00003067149

0006931777

00003067149

0006812114

00003067149

0005965462

00003067149

0006452545

00003067149

0006433446

00003067149

0006466165

00004348346

0001927876

00004348346

0004555507

00004348346

0004728517

00004348346

0001928507

00004348346

0002802610

00004348346

0004204668

00004348346

0004181978

00003067149

0006951942

00003067149

0005880782

00008301433

0002289902

00008301433

0004411153

00008301433

0004411005

00008301433

0006565152

00008301433

0003962974

00008301433

0006594482

00008301433

0006596375

00008301433

0006551728

00003067149

0005864325

00003067149

0005877026

00003067149

0006069476

+

For partners, only a single entry associated to one of the three test patients is used:

+
    +
  • 0010000990

  • +
+

For CHOP codes, the following sub-selection of codes will be imported:

+
    +
  • Z99.B8.11

  • +
  • Z50.27.32

  • +
  • Z00.99.60

  • +
  • Z88.38.60

  • +
  • Z99.85

  • +
  • Z89.07.24

  • +
  • Z50.27.32

  • +
  • Z00.99.60

  • +
  • Z88.38.60

  • +
  • Z99.85

  • +
  • Z50.23.13

  • +
  • Z50.12.09

  • +
  • Z88.79.50

  • +
  • Z00.9A.13

  • +
  • Z39.32.41

  • +
  • Z50.93

  • +
  • Z34.84

  • +
  • Z34.89.99

  • +
  • Z39.29.89

  • +
  • Z50.52

  • +
  • Z00.93.99

  • +
  • Z00.90.99

  • +
  • Z00.99.10

  • +
  • Z50.27.32

  • +
  • Z00.99.60

  • +
  • Z88.38.60

  • +
  • Z99.85

  • +
  • Z99.04.10

  • +
  • Z94.8X.40

  • +
  • Z99.B7.12

  • +
  • Z99.07.3C

  • +
  • Z99.04.15

  • +
  • Z99.05.47

  • +
  • Z99.B7.13

  • +
  • Z99.0A

  • +
  • Z99.28.11

  • +
  • Z50.52

  • +
  • Z54.52

  • +
  • Z00.93.99

  • +
  • Z00.90.99

  • +
  • Z51.22.11

  • +
  • Z39.29.89

  • +
  • Z99.00

  • +
  • Z54.12.11

  • +
  • Z50.12.12

  • +
  • Z88.79.50

  • +
  • Z54.25

  • +
  • Z36.11.22

  • +
  • Z36.11.26

  • +
  • Z36.1C.12

  • +
  • Z39.61.10

  • +
  • Z39.63

  • +
  • Z39.64

  • +
  • Z88.79.50

  • +
  • Z01.16.12

  • +
  • Z99.00

  • +
+

For appointments (german: “Termine”), only the following 98 TerminIDs are used:

+
    +
  • 38515699

  • +
  • 38321122

  • +
  • 35416924

  • +
  • 1164130

  • +
  • 38470639

  • +
  • 41827160

  • +
  • 39893063

  • +
  • 38411180

  • +
  • 35571391

  • +
  • 35130813

  • +
  • 36160483

  • +
  • 40766840

  • +
  • 42155710

  • +
  • 39491988

  • +
  • 36067632

  • +
  • 37374631

  • +
  • 36129549

  • +
  • 39001478

  • +
  • 39425469

  • +
  • 34338471

  • +
  • 35630084

  • +
  • 35139096

  • +
  • 38431954

  • +
  • 38452040

  • +
  • 40344805

  • +
  • 13831398

  • +
  • 38063644

  • +
  • 38539785

  • +
  • 34220024

  • +
  • 39819467

  • +
  • 39423020

  • +
  • 38386995

  • +
  • 42394432

  • +
  • 38446243

  • +
  • 42213628

  • +
  • 38565198

  • +
  • 39893320

  • +
  • 37244357

  • +
  • 37554138

  • +
  • 41124954

  • +
  • 39051017

  • +
  • 36129560

  • +
  • 35621237

  • +
  • 38772701

  • +
  • 21130116

  • +
  • 38063650

  • +
  • 39608858

  • +
  • 39427731

  • +
  • 21131159

  • +
  • 38331618

  • +
  • 38062724

  • +
  • 24171386

  • +
  • 14908956

  • +
  • 41909560

  • +
  • 39114133

  • +
  • 14091256

  • +
  • 38939623

  • +
  • 35626775

  • +
  • 35139491

  • +
  • 36006751

  • +
  • 38329080

  • +
  • 41909690

  • +
  • 35130747

  • +
  • 36129541

  • +
  • 1278803

  • +
  • 38507433

  • +
  • 1192059

  • +
  • 39456191

  • +
  • 14091249

  • +
  • 39933520

  • +
  • 24291359

  • +
  • 36071093

  • +
  • 36160474

  • +
  • 19096210

  • +
  • 40218521

  • +
  • 1162144

  • +
  • 38660148

  • +
  • 42211133

  • +
  • 39613790

  • +
  • 24230235

  • +
  • 38262758

  • +
  • 35417252

  • +
  • 19252406

  • +
  • 39215737

  • +
  • 38446041

  • +
  • 36830543

  • +
  • 35200182

  • +
  • 40766156

  • +
  • 36070942

  • +
  • 34310589

  • +
  • 37232112

  • +
  • 34337667

  • +
  • 38446523

  • +
  • 34482529

  • +
  • 17297480

  • +
  • 39298995

  • +
  • 36830574

  • +
  • 1405150

  • +
+

And finally for devices, the subset is restricted to the following GeraetIDs:

+
    +
  • 134074

  • +
  • 125922

  • +
  • 137160

  • +
  • 125916

  • +
  • 125981

  • +
  • 125981

  • +
  • 64174

  • +
  • 125921

  • +
  • 125981

  • +
  • 125981

  • +
  • 125981

  • +
  • 125981

  • +
  • 125981

  • +
  • 28609

  • +
  • 86293

  • +
  • 125981

  • +
  • 125981

  • +
  • 125981

  • +
  • 86293

  • +
  • 125981

  • +
  • 125981

  • +
  • 64174

  • +
  • 125981

  • +
  • 125981

  • +
  • 125981

  • +
  • 125981

  • +
  • 125981

  • +
  • 125974

  • +
  • 28609

  • +
  • 125981

  • +
+
+
+ + +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/spitalhygiene/Docs/build/html/Unused.html b/spitalhygiene/Docs/build/html/Unused.html new file mode 100644 index 0000000..4ee6d18 --- /dev/null +++ b/spitalhygiene/Docs/build/html/Unused.html @@ -0,0 +1,210 @@ + + + + + + + + + + + Unused folder — VRE Model 0.0.1 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

Unused folder

+

This folder contains archived files and scripts no longer used in the current version of the VRE model.

+
+ + +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/spitalhygiene/Docs/build/html/VRE_Model.html b/spitalhygiene/Docs/build/html/VRE_Model.html new file mode 100644 index 0000000..edfa4f5 --- /dev/null +++ b/spitalhygiene/Docs/build/html/VRE_Model.html @@ -0,0 +1,212 @@ + + + + + + + + + + + VRE Model Overview — VRE Model 0.0.1 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

VRE Model Overview

+

Welcome to the VRE Model Documentation!

+

The VRE model is documented with Sphinx on a per-folder basis. The root folder for this documentation is the +spitalhygiene folder of the vre-data-science repository on the Insel GitHub.

+
+ + +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/spitalhygiene/Docs/build/html/_sources/Patient_Test_Data.rst.txt b/spitalhygiene/Docs/build/html/_sources/Patient_Test_Data.rst.txt new file mode 100644 index 0000000..1bb2794 --- /dev/null +++ b/spitalhygiene/Docs/build/html/_sources/Patient_Test_Data.rst.txt @@ -0,0 +1,258 @@ +******************** +Patient Test Data +******************** + +The test patient dataset for the VRE model consists of a small subset of the complete dataset. The SQL queries used to +extract the test patient data are `identical` to those used for the true dataset, with the exception that data are +restricted to a small subset in the form of a simple ``WHERE`` statement. This filter includes the following 3 patient +IDs: + +- 00003067149 +- 00008301433 +- 00004348346 + +Associated with these patients are the following 40 case IDs: + +=========== =========== +Patient ID Case ID +=========== =========== +00003067149 0006280483 +00003067149 0006314210 +00003067149 0006336438 +00003067149 0005889802 +00003067149 0005873082 +00003067149 0006065973 +00003067149 0006091736 +00003067149 0006148746 +00003067149 0006334066 +00003067149 0006059391 +00003067149 0005976205 +00003067149 0006057834 +00003067149 0005983693 +00003067149 0006520444 +00003067149 0006931777 +00003067149 0006812114 +00003067149 0005965462 +00003067149 0006452545 +00003067149 0006433446 +00003067149 0006466165 +00004348346 0001927876 +00004348346 0004555507 +00004348346 0004728517 +00004348346 0001928507 +00004348346 0002802610 +00004348346 0004204668 +00004348346 0004181978 +00003067149 0006951942 +00003067149 0005880782 +00008301433 0002289902 +00008301433 0004411153 +00008301433 0004411005 +00008301433 0006565152 +00008301433 0003962974 +00008301433 0006594482 +00008301433 0006596375 +00008301433 0006551728 +00003067149 0005864325 +00003067149 0005877026 +00003067149 0006069476 +=========== =========== + +For partners, only a single entry associated to one of 
the three test patients is used: + +- 0010000990 + +For CHOP codes, the following sub-selection of codes will be imported: + +- Z99.B8.11 +- Z50.27.32 +- Z00.99.60 +- Z88.38.60 +- Z99.85 +- Z89.07.24 +- Z50.27.32 +- Z00.99.60 +- Z88.38.60 +- Z99.85 +- Z50.23.13 +- Z50.12.09 +- Z88.79.50 +- Z00.9A.13 +- Z39.32.41 +- Z50.93 +- Z34.84 +- Z34.89.99 +- Z39.29.89 +- Z50.52 +- Z00.93.99 +- Z00.90.99 +- Z00.99.10 +- Z50.27.32 +- Z00.99.60 +- Z88.38.60 +- Z99.85 +- Z99.04.10 +- Z94.8X.40 +- Z99.B7.12 +- Z99.07.3C +- Z99.04.15 +- Z99.05.47 +- Z99.B7.13 +- Z99.0A +- Z99.28.11 +- Z50.52 +- Z54.52 +- Z00.93.99 +- Z00.90.99 +- Z51.22.11 +- Z39.29.89 +- Z99.00 +- Z54.12.11 +- Z50.12.12 +- Z88.79.50 +- Z54.25 +- Z36.11.22 +- Z36.11.26 +- Z36.1C.12 +- Z39.61.10 +- Z39.63 +- Z39.64 +- Z88.79.50 +- Z01.16.12 +- Z99.00 + +For appointments (german: "Termine"), only the following 98 TerminIDs are used: + +- 38515699 +- 38321122 +- 35416924 +- 1164130 +- 38470639 +- 41827160 +- 39893063 +- 38411180 +- 35571391 +- 35130813 +- 36160483 +- 40766840 +- 42155710 +- 39491988 +- 36067632 +- 37374631 +- 36129549 +- 39001478 +- 39425469 +- 34338471 +- 35630084 +- 35139096 +- 38431954 +- 38452040 +- 40344805 +- 13831398 +- 38063644 +- 38539785 +- 34220024 +- 39819467 +- 39423020 +- 38386995 +- 42394432 +- 38446243 +- 42213628 +- 38565198 +- 39893320 +- 37244357 +- 37554138 +- 41124954 +- 39051017 +- 36129560 +- 35621237 +- 38772701 +- 21130116 +- 38063650 +- 39608858 +- 39427731 +- 21131159 +- 38331618 +- 38062724 +- 24171386 +- 14908956 +- 41909560 +- 39114133 +- 14091256 +- 38939623 +- 35626775 +- 35139491 +- 36006751 +- 38329080 +- 41909690 +- 35130747 +- 36129541 +- 1278803 +- 38507433 +- 1192059 +- 39456191 +- 14091249 +- 39933520 +- 24291359 +- 36071093 +- 36160474 +- 19096210 +- 40218521 +- 1162144 +- 38660148 +- 42211133 +- 39613790 +- 24230235 +- 38262758 +- 35417252 +- 19252406 +- 39215737 +- 38446041 +- 36830543 +- 35200182 +- 40766156 +- 36070942 +- 34310589 +- 37232112 +- 34337667 +- 
38446523 +- 34482529 +- 17297480 +- 39298995 +- 36830574 +- 1405150 + +And finally for devices, the subset is restricted to the following GeraetIDs: + +- 134074 +- 125922 +- 137160 +- 125916 +- 125981 +- 125981 +- 64174 +- 125921 +- 125981 +- 125981 +- 125981 +- 125981 +- 125981 +- 28609 +- 86293 +- 125981 +- 125981 +- 125981 +- 86293 +- 125981 +- 125981 +- 64174 +- 125981 +- 125981 +- 125981 +- 125981 +- 125981 +- 125974 +- 28609 +- 125981 + +------ diff --git a/spitalhygiene/Docs/build/html/_sources/Unused.rst.txt b/spitalhygiene/Docs/build/html/_sources/Unused.rst.txt new file mode 100644 index 0000000..bf77846 --- /dev/null +++ b/spitalhygiene/Docs/build/html/_sources/Unused.rst.txt @@ -0,0 +1,6 @@ +************************ +``Unused`` folder +************************ + +This folder contains archived files and scripts no longer used in the current version of the VRE model. + diff --git a/spitalhygiene/Docs/build/html/_sources/VRE_Model.rst.txt b/spitalhygiene/Docs/build/html/_sources/VRE_Model.rst.txt new file mode 100644 index 0000000..84f8a6b --- /dev/null +++ b/spitalhygiene/Docs/build/html/_sources/VRE_Model.rst.txt @@ -0,0 +1,11 @@ +******************** +VRE Model Overview +******************** + +Welcome to the VRE Model Documentation! + +The VRE model is documented with `Sphinx` on a `per-folder` basis. The root folder for this documentation is the +``spitalhygiene`` folder of the ``vre-data-science`` repository on the Insel GitHub. + + + diff --git a/spitalhygiene/Docs/build/html/_sources/index.rst.txt b/spitalhygiene/Docs/build/html/_sources/index.rst.txt new file mode 100644 index 0000000..fa32ed8 --- /dev/null +++ b/spitalhygiene/Docs/build/html/_sources/index.rst.txt @@ -0,0 +1,21 @@ +.. VRE Model documentation master file, created by + sphinx-quickstart on Tue Apr 23 15:53:29 2019. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to the VRE Model documentation! 
+======================================= + +.. toctree:: + :caption: Contents: + + VRE_Model.rst + resources.rst + sql.rst + Unused.rst + vre.rst + model.rst + Patient_Test_Data.rst + + + diff --git a/spitalhygiene/Docs/build/html/_sources/model.rst.txt b/spitalhygiene/Docs/build/html/_sources/model.rst.txt new file mode 100644 index 0000000..0d52e1f --- /dev/null +++ b/spitalhygiene/Docs/build/html/_sources/model.rst.txt @@ -0,0 +1,65 @@ +***************************************** +``vre/src/main/python/vre/model`` folder +***************************************** + +This folder contains the definitions for all classes used in the VRE model. + + +----- + +File: Appointment.py +=============================== + +.. automodule:: Appointment + :members: + + +File: Bed.py +=============================== + +.. automodule:: Bed + :members: + + +File: Care.py +=============================== + +.. automodule:: Care + :members: + + +File: Chop.py +=============================== + +.. automodule:: Chop + :members: + + +File: Devices.py +=============================== + +.. automodule:: Device + :members: + + +File: Employee.py +=============================== + +.. automodule:: Employee + :members: + + +File: ICD.py +=============================== + +.. automodule:: ICD + :members: + + +File: Medication.py +=============================== + +.. automodule:: Medication + :members: + + diff --git a/spitalhygiene/Docs/build/html/_sources/resources.rst.txt b/spitalhygiene/Docs/build/html/_sources/resources.rst.txt new file mode 100644 index 0000000..a0a0813 --- /dev/null +++ b/spitalhygiene/Docs/build/html/_sources/resources.rst.txt @@ -0,0 +1,28 @@ +************************ +``resources`` folder +************************ + +This folder contains important functions for loading data from SQL into CSV, thereby preparing the "raw" data used for +building the actual network models. 
+ +Most importantly, this folder also contains the file ``Update_Model.sh``, which is a bash script controlling `all steps` +in the VRE calculation. Since all VRE data are recalculated once per day, this includes (in order): + +1) Backing up data from the previous calculation cycle in the HDFS file system (from step 4 in the previous run) +2) Reloading data from SQL into CSV +3) Running the VRE analysis (all steps of the analysis are controlled with ``feature_extractor.py``) +4) Adding new data (SQL files, CSV files, other data) to the HDFS file system + + +File: Query_Atelier_Data.py +=============================== + +.. automodule:: Query_Atelier_Data + :members: + + +File: preprocesor.py +=============================== + +.. automodule:: preprocessor + :members: diff --git a/spitalhygiene/Docs/build/html/_sources/sql.rst.txt b/spitalhygiene/Docs/build/html/_sources/sql.rst.txt new file mode 100644 index 0000000..4ccc191 --- /dev/null +++ b/spitalhygiene/Docs/build/html/_sources/sql.rst.txt @@ -0,0 +1,6 @@ +************************ +``sql`` folder +************************ + +This folder contains important functions for extracting SQL data. + diff --git a/spitalhygiene/Docs/build/html/_sources/vre.rst.txt b/spitalhygiene/Docs/build/html/_sources/vre.rst.txt new file mode 100644 index 0000000..15213bf --- /dev/null +++ b/spitalhygiene/Docs/build/html/_sources/vre.rst.txt @@ -0,0 +1,36 @@ +***************************************** +``vre/src/main/python/vre`` folder +***************************************** + +This folder contains all files relevant for building the actual VRE model. + + +----- + +File: data_compiler.py +=============================== + +.. automodule:: data_compiler + :members: + + +File: feature_extractor.py +=============================== + +.. automodule:: feature_extractor + :members: + + +File: HDFS_data_loader.py +=============================== + +.. 
automodule:: HDFS_data_loader + :members: + + +File: networkx_graph.py +=============================== + +.. automodule:: networkx_graph + :members: + diff --git a/spitalhygiene/Docs/build/html/_static/ajax-loader.gif b/spitalhygiene/Docs/build/html/_static/ajax-loader.gif new file mode 100644 index 0000000..61faf8c Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/ajax-loader.gif differ diff --git a/spitalhygiene/Docs/build/html/_static/basic.css b/spitalhygiene/Docs/build/html/_static/basic.css new file mode 100644 index 0000000..53acd09 --- /dev/null +++ b/spitalhygiene/Docs/build/html/_static/basic.css @@ -0,0 +1,748 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + 
font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li div.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid 
#ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body { + min-width: 450px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +a.brackets:before, +span.brackets > a:before{ + content: "["; +} + +a.brackets:after, +span.brackets > a:after { + content: "]"; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px 7px 0 7px; + background-color: #ffe; + width: 40%; + float: right; +} + +p.sidebar-title { + font-weight: bold; +} + +/* 
-- topics ---------------------------------------------------------------- */ + +div.topic { + border: 1px solid #ccc; + padding: 7px 7px 0 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +div.admonition dl { + margin-bottom: 0; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +table.footnote td, table.footnote th { + border: 0 !important; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > p:first-child, +td > p:first-child { + margin-top: 0px; +} + +th > p:last-child, +td > p:last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list 
ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist td { + vertical-align: top; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +li > p:first-child { + margin-top: 0px; +} + +li > p:last-child { + margin-bottom: 0px; +} + +dl.footnote > dt, +dl.citation > dt { + float: left; +} + +dl.footnote > dd, +dl.citation > dd { + margin-bottom: 0em; +} + +dl.footnote > dd:after, +dl.citation > dd:after { + content: ""; + clear: both; +} + +dl.field-list { + display: flex; + flex-wrap: wrap; +} + +dl.field-list > dt { + flex-basis: 20%; + font-weight: bold; + word-break: break-word; +} + +dl.field-list > dt:after { + content: ":"; +} + +dl.field-list > dd { + flex-basis: 70%; + padding-left: 1em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > p:first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block 
.line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0.5em; + content: ":"; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; +} + +td.linenos pre { + padding: 5px 0px; + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + margin-left: 0.5em; +} + +table.highlighttable td { + padding: 0 0.5em 0 0.5em; +} + +div.code-block-caption { + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +div.code-block-caption + div > div.highlight > pre { + margin-top: 0; +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + padding: 1em 1em 0; +} + +div.literal-block-wrapper div.highlight { + margin: 0; +} + +code.descname { + background-color: transparent; + font-weight: bold; + font-size: 1.2em; +} + +code.descclassname { + background-color: transparent; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + 
+div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: relative; + left: 0px; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/spitalhygiene/Docs/build/html/_static/comment-bright.png b/spitalhygiene/Docs/build/html/_static/comment-bright.png new file mode 100644 index 0000000..15e27ed Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/comment-bright.png differ diff --git a/spitalhygiene/Docs/build/html/_static/comment-close.png b/spitalhygiene/Docs/build/html/_static/comment-close.png new file mode 100644 index 0000000..4d91bcf Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/comment-close.png differ diff --git a/spitalhygiene/Docs/build/html/_static/comment.png b/spitalhygiene/Docs/build/html/_static/comment.png new file mode 100644 index 0000000..dfbc0cb Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/comment.png differ diff --git a/spitalhygiene/Docs/build/html/_static/css/badge_only.css b/spitalhygiene/Docs/build/html/_static/css/badge_only.css new file mode 100644 index 0000000..3c33cef --- /dev/null +++ b/spitalhygiene/Docs/build/html/_static/css/badge_only.css @@ -0,0 +1 @@ +.fa:before{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:before,.clearfix:after{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-weight:normal;font-style:normal;src:url("../fonts/fontawesome-webfont.eot");src:url("../fonts/fontawesome-webfont.eot?#iefix") format("embedded-opentype"),url("../fonts/fontawesome-webfont.woff") 
format("woff"),url("../fonts/fontawesome-webfont.ttf") format("truetype"),url("../fonts/fontawesome-webfont.svg#FontAwesome") format("svg")}.fa:before{display:inline-block;font-family:FontAwesome;font-style:normal;font-weight:normal;line-height:1;text-decoration:inherit}a .fa{display:inline-block;text-decoration:inherit}li .fa{display:inline-block}li .fa-large:before,li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-0.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before,ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before{content:""}.icon-book:before{content:""}.fa-caret-down:before{content:""}.icon-caret-down:before{content:""}.fa-caret-up:before{content:""}.icon-caret-up:before{content:""}.fa-caret-left:before{content:""}.icon-caret-left:before{content:""}.fa-caret-right:before{content:""}.icon-caret-right:before{content:""}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;z-index:400}.rst-versions a{color:#2980B9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27AE60;*zoom:1}.rst-versions .rst-current-version:before,.rst-versions .rst-current-version:after{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book{float:left}.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#E74C3C;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#F1C40F;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions 
.rst-other-versions{font-size:90%;padding:12px;color:gray;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:solid 1px #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .icon-book{float:none}.rst-versions.rst-badge .fa-book{float:none}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book{float:left}.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge .rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width: 768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} diff --git a/spitalhygiene/Docs/build/html/_static/css/theme.css b/spitalhygiene/Docs/build/html/_static/css/theme.css new file mode 100644 index 0000000..aed8cef --- /dev/null +++ b/spitalhygiene/Docs/build/html/_static/css/theme.css @@ -0,0 +1,6 @@ +/* sphinx_rtd_theme version 0.4.3 | MIT license */ +/* Built 20190212 16:02 */ +*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}article,aside,details,figcaption,figure,footer,header,hgroup,nav,section{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}audio:not([controls]){display:none}[hidden]{display:none}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:100%;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a:hover,a:active{outline:0}abbr[title]{border-bottom:1px 
dotted}b,strong{font-weight:bold}blockquote{margin:0}dfn{font-style:italic}ins{background:#ff9;color:#000;text-decoration:none}mark{background:#ff0;color:#000;font-style:italic;font-weight:bold}pre,code,.rst-content tt,.rst-content code,kbd,samp{font-family:monospace,serif;_font-family:"courier new",monospace;font-size:1em}pre{white-space:pre}q{quotes:none}q:before,q:after{content:"";content:none}small{font-size:85%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-0.5em}sub{bottom:-0.25em}ul,ol,dl{margin:0;padding:0;list-style:none;list-style-image:none}li{list-style:none}dd{margin:0}img{border:0;-ms-interpolation-mode:bicubic;vertical-align:middle;max-width:100%}svg:not(:root){overflow:hidden}figure{margin:0}form{margin:0}fieldset{border:0;margin:0;padding:0}label{cursor:pointer}legend{border:0;*margin-left:-7px;padding:0;white-space:normal}button,input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}button,input{line-height:normal}button,input[type="button"],input[type="reset"],input[type="submit"]{cursor:pointer;-webkit-appearance:button;*overflow:visible}button[disabled],input[disabled]{cursor:default}input[type="checkbox"],input[type="radio"]{box-sizing:border-box;padding:0;*width:13px;*height:13px}input[type="search"]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}input[type="search"]::-webkit-search-decoration,input[type="search"]::-webkit-search-cancel-button{-webkit-appearance:none}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}textarea{overflow:auto;vertical-align:top;resize:vertical}table{border-collapse:collapse;border-spacing:0}td{vertical-align:top}.chromeframe{margin:.2em 0;background:#ccc;color:#000;padding:.2em 0}.ir{display:block;border:0;text-indent:-999em;overflow:hidden;background-color:transparent;background-repeat:no-repeat;text-align:left;direction:ltr;*line-height:0}.ir 
br{display:none}.hidden{display:none !important;visibility:hidden}.visuallyhidden{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.visuallyhidden.focusable:active,.visuallyhidden.focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}.invisible{visibility:hidden}.relative{position:relative}big,small{font-size:100%}@media print{html,body,section{background:none !important}*{box-shadow:none !important;text-shadow:none !important;filter:none !important;-ms-filter:none !important}a,a:visited{text-decoration:underline}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}@page{margin:.5cm}p,h2,.rst-content .toctree-wrapper p.caption,h3{orphans:3;widows:3}h2,.rst-content .toctree-wrapper p.caption,h3{page-break-after:avoid}}.fa:before,.wy-menu-vertical li span.toctree-expand:before,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li.current>a span.toctree-expand:before,.rst-content .admonition-title:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content dl dt .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content .code-block-caption .headerlink:before,.rst-content tt.download span:first-child:before,.rst-content code.download span:first-child:before,.icon:before,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info 
.wy-input-context:before,.wy-alert,.rst-content .note,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .warning,.rst-content .seealso,.rst-content .admonition-todo,.rst-content .admonition,.btn,input[type="text"],input[type="password"],input[type="email"],input[type="url"],input[type="date"],input[type="month"],input[type="time"],input[type="datetime"],input[type="datetime-local"],input[type="week"],input[type="number"],input[type="search"],input[type="tel"],input[type="color"],select,textarea,.wy-menu-vertical li.on a,.wy-menu-vertical li.current>a,.wy-side-nav-search>a,.wy-side-nav-search .wy-dropdown>a,.wy-nav-top a{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:before,.clearfix:after{display:table;content:""}.clearfix:after{clear:both}/*! + * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome + * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) + */@font-face{font-family:'FontAwesome';src:url("../fonts/fontawesome-webfont.eot?v=4.7.0");src:url("../fonts/fontawesome-webfont.eot?#iefix&v=4.7.0") format("embedded-opentype"),url("../fonts/fontawesome-webfont.woff2?v=4.7.0") format("woff2"),url("../fonts/fontawesome-webfont.woff?v=4.7.0") format("woff"),url("../fonts/fontawesome-webfont.ttf?v=4.7.0") format("truetype"),url("../fonts/fontawesome-webfont.svg?v=4.7.0#fontawesomeregular") format("svg");font-weight:normal;font-style:normal}.fa,.wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.current>a span.toctree-expand,.rst-content .admonition-title,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content dl dt .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink,.rst-content 
.code-block-caption .headerlink,.rst-content tt.download span:first-child,.rst-content code.download span:first-child,.icon{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.3333333333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.2857142857em;text-align:center}.fa-ul{padding-left:0;margin-left:2.1428571429em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.1428571429em;width:2.1428571429em;top:.1428571429em;text-align:center}.fa-li.fa-lg{left:-1.8571428571em}.fa-border{padding:.2em .25em .15em;border:solid 0.08em #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa.fa-pull-left,.wy-menu-vertical li span.fa-pull-left.toctree-expand,.wy-menu-vertical li.on a span.fa-pull-left.toctree-expand,.wy-menu-vertical li.current>a span.fa-pull-left.toctree-expand,.rst-content .fa-pull-left.admonition-title,.rst-content h1 .fa-pull-left.headerlink,.rst-content h2 .fa-pull-left.headerlink,.rst-content h3 .fa-pull-left.headerlink,.rst-content h4 .fa-pull-left.headerlink,.rst-content h5 .fa-pull-left.headerlink,.rst-content h6 .fa-pull-left.headerlink,.rst-content dl dt .fa-pull-left.headerlink,.rst-content p.caption .fa-pull-left.headerlink,.rst-content table>caption .fa-pull-left.headerlink,.rst-content .code-block-caption .fa-pull-left.headerlink,.rst-content tt.download span.fa-pull-left:first-child,.rst-content code.download span.fa-pull-left:first-child,.fa-pull-left.icon{margin-right:.3em}.fa.fa-pull-right,.wy-menu-vertical li span.fa-pull-right.toctree-expand,.wy-menu-vertical li.on a span.fa-pull-right.toctree-expand,.wy-menu-vertical li.current>a span.fa-pull-right.toctree-expand,.rst-content .fa-pull-right.admonition-title,.rst-content h1 .fa-pull-right.headerlink,.rst-content h2 
.fa-pull-right.headerlink,.rst-content h3 .fa-pull-right.headerlink,.rst-content h4 .fa-pull-right.headerlink,.rst-content h5 .fa-pull-right.headerlink,.rst-content h6 .fa-pull-right.headerlink,.rst-content dl dt .fa-pull-right.headerlink,.rst-content p.caption .fa-pull-right.headerlink,.rst-content table>caption .fa-pull-right.headerlink,.rst-content .code-block-caption .fa-pull-right.headerlink,.rst-content tt.download span.fa-pull-right:first-child,.rst-content code.download span.fa-pull-right:first-child,.fa-pull-right.icon{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left,.wy-menu-vertical li span.pull-left.toctree-expand,.wy-menu-vertical li.on a span.pull-left.toctree-expand,.wy-menu-vertical li.current>a span.pull-left.toctree-expand,.rst-content .pull-left.admonition-title,.rst-content h1 .pull-left.headerlink,.rst-content h2 .pull-left.headerlink,.rst-content h3 .pull-left.headerlink,.rst-content h4 .pull-left.headerlink,.rst-content h5 .pull-left.headerlink,.rst-content h6 .pull-left.headerlink,.rst-content dl dt .pull-left.headerlink,.rst-content p.caption .pull-left.headerlink,.rst-content table>caption .pull-left.headerlink,.rst-content .code-block-caption .pull-left.headerlink,.rst-content tt.download span.pull-left:first-child,.rst-content code.download span.pull-left:first-child,.pull-left.icon{margin-right:.3em}.fa.pull-right,.wy-menu-vertical li span.pull-right.toctree-expand,.wy-menu-vertical li.on a span.pull-right.toctree-expand,.wy-menu-vertical li.current>a span.pull-right.toctree-expand,.rst-content .pull-right.admonition-title,.rst-content h1 .pull-right.headerlink,.rst-content h2 .pull-right.headerlink,.rst-content h3 .pull-right.headerlink,.rst-content h4 .pull-right.headerlink,.rst-content h5 .pull-right.headerlink,.rst-content h6 .pull-right.headerlink,.rst-content dl dt .pull-right.headerlink,.rst-content p.caption .pull-right.headerlink,.rst-content table>caption .pull-right.headerlink,.rst-content 
.code-block-caption .pull-right.headerlink,.rst-content tt.download span.pull-right:first-child,.rst-content code.download span.pull-right:first-child,.pull-right.icon{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s infinite linear;animation:fa-spin 2s infinite linear}.fa-pulse{-webkit-animation:fa-spin 1s infinite steps(8);animation:fa-spin 1s infinite steps(8)}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scale(-1, 1);-ms-transform:scale(-1, 1);transform:scale(-1, 1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scale(1, -1);-ms-transform:scale(1, -1);transform:scale(1, -1)}:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270,:root .fa-flip-horizontal,:root 
.fa-flip-vertical{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:""}.fa-music:before{content:""}.fa-search:before,.icon-search:before{content:""}.fa-envelope-o:before{content:""}.fa-heart:before{content:""}.fa-star:before{content:""}.fa-star-o:before{content:""}.fa-user:before{content:""}.fa-film:before{content:""}.fa-th-large:before{content:""}.fa-th:before{content:""}.fa-th-list:before{content:""}.fa-check:before{content:""}.fa-remove:before,.fa-close:before,.fa-times:before{content:""}.fa-search-plus:before{content:""}.fa-search-minus:before{content:""}.fa-power-off:before{content:""}.fa-signal:before{content:""}.fa-gear:before,.fa-cog:before{content:""}.fa-trash-o:before{content:""}.fa-home:before,.icon-home:before{content:""}.fa-file-o:before{content:""}.fa-clock-o:before{content:""}.fa-road:before{content:""}.fa-download:before,.rst-content tt.download span:first-child:before,.rst-content code.download 
span:first-child:before{content:""}.fa-arrow-circle-o-down:before{content:""}.fa-arrow-circle-o-up:before{content:""}.fa-inbox:before{content:""}.fa-play-circle-o:before{content:""}.fa-rotate-right:before,.fa-repeat:before{content:""}.fa-refresh:before{content:""}.fa-list-alt:before{content:""}.fa-lock:before{content:""}.fa-flag:before{content:""}.fa-headphones:before{content:""}.fa-volume-off:before{content:""}.fa-volume-down:before{content:""}.fa-volume-up:before{content:""}.fa-qrcode:before{content:""}.fa-barcode:before{content:""}.fa-tag:before{content:""}.fa-tags:before{content:""}.fa-book:before,.icon-book:before{content:""}.fa-bookmark:before{content:""}.fa-print:before{content:""}.fa-camera:before{content:""}.fa-font:before{content:""}.fa-bold:before{content:""}.fa-italic:before{content:""}.fa-text-height:before{content:""}.fa-text-width:before{content:""}.fa-align-left:before{content:""}.fa-align-center:before{content:""}.fa-align-right:before{content:""}.fa-align-justify:before{content:""}.fa-list:before{content:""}.fa-dedent:before,.fa-outdent:before{content:""}.fa-indent:before{content:""}.fa-video-camera:before{content:""}.fa-photo:before,.fa-image:before,.fa-picture-o:before{content:""}.fa-pencil:before{content:""}.fa-map-marker:before{content:""}.fa-adjust:before{content:""}.fa-tint:before{content:""}.fa-edit:before,.fa-pencil-square-o:before{content:""}.fa-share-square-o:before{content:""}.fa-check-square-o:before{content:""}.fa-arrows:before{content:""}.fa-step-backward:before{content:""}.fa-fast-backward:before{content:""}.fa-backward:before{content:""}.fa-play:before{content:""}.fa-pause:before{content:""}.fa-stop:before{content:""}.fa-forward:before{content:""}.fa-fast-forward:before{content:""}.fa-step-forward:before{content:""}.fa-eject:before{content:""}.fa-chevron-left:before{content:""}.fa-chevron-right:before{content:""}.fa-plus-circle:before{content:""}.fa-minus-circle:before{content
:""}.fa-times-circle:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before{content:""}.fa-check-circle:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before{content:""}.fa-question-circle:before{content:""}.fa-info-circle:before{content:""}.fa-crosshairs:before{content:""}.fa-times-circle-o:before{content:""}.fa-check-circle-o:before{content:""}.fa-ban:before{content:""}.fa-arrow-left:before{content:""}.fa-arrow-right:before{content:""}.fa-arrow-up:before{content:""}.fa-arrow-down:before{content:""}.fa-mail-forward:before,.fa-share:before{content:""}.fa-expand:before{content:""}.fa-compress:before{content:""}.fa-plus:before{content:""}.fa-minus:before{content:""}.fa-asterisk:before{content:""}.fa-exclamation-circle:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.rst-content .admonition-title:before{content:""}.fa-gift:before{content:""}.fa-leaf:before{content:""}.fa-fire:before,.icon-fire:before{content:""}.fa-eye:before{content:""}.fa-eye-slash:before{content:""}.fa-warning:before,.fa-exclamation-triangle:before{content:""}.fa-plane:before{content:""}.fa-calendar:before{content:""}.fa-random:before{content:""}.fa-comment:before{content:""}.fa-magnet:before{content:""}.fa-chevron-up:before{content:""}.fa-chevron-down:before{content:""}.fa-retweet:before{content:""}.fa-shopping-cart:before{content:""}.fa-folder:before{content:""}.fa-folder-open:before{content:""}.fa-arrows-v:before{content:""}.fa-arrows-h:before{content:""}.fa-bar-chart-o:before,.fa-bar-chart:before{content:""}.fa-twitter-square:before{content:""}.fa-facebook-square:before{content:""}.fa-camera-retro:before{content:""}.fa-key:before{content:""}.fa-gears:before,.fa-cogs:before{content:""}.fa-comments:before{content:""}.fa-thumbs-o-up:before{content:""}.fa-thumbs-o-down:before{content:""}.fa-star-half:before
{content:""}.fa-heart-o:before{content:""}.fa-sign-out:before{content:""}.fa-linkedin-square:before{content:""}.fa-thumb-tack:before{content:""}.fa-external-link:before{content:""}.fa-sign-in:before{content:""}.fa-trophy:before{content:""}.fa-github-square:before{content:""}.fa-upload:before{content:""}.fa-lemon-o:before{content:""}.fa-phone:before{content:""}.fa-square-o:before{content:""}.fa-bookmark-o:before{content:""}.fa-phone-square:before{content:""}.fa-twitter:before{content:""}.fa-facebook-f:before,.fa-facebook:before{content:""}.fa-github:before,.icon-github:before{content:""}.fa-unlock:before{content:""}.fa-credit-card:before{content:""}.fa-feed:before,.fa-rss:before{content:""}.fa-hdd-o:before{content:""}.fa-bullhorn:before{content:""}.fa-bell:before{content:""}.fa-certificate:before{content:""}.fa-hand-o-right:before{content:""}.fa-hand-o-left:before{content:""}.fa-hand-o-up:before{content:""}.fa-hand-o-down:before{content:""}.fa-arrow-circle-left:before,.icon-circle-arrow-left:before{content:""}.fa-arrow-circle-right:before,.icon-circle-arrow-right:before{content:""}.fa-arrow-circle-up:before{content:""}.fa-arrow-circle-down:before{content:""}.fa-globe:before{content:""}.fa-wrench:before{content:""}.fa-tasks:before{content:""}.fa-filter:before{content:""}.fa-briefcase:before{content:""}.fa-arrows-alt:before{content:""}.fa-group:before,.fa-users:before{content:""}.fa-chain:before,.fa-link:before,.icon-link:before{content:""}.fa-cloud:before{content:""}.fa-flask:before{content:""}.fa-cut:before,.fa-scissors:before{content:""}.fa-copy:before,.fa-files-o:before{content:""}.fa-paperclip:before{content:""}.fa-save:before,.fa-floppy-o:before{content:""}.fa-square:before{content:""}.fa-navicon:before,.fa-reorder:before,.fa-bars:before{content:""}.fa-list-ul:before{content:""}.fa-list-ol:before{content:""}.fa-strikethrough:before{content:""}.fa-underline:before{content:""}.fa-table:before{content:""}.fa-magi
c:before{content:""}.fa-truck:before{content:""}.fa-pinterest:before{content:""}.fa-pinterest-square:before{content:""}.fa-google-plus-square:before{content:""}.fa-google-plus:before{content:""}.fa-money:before{content:""}.fa-caret-down:before,.wy-dropdown .caret:before,.icon-caret-down:before{content:""}.fa-caret-up:before{content:""}.fa-caret-left:before{content:""}.fa-caret-right:before{content:""}.fa-columns:before{content:""}.fa-unsorted:before,.fa-sort:before{content:""}.fa-sort-down:before,.fa-sort-desc:before{content:""}.fa-sort-up:before,.fa-sort-asc:before{content:""}.fa-envelope:before{content:""}.fa-linkedin:before{content:""}.fa-rotate-left:before,.fa-undo:before{content:""}.fa-legal:before,.fa-gavel:before{content:""}.fa-dashboard:before,.fa-tachometer:before{content:""}.fa-comment-o:before{content:""}.fa-comments-o:before{content:""}.fa-flash:before,.fa-bolt:before{content:""}.fa-sitemap:before{content:""}.fa-umbrella:before{content:""}.fa-paste:before,.fa-clipboard:before{content:""}.fa-lightbulb-o:before{content:""}.fa-exchange:before{content:""}.fa-cloud-download:before{content:""}.fa-cloud-upload:before{content:""}.fa-user-md:before{content:""}.fa-stethoscope:before{content:""}.fa-suitcase:before{content:""}.fa-bell-o:before{content:""}.fa-coffee:before{content:""}.fa-cutlery:before{content:""}.fa-file-text-o:before{content:""}.fa-building-o:before{content:""}.fa-hospital-o:before{content:""}.fa-ambulance:before{content:""}.fa-medkit:before{content:""}.fa-fighter-jet:before{content:""}.fa-beer:before{content:""}.fa-h-square:before{content:""}.fa-plus-square:before{content:""}.fa-angle-double-left:before{content:""}.fa-angle-double-right:before{content:""}.fa-angle-double-up:before{content:""}.fa-angle-double-down:before{content:""}.fa-angle-left:before{content:""}.fa-angle-right:before{content:""}.fa-angle-up:before{content:""}.fa-angle-down:before{content:""}.fa-desktop:before{content:""}.fa-l
aptop:before{content:""}.fa-tablet:before{content:""}.fa-mobile-phone:before,.fa-mobile:before{content:""}.fa-circle-o:before{content:""}.fa-quote-left:before{content:""}.fa-quote-right:before{content:""}.fa-spinner:before{content:""}.fa-circle:before{content:""}.fa-mail-reply:before,.fa-reply:before{content:""}.fa-github-alt:before{content:""}.fa-folder-o:before{content:""}.fa-folder-open-o:before{content:""}.fa-smile-o:before{content:""}.fa-frown-o:before{content:""}.fa-meh-o:before{content:""}.fa-gamepad:before{content:""}.fa-keyboard-o:before{content:""}.fa-flag-o:before{content:""}.fa-flag-checkered:before{content:""}.fa-terminal:before{content:""}.fa-code:before{content:""}.fa-mail-reply-all:before,.fa-reply-all:before{content:""}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:""}.fa-location-arrow:before{content:""}.fa-crop:before{content:""}.fa-code-fork:before{content:""}.fa-unlink:before,.fa-chain-broken:before{content:""}.fa-question:before{content:""}.fa-info:before{content:""}.fa-exclamation:before{content:""}.fa-superscript:before{content:""}.fa-subscript:before{content:""}.fa-eraser:before{content:""}.fa-puzzle-piece:before{content:""}.fa-microphone:before{content:""}.fa-microphone-slash:before{content:""}.fa-shield:before{content:""}.fa-calendar-o:before{content:""}.fa-fire-extinguisher:before{content:""}.fa-rocket:before{content:""}.fa-maxcdn:before{content:""}.fa-chevron-circle-left:before{content:""}.fa-chevron-circle-right:before{content:""}.fa-chevron-circle-up:before{content:""}.fa-chevron-circle-down:before{content:""}.fa-html5:before{content:""}.fa-css3:before{content:""}.fa-anchor:before{content:""}.fa-unlock-alt:before{content:""}.fa-bullseye:before{content:""}.fa-ellipsis-h:before{content:""}.fa-ellipsis-v:before{content:""}.fa-rss-square:before{content:""}.fa-play-circle:before{content:""}.fa-ticket:before{content:""}.fa-minus-square:before{content:
""}.fa-minus-square-o:before,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li.current>a span.toctree-expand:before{content:""}.fa-level-up:before{content:""}.fa-level-down:before{content:""}.fa-check-square:before{content:""}.fa-pencil-square:before{content:""}.fa-external-link-square:before{content:""}.fa-share-square:before{content:""}.fa-compass:before{content:""}.fa-toggle-down:before,.fa-caret-square-o-down:before{content:""}.fa-toggle-up:before,.fa-caret-square-o-up:before{content:""}.fa-toggle-right:before,.fa-caret-square-o-right:before{content:""}.fa-euro:before,.fa-eur:before{content:""}.fa-gbp:before{content:""}.fa-dollar:before,.fa-usd:before{content:""}.fa-rupee:before,.fa-inr:before{content:""}.fa-cny:before,.fa-rmb:before,.fa-yen:before,.fa-jpy:before{content:""}.fa-ruble:before,.fa-rouble:before,.fa-rub:before{content:""}.fa-won:before,.fa-krw:before{content:""}.fa-bitcoin:before,.fa-btc:before{content:""}.fa-file:before{content:""}.fa-file-text:before{content:""}.fa-sort-alpha-asc:before{content:""}.fa-sort-alpha-desc:before{content:""}.fa-sort-amount-asc:before{content:""}.fa-sort-amount-desc:before{content:""}.fa-sort-numeric-asc:before{content:""}.fa-sort-numeric-desc:before{content:""}.fa-thumbs-up:before{content:""}.fa-thumbs-down:before{content:""}.fa-youtube-square:before{content:""}.fa-youtube:before{content:""}.fa-xing:before{content:""}.fa-xing-square:before{content:""}.fa-youtube-play:before{content:""}.fa-dropbox:before{content:""}.fa-stack-overflow:before{content:""}.fa-instagram:before{content:""}.fa-flickr:before{content:""}.fa-adn:before{content:""}.fa-bitbucket:before,.icon-bitbucket:before{content:""}.fa-bitbucket-square:before{content:""}.fa-tumblr:before{content:""}.fa-tumblr-square:before{content:""}.fa-long-arrow-down:before{content:""}.fa-long-arrow-up:before{content:""}.fa-long-arrow-left:before{content:""}.fa-long-arrow-right:before{content:""}.fa-apple
:before{content:""}.fa-windows:before{content:""}.fa-android:before{content:""}.fa-linux:before{content:""}.fa-dribbble:before{content:""}.fa-skype:before{content:""}.fa-foursquare:before{content:""}.fa-trello:before{content:""}.fa-female:before{content:""}.fa-male:before{content:""}.fa-gittip:before,.fa-gratipay:before{content:""}.fa-sun-o:before{content:""}.fa-moon-o:before{content:""}.fa-archive:before{content:""}.fa-bug:before{content:""}.fa-vk:before{content:""}.fa-weibo:before{content:""}.fa-renren:before{content:""}.fa-pagelines:before{content:""}.fa-stack-exchange:before{content:""}.fa-arrow-circle-o-right:before{content:""}.fa-arrow-circle-o-left:before{content:""}.fa-toggle-left:before,.fa-caret-square-o-left:before{content:""}.fa-dot-circle-o:before{content:""}.fa-wheelchair:before{content:""}.fa-vimeo-square:before{content:""}.fa-turkish-lira:before,.fa-try:before{content:""}.fa-plus-square-o:before,.wy-menu-vertical li span.toctree-expand:before{content:""}.fa-space-shuttle:before{content:""}.fa-slack:before{content:""}.fa-envelope-square:before{content:""}.fa-wordpress:before{content:""}.fa-openid:before{content:""}.fa-institution:before,.fa-bank:before,.fa-university:before{content:""}.fa-mortar-board:before,.fa-graduation-cap:before{content:""}.fa-yahoo:before{content:""}.fa-google:before{content:""}.fa-reddit:before{content:""}.fa-reddit-square:before{content:""}.fa-stumbleupon-circle:before{content:""}.fa-stumbleupon:before{content:""}.fa-delicious:before{content:""}.fa-digg:before{content:""}.fa-pied-piper-pp:before{content:""}.fa-pied-piper-alt:before{content:""}.fa-drupal:before{content:""}.fa-joomla:before{content:""}.fa-language:before{content:""}.fa-fax:before{content:""}.fa-building:before{content:""}.fa-child:before{content:""}.fa-paw:before{content:""}.fa-spoon:before{content:""}.fa-cube:before{content:""}.fa-cubes:before{content:""}.fa-behance:before{content:""}.fa-behance-square:bef
ore{content:""}.fa-steam:before{content:""}.fa-steam-square:before{content:""}.fa-recycle:before{content:""}.fa-automobile:before,.fa-car:before{content:""}.fa-cab:before,.fa-taxi:before{content:""}.fa-tree:before{content:""}.fa-spotify:before{content:""}.fa-deviantart:before{content:""}.fa-soundcloud:before{content:""}.fa-database:before{content:""}.fa-file-pdf-o:before{content:""}.fa-file-word-o:before{content:""}.fa-file-excel-o:before{content:""}.fa-file-powerpoint-o:before{content:""}.fa-file-photo-o:before,.fa-file-picture-o:before,.fa-file-image-o:before{content:""}.fa-file-zip-o:before,.fa-file-archive-o:before{content:""}.fa-file-sound-o:before,.fa-file-audio-o:before{content:""}.fa-file-movie-o:before,.fa-file-video-o:before{content:""}.fa-file-code-o:before{content:""}.fa-vine:before{content:""}.fa-codepen:before{content:""}.fa-jsfiddle:before{content:""}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-saver:before,.fa-support:before,.fa-life-ring:before{content:""}.fa-circle-o-notch:before{content:""}.fa-ra:before,.fa-resistance:before,.fa-rebel:before{content:""}.fa-ge:before,.fa-empire:before{content:""}.fa-git-square:before{content:""}.fa-git:before{content:""}.fa-y-combinator-square:before,.fa-yc-square:before,.fa-hacker-news:before{content:""}.fa-tencent-weibo:before{content:""}.fa-qq:before{content:""}.fa-wechat:before,.fa-weixin:before{content:""}.fa-send:before,.fa-paper-plane:before{content:""}.fa-send-o:before,.fa-paper-plane-o:before{content:""}.fa-history:before{content:""}.fa-circle-thin:before{content:""}.fa-header:before{content:""}.fa-paragraph:before{content:""}.fa-sliders:before{content:""}.fa-share-alt:before{content:""}.fa-share-alt-square:before{content:""}.fa-bomb:before{content:""}.fa-soccer-ball-o:before,.fa-futbol-o:before{content:""}.fa-tty:before{content:""}.fa-binoculars:before{content:""}.fa-plug:before{content:""}.fa-slideshare:before{content:""}.fa-twitch:before{content:""}
.fa-yelp:before{content:""}.fa-newspaper-o:before{content:""}.fa-wifi:before{content:""}.fa-calculator:before{content:""}.fa-paypal:before{content:""}.fa-google-wallet:before{content:""}.fa-cc-visa:before{content:""}.fa-cc-mastercard:before{content:""}.fa-cc-discover:before{content:""}.fa-cc-amex:before{content:""}.fa-cc-paypal:before{content:""}.fa-cc-stripe:before{content:""}.fa-bell-slash:before{content:""}.fa-bell-slash-o:before{content:""}.fa-trash:before{content:""}.fa-copyright:before{content:""}.fa-at:before{content:""}.fa-eyedropper:before{content:""}.fa-paint-brush:before{content:""}.fa-birthday-cake:before{content:""}.fa-area-chart:before{content:""}.fa-pie-chart:before{content:""}.fa-line-chart:before{content:""}.fa-lastfm:before{content:""}.fa-lastfm-square:before{content:""}.fa-toggle-off:before{content:""}.fa-toggle-on:before{content:""}.fa-bicycle:before{content:""}.fa-bus:before{content:""}.fa-ioxhost:before{content:""}.fa-angellist:before{content:""}.fa-cc:before{content:""}.fa-shekel:before,.fa-sheqel:before,.fa-ils:before{content:""}.fa-meanpath:before{content:""}.fa-buysellads:before{content:""}.fa-connectdevelop:before{content:""}.fa-dashcube:before{content:""}.fa-forumbee:before{content:""}.fa-leanpub:before{content:""}.fa-sellsy:before{content:""}.fa-shirtsinbulk:before{content:""}.fa-simplybuilt:before{content:""}.fa-skyatlas:before{content:""}.fa-cart-plus:before{content:""}.fa-cart-arrow-down:before{content:""}.fa-diamond:before{content:""}.fa-ship:before{content:""}.fa-user-secret:before{content:""}.fa-motorcycle:before{content:""}.fa-street-view:before{content:""}.fa-heartbeat:before{content:""}.fa-venus:before{content:""}.fa-mars:before{content:""}.fa-mercury:before{content:""}.fa-intersex:before,.fa-transgender:before{content:""}.fa-transgender-alt:before{content:""}.fa-venus-double:before{content:""}.fa-mars-double:before{content:""}.fa-venus-mars:before{content:""}.fa-mars-st
roke:before{content:""}.fa-mars-stroke-v:before{content:""}.fa-mars-stroke-h:before{content:""}.fa-neuter:before{content:""}.fa-genderless:before{content:""}.fa-facebook-official:before{content:""}.fa-pinterest-p:before{content:""}.fa-whatsapp:before{content:""}.fa-server:before{content:""}.fa-user-plus:before{content:""}.fa-user-times:before{content:""}.fa-hotel:before,.fa-bed:before{content:""}.fa-viacoin:before{content:""}.fa-train:before{content:""}.fa-subway:before{content:""}.fa-medium:before{content:""}.fa-yc:before,.fa-y-combinator:before{content:""}.fa-optin-monster:before{content:""}.fa-opencart:before{content:""}.fa-expeditedssl:before{content:""}.fa-battery-4:before,.fa-battery:before,.fa-battery-full:before{content:""}.fa-battery-3:before,.fa-battery-three-quarters:before{content:""}.fa-battery-2:before,.fa-battery-half:before{content:""}.fa-battery-1:before,.fa-battery-quarter:before{content:""}.fa-battery-0:before,.fa-battery-empty:before{content:""}.fa-mouse-pointer:before{content:""}.fa-i-cursor:before{content:""}.fa-object-group:before{content:""}.fa-object-ungroup:before{content:""}.fa-sticky-note:before{content:""}.fa-sticky-note-o:before{content:""}.fa-cc-jcb:before{content:""}.fa-cc-diners-club:before{content:""}.fa-clone:before{content:""}.fa-balance-scale:before{content:""}.fa-hourglass-o:before{content:""}.fa-hourglass-1:before,.fa-hourglass-start:before{content:""}.fa-hourglass-2:before,.fa-hourglass-half:before{content:""}.fa-hourglass-3:before,.fa-hourglass-end:before{content:""}.fa-hourglass:before{content:""}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:""}.fa-hand-stop-o:before,.fa-hand-paper-o:before{content:""}.fa-hand-scissors-o:before{content:""}.fa-hand-lizard-o:before{content:""}.fa-hand-spock-o:before{content:""}.fa-hand-pointer-o:before{content:""}.fa-hand-peace-o:before{content:""}.fa-trademark:before{content:""}.fa-registered:before{content:""}.fa-creative-commons:befor
e{content:""}.fa-gg:before{content:""}.fa-gg-circle:before{content:""}.fa-tripadvisor:before{content:""}.fa-odnoklassniki:before{content:""}.fa-odnoklassniki-square:before{content:""}.fa-get-pocket:before{content:""}.fa-wikipedia-w:before{content:""}.fa-safari:before{content:""}.fa-chrome:before{content:""}.fa-firefox:before{content:""}.fa-opera:before{content:""}.fa-internet-explorer:before{content:""}.fa-tv:before,.fa-television:before{content:""}.fa-contao:before{content:""}.fa-500px:before{content:""}.fa-amazon:before{content:""}.fa-calendar-plus-o:before{content:""}.fa-calendar-minus-o:before{content:""}.fa-calendar-times-o:before{content:""}.fa-calendar-check-o:before{content:""}.fa-industry:before{content:""}.fa-map-pin:before{content:""}.fa-map-signs:before{content:""}.fa-map-o:before{content:""}.fa-map:before{content:""}.fa-commenting:before{content:""}.fa-commenting-o:before{content:""}.fa-houzz:before{content:""}.fa-vimeo:before{content:""}.fa-black-tie:before{content:""}.fa-fonticons:before{content:""}.fa-reddit-alien:before{content:""}.fa-edge:before{content:""}.fa-credit-card-alt:before{content:""}.fa-codiepie:before{content:""}.fa-modx:before{content:""}.fa-fort-awesome:before{content:""}.fa-usb:before{content:""}.fa-product-hunt:before{content:""}.fa-mixcloud:before{content:""}.fa-scribd:before{content:""}.fa-pause-circle:before{content:""}.fa-pause-circle-o:before{content:""}.fa-stop-circle:before{content:""}.fa-stop-circle-o:before{content:""}.fa-shopping-bag:before{content:""}.fa-shopping-basket:before{content:""}.fa-hashtag:before{content:""}.fa-bluetooth:before{content:""}.fa-bluetooth-b:before{content:""}.fa-percent:before{content:""}.fa-gitlab:before,.icon-gitlab:before{content:""}.fa-wpbeginner:before{content:""}.fa-wpforms:before{content:""}.fa-envira:before{content:""}.fa-universal-access:before{content:""}.fa-wheelchair-alt:before{content:""}.fa-question-circle-o:before{content:""}
.fa-blind:before{content:""}.fa-audio-description:before{content:""}.fa-volume-control-phone:before{content:""}.fa-braille:before{content:""}.fa-assistive-listening-systems:before{content:""}.fa-asl-interpreting:before,.fa-american-sign-language-interpreting:before{content:""}.fa-deafness:before,.fa-hard-of-hearing:before,.fa-deaf:before{content:""}.fa-glide:before{content:""}.fa-glide-g:before{content:""}.fa-signing:before,.fa-sign-language:before{content:""}.fa-low-vision:before{content:""}.fa-viadeo:before{content:""}.fa-viadeo-square:before{content:""}.fa-snapchat:before{content:""}.fa-snapchat-ghost:before{content:""}.fa-snapchat-square:before{content:""}.fa-pied-piper:before{content:""}.fa-first-order:before{content:""}.fa-yoast:before{content:""}.fa-themeisle:before{content:""}.fa-google-plus-circle:before,.fa-google-plus-official:before{content:""}.fa-fa:before,.fa-font-awesome:before{content:""}.fa-handshake-o:before{content:""}.fa-envelope-open:before{content:""}.fa-envelope-open-o:before{content:""}.fa-linode:before{content:""}.fa-address-book:before{content:""}.fa-address-book-o:before{content:""}.fa-vcard:before,.fa-address-card:before{content:""}.fa-vcard-o:before,.fa-address-card-o:before{content:""}.fa-user-circle:before{content:""}.fa-user-circle-o:before{content:""}.fa-user-o:before{content:""}.fa-id-badge:before{content:""}.fa-drivers-license:before,.fa-id-card:before{content:""}.fa-drivers-license-o:before,.fa-id-card-o:before{content:""}.fa-quora:before{content:""}.fa-free-code-camp:before{content:""}.fa-telegram:before{content:""}.fa-thermometer-4:before,.fa-thermometer:before,.fa-thermometer-full:before{content:""}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:""}.fa-thermometer-2:before,.fa-thermometer-half:before{content:""}.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:""}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:""}.fa-shower:before{con
tent:""}.fa-bathtub:before,.fa-s15:before,.fa-bath:before{content:""}.fa-podcast:before{content:""}.fa-window-maximize:before{content:""}.fa-window-minimize:before{content:""}.fa-window-restore:before{content:""}.fa-times-rectangle:before,.fa-window-close:before{content:""}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:""}.fa-bandcamp:before{content:""}.fa-grav:before{content:""}.fa-etsy:before{content:""}.fa-imdb:before{content:""}.fa-ravelry:before{content:""}.fa-eercast:before{content:""}.fa-microchip:before{content:""}.fa-snowflake-o:before{content:""}.fa-superpowers:before{content:""}.fa-wpexplorer:before{content:""}.fa-meetup:before{content:""}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0, 0, 0, 0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}.fa,.wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.current>a span.toctree-expand,.rst-content .admonition-title,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content dl dt .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink,.rst-content .code-block-caption .headerlink,.rst-content tt.download span:first-child,.rst-content code.download span:first-child,.icon,.wy-dropdown .caret,.wy-inline-validate.wy-inline-validate-success .wy-input-context,.wy-inline-validate.wy-inline-validate-danger .wy-input-context,.wy-inline-validate.wy-inline-validate-warning .wy-input-context,.wy-inline-validate.wy-inline-validate-info .wy-input-context{font-family:inherit}.fa:before,.wy-menu-vertical li span.toctree-expand:before,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li.current>a span.toctree-expand:before,.rst-content 
.admonition-title:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content dl dt .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content .code-block-caption .headerlink:before,.rst-content tt.download span:first-child:before,.rst-content code.download span:first-child:before,.icon:before,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before{font-family:"FontAwesome";display:inline-block;font-style:normal;font-weight:normal;line-height:1;text-decoration:inherit}a .fa,a .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li a span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.current>a span.toctree-expand,a .rst-content .admonition-title,.rst-content a .admonition-title,a .rst-content h1 .headerlink,.rst-content h1 a .headerlink,a .rst-content h2 .headerlink,.rst-content h2 a .headerlink,a .rst-content h3 .headerlink,.rst-content h3 a .headerlink,a .rst-content h4 .headerlink,.rst-content h4 a .headerlink,a .rst-content h5 .headerlink,.rst-content h5 a .headerlink,a .rst-content h6 .headerlink,.rst-content h6 a .headerlink,a .rst-content dl dt .headerlink,.rst-content dl dt a .headerlink,a .rst-content p.caption .headerlink,.rst-content p.caption a .headerlink,a .rst-content table>caption .headerlink,.rst-content table>caption a .headerlink,a .rst-content .code-block-caption .headerlink,.rst-content .code-block-caption a .headerlink,a .rst-content tt.download span:first-child,.rst-content tt.download a span:first-child,a .rst-content 
code.download span:first-child,.rst-content code.download a span:first-child,a .icon{display:inline-block;text-decoration:inherit}.btn .fa,.btn .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li .btn span.toctree-expand,.btn .wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.on a .btn span.toctree-expand,.btn .wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.current>a .btn span.toctree-expand,.btn .rst-content .admonition-title,.rst-content .btn .admonition-title,.btn .rst-content h1 .headerlink,.rst-content h1 .btn .headerlink,.btn .rst-content h2 .headerlink,.rst-content h2 .btn .headerlink,.btn .rst-content h3 .headerlink,.rst-content h3 .btn .headerlink,.btn .rst-content h4 .headerlink,.rst-content h4 .btn .headerlink,.btn .rst-content h5 .headerlink,.rst-content h5 .btn .headerlink,.btn .rst-content h6 .headerlink,.rst-content h6 .btn .headerlink,.btn .rst-content dl dt .headerlink,.rst-content dl dt .btn .headerlink,.btn .rst-content p.caption .headerlink,.rst-content p.caption .btn .headerlink,.btn .rst-content table>caption .headerlink,.rst-content table>caption .btn .headerlink,.btn .rst-content .code-block-caption .headerlink,.rst-content .code-block-caption .btn .headerlink,.btn .rst-content tt.download span:first-child,.rst-content tt.download .btn span:first-child,.btn .rst-content code.download span:first-child,.rst-content code.download .btn span:first-child,.btn .icon,.nav .fa,.nav .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li .nav span.toctree-expand,.nav .wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.on a .nav span.toctree-expand,.nav .wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.current>a .nav span.toctree-expand,.nav .rst-content .admonition-title,.rst-content .nav .admonition-title,.nav .rst-content h1 .headerlink,.rst-content h1 .nav .headerlink,.nav .rst-content h2 .headerlink,.rst-content h2 .nav .headerlink,.nav .rst-content h3 
.headerlink,.rst-content h3 .nav .headerlink,.nav .rst-content h4 .headerlink,.rst-content h4 .nav .headerlink,.nav .rst-content h5 .headerlink,.rst-content h5 .nav .headerlink,.nav .rst-content h6 .headerlink,.rst-content h6 .nav .headerlink,.nav .rst-content dl dt .headerlink,.rst-content dl dt .nav .headerlink,.nav .rst-content p.caption .headerlink,.rst-content p.caption .nav .headerlink,.nav .rst-content table>caption .headerlink,.rst-content table>caption .nav .headerlink,.nav .rst-content .code-block-caption .headerlink,.rst-content .code-block-caption .nav .headerlink,.nav .rst-content tt.download span:first-child,.rst-content tt.download .nav span:first-child,.nav .rst-content code.download span:first-child,.rst-content code.download .nav span:first-child,.nav .icon{display:inline}.btn .fa.fa-large,.btn .wy-menu-vertical li span.fa-large.toctree-expand,.wy-menu-vertical li .btn span.fa-large.toctree-expand,.btn .rst-content .fa-large.admonition-title,.rst-content .btn .fa-large.admonition-title,.btn .rst-content h1 .fa-large.headerlink,.rst-content h1 .btn .fa-large.headerlink,.btn .rst-content h2 .fa-large.headerlink,.rst-content h2 .btn .fa-large.headerlink,.btn .rst-content h3 .fa-large.headerlink,.rst-content h3 .btn .fa-large.headerlink,.btn .rst-content h4 .fa-large.headerlink,.rst-content h4 .btn .fa-large.headerlink,.btn .rst-content h5 .fa-large.headerlink,.rst-content h5 .btn .fa-large.headerlink,.btn .rst-content h6 .fa-large.headerlink,.rst-content h6 .btn .fa-large.headerlink,.btn .rst-content dl dt .fa-large.headerlink,.rst-content dl dt .btn .fa-large.headerlink,.btn .rst-content p.caption .fa-large.headerlink,.rst-content p.caption .btn .fa-large.headerlink,.btn .rst-content table>caption .fa-large.headerlink,.rst-content table>caption .btn .fa-large.headerlink,.btn .rst-content .code-block-caption .fa-large.headerlink,.rst-content .code-block-caption .btn .fa-large.headerlink,.btn .rst-content tt.download 
span.fa-large:first-child,.rst-content tt.download .btn span.fa-large:first-child,.btn .rst-content code.download span.fa-large:first-child,.rst-content code.download .btn span.fa-large:first-child,.btn .fa-large.icon,.nav .fa.fa-large,.nav .wy-menu-vertical li span.fa-large.toctree-expand,.wy-menu-vertical li .nav span.fa-large.toctree-expand,.nav .rst-content .fa-large.admonition-title,.rst-content .nav .fa-large.admonition-title,.nav .rst-content h1 .fa-large.headerlink,.rst-content h1 .nav .fa-large.headerlink,.nav .rst-content h2 .fa-large.headerlink,.rst-content h2 .nav .fa-large.headerlink,.nav .rst-content h3 .fa-large.headerlink,.rst-content h3 .nav .fa-large.headerlink,.nav .rst-content h4 .fa-large.headerlink,.rst-content h4 .nav .fa-large.headerlink,.nav .rst-content h5 .fa-large.headerlink,.rst-content h5 .nav .fa-large.headerlink,.nav .rst-content h6 .fa-large.headerlink,.rst-content h6 .nav .fa-large.headerlink,.nav .rst-content dl dt .fa-large.headerlink,.rst-content dl dt .nav .fa-large.headerlink,.nav .rst-content p.caption .fa-large.headerlink,.rst-content p.caption .nav .fa-large.headerlink,.nav .rst-content table>caption .fa-large.headerlink,.rst-content table>caption .nav .fa-large.headerlink,.nav .rst-content .code-block-caption .fa-large.headerlink,.rst-content .code-block-caption .nav .fa-large.headerlink,.nav .rst-content tt.download span.fa-large:first-child,.rst-content tt.download .nav span.fa-large:first-child,.nav .rst-content code.download span.fa-large:first-child,.rst-content code.download .nav span.fa-large:first-child,.nav .fa-large.icon{line-height:.9em}.btn .fa.fa-spin,.btn .wy-menu-vertical li span.fa-spin.toctree-expand,.wy-menu-vertical li .btn span.fa-spin.toctree-expand,.btn .rst-content .fa-spin.admonition-title,.rst-content .btn .fa-spin.admonition-title,.btn .rst-content h1 .fa-spin.headerlink,.rst-content h1 .btn .fa-spin.headerlink,.btn .rst-content h2 .fa-spin.headerlink,.rst-content h2 .btn .fa-spin.headerlink,.btn 
.rst-content h3 .fa-spin.headerlink,.rst-content h3 .btn .fa-spin.headerlink,.btn .rst-content h4 .fa-spin.headerlink,.rst-content h4 .btn .fa-spin.headerlink,.btn .rst-content h5 .fa-spin.headerlink,.rst-content h5 .btn .fa-spin.headerlink,.btn .rst-content h6 .fa-spin.headerlink,.rst-content h6 .btn .fa-spin.headerlink,.btn .rst-content dl dt .fa-spin.headerlink,.rst-content dl dt .btn .fa-spin.headerlink,.btn .rst-content p.caption .fa-spin.headerlink,.rst-content p.caption .btn .fa-spin.headerlink,.btn .rst-content table>caption .fa-spin.headerlink,.rst-content table>caption .btn .fa-spin.headerlink,.btn .rst-content .code-block-caption .fa-spin.headerlink,.rst-content .code-block-caption .btn .fa-spin.headerlink,.btn .rst-content tt.download span.fa-spin:first-child,.rst-content tt.download .btn span.fa-spin:first-child,.btn .rst-content code.download span.fa-spin:first-child,.rst-content code.download .btn span.fa-spin:first-child,.btn .fa-spin.icon,.nav .fa.fa-spin,.nav .wy-menu-vertical li span.fa-spin.toctree-expand,.wy-menu-vertical li .nav span.fa-spin.toctree-expand,.nav .rst-content .fa-spin.admonition-title,.rst-content .nav .fa-spin.admonition-title,.nav .rst-content h1 .fa-spin.headerlink,.rst-content h1 .nav .fa-spin.headerlink,.nav .rst-content h2 .fa-spin.headerlink,.rst-content h2 .nav .fa-spin.headerlink,.nav .rst-content h3 .fa-spin.headerlink,.rst-content h3 .nav .fa-spin.headerlink,.nav .rst-content h4 .fa-spin.headerlink,.rst-content h4 .nav .fa-spin.headerlink,.nav .rst-content h5 .fa-spin.headerlink,.rst-content h5 .nav .fa-spin.headerlink,.nav .rst-content h6 .fa-spin.headerlink,.rst-content h6 .nav .fa-spin.headerlink,.nav .rst-content dl dt .fa-spin.headerlink,.rst-content dl dt .nav .fa-spin.headerlink,.nav .rst-content p.caption .fa-spin.headerlink,.rst-content p.caption .nav .fa-spin.headerlink,.nav .rst-content table>caption .fa-spin.headerlink,.rst-content table>caption .nav .fa-spin.headerlink,.nav .rst-content 
.code-block-caption .fa-spin.headerlink,.rst-content .code-block-caption .nav .fa-spin.headerlink,.nav .rst-content tt.download span.fa-spin:first-child,.rst-content tt.download .nav span.fa-spin:first-child,.nav .rst-content code.download span.fa-spin:first-child,.rst-content code.download .nav span.fa-spin:first-child,.nav .fa-spin.icon{display:inline-block}.btn.fa:before,.wy-menu-vertical li span.btn.toctree-expand:before,.rst-content .btn.admonition-title:before,.rst-content h1 .btn.headerlink:before,.rst-content h2 .btn.headerlink:before,.rst-content h3 .btn.headerlink:before,.rst-content h4 .btn.headerlink:before,.rst-content h5 .btn.headerlink:before,.rst-content h6 .btn.headerlink:before,.rst-content dl dt .btn.headerlink:before,.rst-content p.caption .btn.headerlink:before,.rst-content table>caption .btn.headerlink:before,.rst-content .code-block-caption .btn.headerlink:before,.rst-content tt.download span.btn:first-child:before,.rst-content code.download span.btn:first-child:before,.btn.icon:before{opacity:.5;-webkit-transition:opacity .05s ease-in;-moz-transition:opacity .05s ease-in;transition:opacity .05s ease-in}.btn.fa:hover:before,.wy-menu-vertical li span.btn.toctree-expand:hover:before,.rst-content .btn.admonition-title:hover:before,.rst-content h1 .btn.headerlink:hover:before,.rst-content h2 .btn.headerlink:hover:before,.rst-content h3 .btn.headerlink:hover:before,.rst-content h4 .btn.headerlink:hover:before,.rst-content h5 .btn.headerlink:hover:before,.rst-content h6 .btn.headerlink:hover:before,.rst-content dl dt .btn.headerlink:hover:before,.rst-content p.caption .btn.headerlink:hover:before,.rst-content table>caption .btn.headerlink:hover:before,.rst-content .code-block-caption .btn.headerlink:hover:before,.rst-content tt.download span.btn:first-child:hover:before,.rst-content code.download span.btn:first-child:hover:before,.btn.icon:hover:before{opacity:1}.btn-mini .fa:before,.btn-mini .wy-menu-vertical li 
span.toctree-expand:before,.wy-menu-vertical li .btn-mini span.toctree-expand:before,.btn-mini .rst-content .admonition-title:before,.rst-content .btn-mini .admonition-title:before,.btn-mini .rst-content h1 .headerlink:before,.rst-content h1 .btn-mini .headerlink:before,.btn-mini .rst-content h2 .headerlink:before,.rst-content h2 .btn-mini .headerlink:before,.btn-mini .rst-content h3 .headerlink:before,.rst-content h3 .btn-mini .headerlink:before,.btn-mini .rst-content h4 .headerlink:before,.rst-content h4 .btn-mini .headerlink:before,.btn-mini .rst-content h5 .headerlink:before,.rst-content h5 .btn-mini .headerlink:before,.btn-mini .rst-content h6 .headerlink:before,.rst-content h6 .btn-mini .headerlink:before,.btn-mini .rst-content dl dt .headerlink:before,.rst-content dl dt .btn-mini .headerlink:before,.btn-mini .rst-content p.caption .headerlink:before,.rst-content p.caption .btn-mini .headerlink:before,.btn-mini .rst-content table>caption .headerlink:before,.rst-content table>caption .btn-mini .headerlink:before,.btn-mini .rst-content .code-block-caption .headerlink:before,.rst-content .code-block-caption .btn-mini .headerlink:before,.btn-mini .rst-content tt.download span:first-child:before,.rst-content tt.download .btn-mini span:first-child:before,.btn-mini .rst-content code.download span:first-child:before,.rst-content code.download .btn-mini span:first-child:before,.btn-mini .icon:before{font-size:14px;vertical-align:-15%}.wy-alert,.rst-content .note,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .warning,.rst-content .seealso,.rst-content .admonition-todo,.rst-content .admonition{padding:12px;line-height:24px;margin-bottom:24px;background:#e7f2fa}.wy-alert-title,.rst-content .admonition-title{color:#fff;font-weight:bold;display:block;color:#fff;background:#6ab0de;margin:-12px;padding:6px 
12px;margin-bottom:12px}.wy-alert.wy-alert-danger,.rst-content .wy-alert-danger.note,.rst-content .wy-alert-danger.attention,.rst-content .wy-alert-danger.caution,.rst-content .danger,.rst-content .error,.rst-content .wy-alert-danger.hint,.rst-content .wy-alert-danger.important,.rst-content .wy-alert-danger.tip,.rst-content .wy-alert-danger.warning,.rst-content .wy-alert-danger.seealso,.rst-content .wy-alert-danger.admonition-todo,.rst-content .wy-alert-danger.admonition{background:#fdf3f2}.wy-alert.wy-alert-danger .wy-alert-title,.rst-content .wy-alert-danger.note .wy-alert-title,.rst-content .wy-alert-danger.attention .wy-alert-title,.rst-content .wy-alert-danger.caution .wy-alert-title,.rst-content .danger .wy-alert-title,.rst-content .error .wy-alert-title,.rst-content .wy-alert-danger.hint .wy-alert-title,.rst-content .wy-alert-danger.important .wy-alert-title,.rst-content .wy-alert-danger.tip .wy-alert-title,.rst-content .wy-alert-danger.warning .wy-alert-title,.rst-content .wy-alert-danger.seealso .wy-alert-title,.rst-content .wy-alert-danger.admonition-todo .wy-alert-title,.rst-content .wy-alert-danger.admonition .wy-alert-title,.wy-alert.wy-alert-danger .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-danger .admonition-title,.rst-content .wy-alert-danger.note .admonition-title,.rst-content .wy-alert-danger.attention .admonition-title,.rst-content .wy-alert-danger.caution .admonition-title,.rst-content .danger .admonition-title,.rst-content .error .admonition-title,.rst-content .wy-alert-danger.hint .admonition-title,.rst-content .wy-alert-danger.important .admonition-title,.rst-content .wy-alert-danger.tip .admonition-title,.rst-content .wy-alert-danger.warning .admonition-title,.rst-content .wy-alert-danger.seealso .admonition-title,.rst-content .wy-alert-danger.admonition-todo .admonition-title,.rst-content .wy-alert-danger.admonition .admonition-title{background:#f29f97}.wy-alert.wy-alert-warning,.rst-content 
.wy-alert-warning.note,.rst-content .attention,.rst-content .caution,.rst-content .wy-alert-warning.danger,.rst-content .wy-alert-warning.error,.rst-content .wy-alert-warning.hint,.rst-content .wy-alert-warning.important,.rst-content .wy-alert-warning.tip,.rst-content .warning,.rst-content .wy-alert-warning.seealso,.rst-content .admonition-todo,.rst-content .wy-alert-warning.admonition{background:#ffedcc}.wy-alert.wy-alert-warning .wy-alert-title,.rst-content .wy-alert-warning.note .wy-alert-title,.rst-content .attention .wy-alert-title,.rst-content .caution .wy-alert-title,.rst-content .wy-alert-warning.danger .wy-alert-title,.rst-content .wy-alert-warning.error .wy-alert-title,.rst-content .wy-alert-warning.hint .wy-alert-title,.rst-content .wy-alert-warning.important .wy-alert-title,.rst-content .wy-alert-warning.tip .wy-alert-title,.rst-content .warning .wy-alert-title,.rst-content .wy-alert-warning.seealso .wy-alert-title,.rst-content .admonition-todo .wy-alert-title,.rst-content .wy-alert-warning.admonition .wy-alert-title,.wy-alert.wy-alert-warning .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-warning .admonition-title,.rst-content .wy-alert-warning.note .admonition-title,.rst-content .attention .admonition-title,.rst-content .caution .admonition-title,.rst-content .wy-alert-warning.danger .admonition-title,.rst-content .wy-alert-warning.error .admonition-title,.rst-content .wy-alert-warning.hint .admonition-title,.rst-content .wy-alert-warning.important .admonition-title,.rst-content .wy-alert-warning.tip .admonition-title,.rst-content .warning .admonition-title,.rst-content .wy-alert-warning.seealso .admonition-title,.rst-content .admonition-todo .admonition-title,.rst-content .wy-alert-warning.admonition .admonition-title{background:#f0b37e}.wy-alert.wy-alert-info,.rst-content .note,.rst-content .wy-alert-info.attention,.rst-content .wy-alert-info.caution,.rst-content .wy-alert-info.danger,.rst-content .wy-alert-info.error,.rst-content 
.wy-alert-info.hint,.rst-content .wy-alert-info.important,.rst-content .wy-alert-info.tip,.rst-content .wy-alert-info.warning,.rst-content .seealso,.rst-content .wy-alert-info.admonition-todo,.rst-content .wy-alert-info.admonition{background:#e7f2fa}.wy-alert.wy-alert-info .wy-alert-title,.rst-content .note .wy-alert-title,.rst-content .wy-alert-info.attention .wy-alert-title,.rst-content .wy-alert-info.caution .wy-alert-title,.rst-content .wy-alert-info.danger .wy-alert-title,.rst-content .wy-alert-info.error .wy-alert-title,.rst-content .wy-alert-info.hint .wy-alert-title,.rst-content .wy-alert-info.important .wy-alert-title,.rst-content .wy-alert-info.tip .wy-alert-title,.rst-content .wy-alert-info.warning .wy-alert-title,.rst-content .seealso .wy-alert-title,.rst-content .wy-alert-info.admonition-todo .wy-alert-title,.rst-content .wy-alert-info.admonition .wy-alert-title,.wy-alert.wy-alert-info .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-info .admonition-title,.rst-content .note .admonition-title,.rst-content .wy-alert-info.attention .admonition-title,.rst-content .wy-alert-info.caution .admonition-title,.rst-content .wy-alert-info.danger .admonition-title,.rst-content .wy-alert-info.error .admonition-title,.rst-content .wy-alert-info.hint .admonition-title,.rst-content .wy-alert-info.important .admonition-title,.rst-content .wy-alert-info.tip .admonition-title,.rst-content .wy-alert-info.warning .admonition-title,.rst-content .seealso .admonition-title,.rst-content .wy-alert-info.admonition-todo .admonition-title,.rst-content .wy-alert-info.admonition .admonition-title{background:#6ab0de}.wy-alert.wy-alert-success,.rst-content .wy-alert-success.note,.rst-content .wy-alert-success.attention,.rst-content .wy-alert-success.caution,.rst-content .wy-alert-success.danger,.rst-content .wy-alert-success.error,.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .wy-alert-success.warning,.rst-content 
.wy-alert-success.seealso,.rst-content .wy-alert-success.admonition-todo,.rst-content .wy-alert-success.admonition{background:#dbfaf4}.wy-alert.wy-alert-success .wy-alert-title,.rst-content .wy-alert-success.note .wy-alert-title,.rst-content .wy-alert-success.attention .wy-alert-title,.rst-content .wy-alert-success.caution .wy-alert-title,.rst-content .wy-alert-success.danger .wy-alert-title,.rst-content .wy-alert-success.error .wy-alert-title,.rst-content .hint .wy-alert-title,.rst-content .important .wy-alert-title,.rst-content .tip .wy-alert-title,.rst-content .wy-alert-success.warning .wy-alert-title,.rst-content .wy-alert-success.seealso .wy-alert-title,.rst-content .wy-alert-success.admonition-todo .wy-alert-title,.rst-content .wy-alert-success.admonition .wy-alert-title,.wy-alert.wy-alert-success .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-success .admonition-title,.rst-content .wy-alert-success.note .admonition-title,.rst-content .wy-alert-success.attention .admonition-title,.rst-content .wy-alert-success.caution .admonition-title,.rst-content .wy-alert-success.danger .admonition-title,.rst-content .wy-alert-success.error .admonition-title,.rst-content .hint .admonition-title,.rst-content .important .admonition-title,.rst-content .tip .admonition-title,.rst-content .wy-alert-success.warning .admonition-title,.rst-content .wy-alert-success.seealso .admonition-title,.rst-content .wy-alert-success.admonition-todo .admonition-title,.rst-content .wy-alert-success.admonition .admonition-title{background:#1abc9c}.wy-alert.wy-alert-neutral,.rst-content .wy-alert-neutral.note,.rst-content .wy-alert-neutral.attention,.rst-content .wy-alert-neutral.caution,.rst-content .wy-alert-neutral.danger,.rst-content .wy-alert-neutral.error,.rst-content .wy-alert-neutral.hint,.rst-content .wy-alert-neutral.important,.rst-content .wy-alert-neutral.tip,.rst-content .wy-alert-neutral.warning,.rst-content .wy-alert-neutral.seealso,.rst-content 
.wy-alert-neutral.admonition-todo,.rst-content .wy-alert-neutral.admonition{background:#f3f6f6}.wy-alert.wy-alert-neutral .wy-alert-title,.rst-content .wy-alert-neutral.note .wy-alert-title,.rst-content .wy-alert-neutral.attention .wy-alert-title,.rst-content .wy-alert-neutral.caution .wy-alert-title,.rst-content .wy-alert-neutral.danger .wy-alert-title,.rst-content .wy-alert-neutral.error .wy-alert-title,.rst-content .wy-alert-neutral.hint .wy-alert-title,.rst-content .wy-alert-neutral.important .wy-alert-title,.rst-content .wy-alert-neutral.tip .wy-alert-title,.rst-content .wy-alert-neutral.warning .wy-alert-title,.rst-content .wy-alert-neutral.seealso .wy-alert-title,.rst-content .wy-alert-neutral.admonition-todo .wy-alert-title,.rst-content .wy-alert-neutral.admonition .wy-alert-title,.wy-alert.wy-alert-neutral .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-neutral .admonition-title,.rst-content .wy-alert-neutral.note .admonition-title,.rst-content .wy-alert-neutral.attention .admonition-title,.rst-content .wy-alert-neutral.caution .admonition-title,.rst-content .wy-alert-neutral.danger .admonition-title,.rst-content .wy-alert-neutral.error .admonition-title,.rst-content .wy-alert-neutral.hint .admonition-title,.rst-content .wy-alert-neutral.important .admonition-title,.rst-content .wy-alert-neutral.tip .admonition-title,.rst-content .wy-alert-neutral.warning .admonition-title,.rst-content .wy-alert-neutral.seealso .admonition-title,.rst-content .wy-alert-neutral.admonition-todo .admonition-title,.rst-content .wy-alert-neutral.admonition .admonition-title{color:#404040;background:#e1e4e5}.wy-alert.wy-alert-neutral a,.rst-content .wy-alert-neutral.note a,.rst-content .wy-alert-neutral.attention a,.rst-content .wy-alert-neutral.caution a,.rst-content .wy-alert-neutral.danger a,.rst-content .wy-alert-neutral.error a,.rst-content .wy-alert-neutral.hint a,.rst-content .wy-alert-neutral.important a,.rst-content .wy-alert-neutral.tip a,.rst-content 
.wy-alert-neutral.warning a,.rst-content .wy-alert-neutral.seealso a,.rst-content .wy-alert-neutral.admonition-todo a,.rst-content .wy-alert-neutral.admonition a{color:#2980B9}.wy-alert p:last-child,.rst-content .note p:last-child,.rst-content .attention p:last-child,.rst-content .caution p:last-child,.rst-content .danger p:last-child,.rst-content .error p:last-child,.rst-content .hint p:last-child,.rst-content .important p:last-child,.rst-content .tip p:last-child,.rst-content .warning p:last-child,.rst-content .seealso p:last-child,.rst-content .admonition-todo p:last-child,.rst-content .admonition p:last-child{margin-bottom:0}.wy-tray-container{position:fixed;bottom:0px;left:0;z-index:600}.wy-tray-container li{display:block;width:300px;background:transparent;color:#fff;text-align:center;box-shadow:0 5px 5px 0 rgba(0,0,0,0.1);padding:0 24px;min-width:20%;opacity:0;height:0;line-height:56px;overflow:hidden;-webkit-transition:all .3s ease-in;-moz-transition:all .3s ease-in;transition:all .3s ease-in}.wy-tray-container li.wy-tray-item-success{background:#27AE60}.wy-tray-container li.wy-tray-item-info{background:#2980B9}.wy-tray-container li.wy-tray-item-warning{background:#E67E22}.wy-tray-container li.wy-tray-item-danger{background:#E74C3C}.wy-tray-container li.on{opacity:1;height:56px}@media screen and (max-width: 768px){.wy-tray-container{bottom:auto;top:0;width:100%}.wy-tray-container li{width:100%}}button{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle;cursor:pointer;line-height:normal;-webkit-appearance:button;*overflow:visible}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}button[disabled]{cursor:default}.btn{display:inline-block;border-radius:2px;line-height:normal;white-space:nowrap;text-align:center;cursor:pointer;font-size:100%;padding:6px 12px 8px 12px;color:#fff;border:1px solid rgba(0,0,0,0.1);background-color:#27AE60;text-decoration:none;font-weight:normal;font-family:"Lato","proxima-nova","Helvetica 
Neue",Arial,sans-serif;box-shadow:0px 1px 2px -1px rgba(255,255,255,0.5) inset,0px -2px 0px 0px rgba(0,0,0,0.1) inset;outline-none:false;vertical-align:middle;*display:inline;zoom:1;-webkit-user-drag:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;-webkit-transition:all .1s linear;-moz-transition:all .1s linear;transition:all .1s linear}.btn-hover{background:#2e8ece;color:#fff}.btn:hover{background:#2cc36b;color:#fff}.btn:focus{background:#2cc36b;outline:0}.btn:active{box-shadow:0px -1px 0px 0px rgba(0,0,0,0.05) inset,0px 2px 0px 0px rgba(0,0,0,0.1) inset;padding:8px 12px 6px 12px}.btn:visited{color:#fff}.btn:disabled{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:.4;cursor:not-allowed;box-shadow:none}.btn-disabled{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:.4;cursor:not-allowed;box-shadow:none}.btn-disabled:hover,.btn-disabled:focus,.btn-disabled:active{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:.4;cursor:not-allowed;box-shadow:none}.btn::-moz-focus-inner{padding:0;border:0}.btn-small{font-size:80%}.btn-info{background-color:#2980B9 !important}.btn-info:hover{background-color:#2e8ece !important}.btn-neutral{background-color:#f3f6f6 !important;color:#404040 !important}.btn-neutral:hover{background-color:#e5ebeb !important;color:#404040}.btn-neutral:visited{color:#404040 !important}.btn-success{background-color:#27AE60 !important}.btn-success:hover{background-color:#295 !important}.btn-danger{background-color:#E74C3C !important}.btn-danger:hover{background-color:#ea6153 !important}.btn-warning{background-color:#E67E22 !important}.btn-warning:hover{background-color:#e98b39 !important}.btn-invert{background-color:#222}.btn-invert:hover{background-color:#2f2f2f 
!important}.btn-link{background-color:transparent !important;color:#2980B9;box-shadow:none;border-color:transparent !important}.btn-link:hover{background-color:transparent !important;color:#409ad5 !important;box-shadow:none}.btn-link:active{background-color:transparent !important;color:#409ad5 !important;box-shadow:none}.btn-link:visited{color:#9B59B6}.wy-btn-group .btn,.wy-control .btn{vertical-align:middle}.wy-btn-group{margin-bottom:24px;*zoom:1}.wy-btn-group:before,.wy-btn-group:after{display:table;content:""}.wy-btn-group:after{clear:both}.wy-dropdown{position:relative;display:inline-block}.wy-dropdown-active .wy-dropdown-menu{display:block}.wy-dropdown-menu{position:absolute;left:0;display:none;float:left;top:100%;min-width:100%;background:#fcfcfc;z-index:100;border:solid 1px #cfd7dd;box-shadow:0 2px 2px 0 rgba(0,0,0,0.1);padding:12px}.wy-dropdown-menu>dd>a{display:block;clear:both;color:#404040;white-space:nowrap;font-size:90%;padding:0 12px;cursor:pointer}.wy-dropdown-menu>dd>a:hover{background:#2980B9;color:#fff}.wy-dropdown-menu>dd.divider{border-top:solid 1px #cfd7dd;margin:6px 0}.wy-dropdown-menu>dd.search{padding-bottom:12px}.wy-dropdown-menu>dd.search input[type="search"]{width:100%}.wy-dropdown-menu>dd.call-to-action{background:#e3e3e3;text-transform:uppercase;font-weight:500;font-size:80%}.wy-dropdown-menu>dd.call-to-action:hover{background:#e3e3e3}.wy-dropdown-menu>dd.call-to-action .btn{color:#fff}.wy-dropdown.wy-dropdown-up .wy-dropdown-menu{bottom:100%;top:auto;left:auto;right:0}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu{background:#fcfcfc;margin-top:2px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a{padding:6px 12px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a:hover{background:#2980B9;color:#fff}.wy-dropdown.wy-dropdown-left .wy-dropdown-menu{right:0;left:auto;text-align:right}.wy-dropdown-arrow:before{content:" ";border-bottom:5px solid #f5f5f5;border-left:5px solid transparent;border-right:5px solid 
transparent;position:absolute;display:block;top:-4px;left:50%;margin-left:-3px}.wy-dropdown-arrow.wy-dropdown-arrow-left:before{left:11px}.wy-form-stacked select{display:block}.wy-form-aligned input,.wy-form-aligned textarea,.wy-form-aligned select,.wy-form-aligned .wy-help-inline,.wy-form-aligned label{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-form-aligned .wy-control-group>label{display:inline-block;vertical-align:middle;width:10em;margin:6px 12px 0 0;float:left}.wy-form-aligned .wy-control{float:left}.wy-form-aligned .wy-control label{display:block}.wy-form-aligned .wy-control select{margin-top:6px}fieldset{border:0;margin:0;padding:0}legend{display:block;width:100%;border:0;padding:0;white-space:normal;margin-bottom:24px;font-size:150%;*margin-left:-7px}label{display:block;margin:0 0 .3125em 0;color:#333;font-size:90%}input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}.wy-control-group{margin-bottom:24px;*zoom:1;max-width:68em;margin-left:auto;margin-right:auto;*zoom:1}.wy-control-group:before,.wy-control-group:after{display:table;content:""}.wy-control-group:after{clear:both}.wy-control-group:before,.wy-control-group:after{display:table;content:""}.wy-control-group:after{clear:both}.wy-control-group.wy-control-group-required>label:after{content:" *";color:#E74C3C}.wy-control-group .wy-form-full,.wy-control-group .wy-form-halves,.wy-control-group .wy-form-thirds{padding-bottom:12px}.wy-control-group .wy-form-full select,.wy-control-group .wy-form-halves select,.wy-control-group .wy-form-thirds select{width:100%}.wy-control-group .wy-form-full input[type="text"],.wy-control-group .wy-form-full input[type="password"],.wy-control-group .wy-form-full input[type="email"],.wy-control-group .wy-form-full input[type="url"],.wy-control-group .wy-form-full input[type="date"],.wy-control-group .wy-form-full input[type="month"],.wy-control-group .wy-form-full input[type="time"],.wy-control-group 
.wy-form-full input[type="datetime"],.wy-control-group .wy-form-full input[type="datetime-local"],.wy-control-group .wy-form-full input[type="week"],.wy-control-group .wy-form-full input[type="number"],.wy-control-group .wy-form-full input[type="search"],.wy-control-group .wy-form-full input[type="tel"],.wy-control-group .wy-form-full input[type="color"],.wy-control-group .wy-form-halves input[type="text"],.wy-control-group .wy-form-halves input[type="password"],.wy-control-group .wy-form-halves input[type="email"],.wy-control-group .wy-form-halves input[type="url"],.wy-control-group .wy-form-halves input[type="date"],.wy-control-group .wy-form-halves input[type="month"],.wy-control-group .wy-form-halves input[type="time"],.wy-control-group .wy-form-halves input[type="datetime"],.wy-control-group .wy-form-halves input[type="datetime-local"],.wy-control-group .wy-form-halves input[type="week"],.wy-control-group .wy-form-halves input[type="number"],.wy-control-group .wy-form-halves input[type="search"],.wy-control-group .wy-form-halves input[type="tel"],.wy-control-group .wy-form-halves input[type="color"],.wy-control-group .wy-form-thirds input[type="text"],.wy-control-group .wy-form-thirds input[type="password"],.wy-control-group .wy-form-thirds input[type="email"],.wy-control-group .wy-form-thirds input[type="url"],.wy-control-group .wy-form-thirds input[type="date"],.wy-control-group .wy-form-thirds input[type="month"],.wy-control-group .wy-form-thirds input[type="time"],.wy-control-group .wy-form-thirds input[type="datetime"],.wy-control-group .wy-form-thirds input[type="datetime-local"],.wy-control-group .wy-form-thirds input[type="week"],.wy-control-group .wy-form-thirds input[type="number"],.wy-control-group .wy-form-thirds input[type="search"],.wy-control-group .wy-form-thirds input[type="tel"],.wy-control-group .wy-form-thirds input[type="color"]{width:100%}.wy-control-group 
.wy-form-full{float:left;display:block;margin-right:2.3576515979%;width:100%;margin-right:0}.wy-control-group .wy-form-full:last-child{margin-right:0}.wy-control-group .wy-form-halves{float:left;display:block;margin-right:2.3576515979%;width:48.821174201%}.wy-control-group .wy-form-halves:last-child{margin-right:0}.wy-control-group .wy-form-halves:nth-of-type(2n){margin-right:0}.wy-control-group .wy-form-halves:nth-of-type(2n+1){clear:left}.wy-control-group .wy-form-thirds{float:left;display:block;margin-right:2.3576515979%;width:31.7615656014%}.wy-control-group .wy-form-thirds:last-child{margin-right:0}.wy-control-group .wy-form-thirds:nth-of-type(3n){margin-right:0}.wy-control-group .wy-form-thirds:nth-of-type(3n+1){clear:left}.wy-control-group.wy-control-group-no-input .wy-control{margin:6px 0 0 0;font-size:90%}.wy-control-no-input{display:inline-block;margin:6px 0 0 0;font-size:90%}.wy-control-group.fluid-input input[type="text"],.wy-control-group.fluid-input input[type="password"],.wy-control-group.fluid-input input[type="email"],.wy-control-group.fluid-input input[type="url"],.wy-control-group.fluid-input input[type="date"],.wy-control-group.fluid-input input[type="month"],.wy-control-group.fluid-input input[type="time"],.wy-control-group.fluid-input input[type="datetime"],.wy-control-group.fluid-input input[type="datetime-local"],.wy-control-group.fluid-input input[type="week"],.wy-control-group.fluid-input input[type="number"],.wy-control-group.fluid-input input[type="search"],.wy-control-group.fluid-input input[type="tel"],.wy-control-group.fluid-input input[type="color"]{width:100%}.wy-form-message-inline{display:inline-block;padding-left:.3em;color:#666;vertical-align:middle;font-size:90%}.wy-form-message{display:block;color:#999;font-size:70%;margin-top:.3125em;font-style:italic}.wy-form-message p{font-size:inherit;font-style:italic;margin-bottom:6px}.wy-form-message 
p:last-child{margin-bottom:0}input{line-height:normal}input[type="button"],input[type="reset"],input[type="submit"]{-webkit-appearance:button;cursor:pointer;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;*overflow:visible}input[type="text"],input[type="password"],input[type="email"],input[type="url"],input[type="date"],input[type="month"],input[type="time"],input[type="datetime"],input[type="datetime-local"],input[type="week"],input[type="number"],input[type="search"],input[type="tel"],input[type="color"]{-webkit-appearance:none;padding:6px;display:inline-block;border:1px solid #ccc;font-size:80%;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;box-shadow:inset 0 1px 3px #ddd;border-radius:0;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}input[type="datetime-local"]{padding:.34375em .625em}input[disabled]{cursor:default}input[type="checkbox"],input[type="radio"]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;padding:0;margin-right:.3125em;*height:13px;*width:13px}input[type="search"]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type="search"]::-webkit-search-cancel-button,input[type="search"]::-webkit-search-decoration{-webkit-appearance:none}input[type="text"]:focus,input[type="password"]:focus,input[type="email"]:focus,input[type="url"]:focus,input[type="date"]:focus,input[type="month"]:focus,input[type="time"]:focus,input[type="datetime"]:focus,input[type="datetime-local"]:focus,input[type="week"]:focus,input[type="number"]:focus,input[type="search"]:focus,input[type="tel"]:focus,input[type="color"]:focus{outline:0;outline:thin dotted \9;border-color:#333}input.no-focus:focus{border-color:#ccc !important}input[type="file"]:focus,input[type="radio"]:focus,input[type="checkbox"]:focus{outline:thin dotted #333;outline:1px auto 
#129FEA}input[type="text"][disabled],input[type="password"][disabled],input[type="email"][disabled],input[type="url"][disabled],input[type="date"][disabled],input[type="month"][disabled],input[type="time"][disabled],input[type="datetime"][disabled],input[type="datetime-local"][disabled],input[type="week"][disabled],input[type="number"][disabled],input[type="search"][disabled],input[type="tel"][disabled],input[type="color"][disabled]{cursor:not-allowed;background-color:#fafafa}input:focus:invalid,textarea:focus:invalid,select:focus:invalid{color:#E74C3C;border:1px solid #E74C3C}input:focus:invalid:focus,textarea:focus:invalid:focus,select:focus:invalid:focus{border-color:#E74C3C}input[type="file"]:focus:invalid:focus,input[type="radio"]:focus:invalid:focus,input[type="checkbox"]:focus:invalid:focus{outline-color:#E74C3C}input.wy-input-large{padding:12px;font-size:100%}textarea{overflow:auto;vertical-align:top;width:100%;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif}select,textarea{padding:.5em .625em;display:inline-block;border:1px solid #ccc;font-size:80%;box-shadow:inset 0 1px 3px #ddd;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}select{border:1px solid #ccc;background-color:#fff}select[multiple]{height:auto}select:focus,textarea:focus{outline:0}select[disabled],textarea[disabled],input[readonly],select[readonly],textarea[readonly]{cursor:not-allowed;background-color:#fafafa}input[type="radio"][disabled],input[type="checkbox"][disabled]{cursor:not-allowed}.wy-checkbox,.wy-radio{margin:6px 0;color:#404040;display:block}.wy-checkbox input,.wy-radio input{vertical-align:baseline}.wy-form-message-inline{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-input-prefix,.wy-input-suffix{white-space:nowrap;padding:6px}.wy-input-prefix .wy-input-context,.wy-input-suffix .wy-input-context{line-height:27px;padding:0 
8px;display:inline-block;font-size:80%;background-color:#f3f6f6;border:solid 1px #ccc;color:#999}.wy-input-suffix .wy-input-context{border-left:0}.wy-input-prefix .wy-input-context{border-right:0}.wy-switch{position:relative;display:block;height:24px;margin-top:12px;cursor:pointer}.wy-switch:before{position:absolute;content:"";display:block;left:0;top:0;width:36px;height:12px;border-radius:4px;background:#ccc;-webkit-transition:all .2s ease-in-out;-moz-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.wy-switch:after{position:absolute;content:"";display:block;width:18px;height:18px;border-radius:4px;background:#999;left:-3px;top:-3px;-webkit-transition:all .2s ease-in-out;-moz-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.wy-switch span{position:absolute;left:48px;display:block;font-size:12px;color:#ccc;line-height:1}.wy-switch.active:before{background:#1e8449}.wy-switch.active:after{left:24px;background:#27AE60}.wy-switch.disabled{cursor:not-allowed;opacity:.8}.wy-control-group.wy-control-group-error .wy-form-message,.wy-control-group.wy-control-group-error>label{color:#E74C3C}.wy-control-group.wy-control-group-error input[type="text"],.wy-control-group.wy-control-group-error input[type="password"],.wy-control-group.wy-control-group-error input[type="email"],.wy-control-group.wy-control-group-error input[type="url"],.wy-control-group.wy-control-group-error input[type="date"],.wy-control-group.wy-control-group-error input[type="month"],.wy-control-group.wy-control-group-error input[type="time"],.wy-control-group.wy-control-group-error input[type="datetime"],.wy-control-group.wy-control-group-error input[type="datetime-local"],.wy-control-group.wy-control-group-error input[type="week"],.wy-control-group.wy-control-group-error input[type="number"],.wy-control-group.wy-control-group-error input[type="search"],.wy-control-group.wy-control-group-error input[type="tel"],.wy-control-group.wy-control-group-error 
input[type="color"]{border:solid 1px #E74C3C}.wy-control-group.wy-control-group-error textarea{border:solid 1px #E74C3C}.wy-inline-validate{white-space:nowrap}.wy-inline-validate .wy-input-context{padding:.5em .625em;display:inline-block;font-size:80%}.wy-inline-validate.wy-inline-validate-success .wy-input-context{color:#27AE60}.wy-inline-validate.wy-inline-validate-danger .wy-input-context{color:#E74C3C}.wy-inline-validate.wy-inline-validate-warning .wy-input-context{color:#E67E22}.wy-inline-validate.wy-inline-validate-info .wy-input-context{color:#2980B9}.rotate-90{-webkit-transform:rotate(90deg);-moz-transform:rotate(90deg);-ms-transform:rotate(90deg);-o-transform:rotate(90deg);transform:rotate(90deg)}.rotate-180{-webkit-transform:rotate(180deg);-moz-transform:rotate(180deg);-ms-transform:rotate(180deg);-o-transform:rotate(180deg);transform:rotate(180deg)}.rotate-270{-webkit-transform:rotate(270deg);-moz-transform:rotate(270deg);-ms-transform:rotate(270deg);-o-transform:rotate(270deg);transform:rotate(270deg)}.mirror{-webkit-transform:scaleX(-1);-moz-transform:scaleX(-1);-ms-transform:scaleX(-1);-o-transform:scaleX(-1);transform:scaleX(-1)}.mirror.rotate-90{-webkit-transform:scaleX(-1) rotate(90deg);-moz-transform:scaleX(-1) rotate(90deg);-ms-transform:scaleX(-1) rotate(90deg);-o-transform:scaleX(-1) rotate(90deg);transform:scaleX(-1) rotate(90deg)}.mirror.rotate-180{-webkit-transform:scaleX(-1) rotate(180deg);-moz-transform:scaleX(-1) rotate(180deg);-ms-transform:scaleX(-1) rotate(180deg);-o-transform:scaleX(-1) rotate(180deg);transform:scaleX(-1) rotate(180deg)}.mirror.rotate-270{-webkit-transform:scaleX(-1) rotate(270deg);-moz-transform:scaleX(-1) rotate(270deg);-ms-transform:scaleX(-1) rotate(270deg);-o-transform:scaleX(-1) rotate(270deg);transform:scaleX(-1) rotate(270deg)}@media only screen and (max-width: 480px){.wy-form button[type="submit"]{margin:.7em 0 0}.wy-form input[type="text"],.wy-form input[type="password"],.wy-form input[type="email"],.wy-form 
input[type="url"],.wy-form input[type="date"],.wy-form input[type="month"],.wy-form input[type="time"],.wy-form input[type="datetime"],.wy-form input[type="datetime-local"],.wy-form input[type="week"],.wy-form input[type="number"],.wy-form input[type="search"],.wy-form input[type="tel"],.wy-form input[type="color"]{margin-bottom:.3em;display:block}.wy-form label{margin-bottom:.3em;display:block}.wy-form input[type="password"],.wy-form input[type="email"],.wy-form input[type="url"],.wy-form input[type="date"],.wy-form input[type="month"],.wy-form input[type="time"],.wy-form input[type="datetime"],.wy-form input[type="datetime-local"],.wy-form input[type="week"],.wy-form input[type="number"],.wy-form input[type="search"],.wy-form input[type="tel"],.wy-form input[type="color"]{margin-bottom:0}.wy-form-aligned .wy-control-group label{margin-bottom:.3em;text-align:left;display:block;width:100%}.wy-form-aligned .wy-control{margin:1.5em 0 0 0}.wy-form .wy-help-inline,.wy-form-message-inline,.wy-form-message{display:block;font-size:80%;padding:6px 0}}@media screen and (max-width: 768px){.tablet-hide{display:none}}@media screen and (max-width: 480px){.mobile-hide{display:none}}.float-left{float:left}.float-right{float:right}.full-width{width:100%}.wy-table,.rst-content table.docutils,.rst-content table.field-list{border-collapse:collapse;border-spacing:0;empty-cells:show;margin-bottom:24px}.wy-table caption,.rst-content table.docutils caption,.rst-content table.field-list caption{color:#000;font:italic 85%/1 arial,sans-serif;padding:1em 0;text-align:center}.wy-table td,.rst-content table.docutils td,.rst-content table.field-list td,.wy-table th,.rst-content table.docutils th,.rst-content table.field-list th{font-size:90%;margin:0;overflow:visible;padding:8px 16px}.wy-table td:first-child,.rst-content table.docutils td:first-child,.rst-content table.field-list td:first-child,.wy-table th:first-child,.rst-content table.docutils th:first-child,.rst-content table.field-list 
th:first-child{border-left-width:0}.wy-table thead,.rst-content table.docutils thead,.rst-content table.field-list thead{color:#000;text-align:left;vertical-align:bottom;white-space:nowrap}.wy-table thead th,.rst-content table.docutils thead th,.rst-content table.field-list thead th{font-weight:bold;border-bottom:solid 2px #e1e4e5}.wy-table td,.rst-content table.docutils td,.rst-content table.field-list td{background-color:transparent;vertical-align:middle}.wy-table td p,.rst-content table.docutils td p,.rst-content table.field-list td p{line-height:18px}.wy-table td p:last-child,.rst-content table.docutils td p:last-child,.rst-content table.field-list td p:last-child{margin-bottom:0}.wy-table .wy-table-cell-min,.rst-content table.docutils .wy-table-cell-min,.rst-content table.field-list .wy-table-cell-min{width:1%;padding-right:0}.wy-table .wy-table-cell-min input[type=checkbox],.rst-content table.docutils .wy-table-cell-min input[type=checkbox],.rst-content table.field-list .wy-table-cell-min input[type=checkbox],.wy-table .wy-table-cell-min input[type=checkbox],.rst-content table.docutils .wy-table-cell-min input[type=checkbox],.rst-content table.field-list .wy-table-cell-min input[type=checkbox]{margin:0}.wy-table-secondary{color:gray;font-size:90%}.wy-table-tertiary{color:gray;font-size:80%}.wy-table-odd td,.wy-table-striped tr:nth-child(2n-1) td,.rst-content table.docutils:not(.field-list) tr:nth-child(2n-1) td{background-color:#f3f6f6}.wy-table-backed{background-color:#f3f6f6}.wy-table-bordered-all,.rst-content table.docutils{border:1px solid #e1e4e5}.wy-table-bordered-all td,.rst-content table.docutils td{border-bottom:1px solid #e1e4e5;border-left:1px solid #e1e4e5}.wy-table-bordered-all tbody>tr:last-child td,.rst-content table.docutils tbody>tr:last-child td{border-bottom-width:0}.wy-table-bordered{border:1px solid #e1e4e5}.wy-table-bordered-rows td{border-bottom:1px solid #e1e4e5}.wy-table-bordered-rows tbody>tr:last-child 
td{border-bottom-width:0}.wy-table-horizontal tbody>tr:last-child td{border-bottom-width:0}.wy-table-horizontal td,.wy-table-horizontal th{border-width:0 0 1px 0;border-bottom:1px solid #e1e4e5}.wy-table-horizontal tbody>tr:last-child td{border-bottom-width:0}.wy-table-responsive{margin-bottom:24px;max-width:100%;overflow:auto}.wy-table-responsive table{margin-bottom:0 !important}.wy-table-responsive table td,.wy-table-responsive table th{white-space:nowrap}a{color:#2980B9;text-decoration:none;cursor:pointer}a:hover{color:#3091d1}a:visited{color:#9B59B6}html{height:100%;overflow-x:hidden}body{font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;font-weight:normal;color:#404040;min-height:100%;overflow-x:hidden;background:#edf0f2}.wy-text-left{text-align:left}.wy-text-center{text-align:center}.wy-text-right{text-align:right}.wy-text-large{font-size:120%}.wy-text-normal{font-size:100%}.wy-text-small,small{font-size:80%}.wy-text-strike{text-decoration:line-through}.wy-text-warning{color:#E67E22 !important}a.wy-text-warning:hover{color:#eb9950 !important}.wy-text-info{color:#2980B9 !important}a.wy-text-info:hover{color:#409ad5 !important}.wy-text-success{color:#27AE60 !important}a.wy-text-success:hover{color:#36d278 !important}.wy-text-danger{color:#E74C3C !important}a.wy-text-danger:hover{color:#ed7669 !important}.wy-text-neutral{color:#404040 !important}a.wy-text-neutral:hover{color:#595959 !important}h1,h2,.rst-content .toctree-wrapper p.caption,h3,h4,h5,h6,legend{margin-top:0;font-weight:700;font-family:"Roboto Slab","ff-tisa-web-pro","Georgia",Arial,sans-serif}p{line-height:24px;margin:0;font-size:16px;margin-bottom:24px}h1{font-size:175%}h2,.rst-content .toctree-wrapper p.caption{font-size:150%}h3{font-size:125%}h4{font-size:115%}h5{font-size:110%}h6{font-size:100%}hr{display:block;height:1px;border:0;border-top:1px solid #e1e4e5;margin:24px 0;padding:0}code,.rst-content tt,.rst-content 
code{white-space:nowrap;max-width:100%;background:#fff;border:solid 1px #e1e4e5;font-size:75%;padding:0 5px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;color:#E74C3C;overflow-x:auto}code.code-large,.rst-content tt.code-large{font-size:90%}.wy-plain-list-disc,.rst-content .section ul,.rst-content .toctree-wrapper ul,article ul{list-style:disc;line-height:24px;margin-bottom:24px}.wy-plain-list-disc li,.rst-content .section ul li,.rst-content .toctree-wrapper ul li,article ul li{list-style:disc;margin-left:24px}.wy-plain-list-disc li p:last-child,.rst-content .section ul li p:last-child,.rst-content .toctree-wrapper ul li p:last-child,article ul li p:last-child{margin-bottom:0}.wy-plain-list-disc li ul,.rst-content .section ul li ul,.rst-content .toctree-wrapper ul li ul,article ul li ul{margin-bottom:0}.wy-plain-list-disc li li,.rst-content .section ul li li,.rst-content .toctree-wrapper ul li li,article ul li li{list-style:circle}.wy-plain-list-disc li li li,.rst-content .section ul li li li,.rst-content .toctree-wrapper ul li li li,article ul li li li{list-style:square}.wy-plain-list-disc li ol li,.rst-content .section ul li ol li,.rst-content .toctree-wrapper ul li ol li,article ul li ol li{list-style:decimal}.wy-plain-list-decimal,.rst-content .section ol,.rst-content ol.arabic,article ol{list-style:decimal;line-height:24px;margin-bottom:24px}.wy-plain-list-decimal li,.rst-content .section ol li,.rst-content ol.arabic li,article ol li{list-style:decimal;margin-left:24px}.wy-plain-list-decimal li p:last-child,.rst-content .section ol li p:last-child,.rst-content ol.arabic li p:last-child,article ol li p:last-child{margin-bottom:0}.wy-plain-list-decimal li ul,.rst-content .section ol li ul,.rst-content ol.arabic li ul,article ol li ul{margin-bottom:0}.wy-plain-list-decimal li ul li,.rst-content .section ol li ul li,.rst-content ol.arabic li ul li,article ol li ul 
li{list-style:disc}.wy-breadcrumbs{*zoom:1}.wy-breadcrumbs:before,.wy-breadcrumbs:after{display:table;content:""}.wy-breadcrumbs:after{clear:both}.wy-breadcrumbs li{display:inline-block}.wy-breadcrumbs li.wy-breadcrumbs-aside{float:right}.wy-breadcrumbs li a{display:inline-block;padding:5px}.wy-breadcrumbs li a:first-child{padding-left:0}.wy-breadcrumbs li code,.wy-breadcrumbs li .rst-content tt,.rst-content .wy-breadcrumbs li tt{padding:5px;border:none;background:none}.wy-breadcrumbs li code.literal,.wy-breadcrumbs li .rst-content tt.literal,.rst-content .wy-breadcrumbs li tt.literal{color:#404040}.wy-breadcrumbs-extra{margin-bottom:0;color:#b3b3b3;font-size:80%;display:inline-block}@media screen and (max-width: 480px){.wy-breadcrumbs-extra{display:none}.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}@media print{.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}html{font-size:16px}.wy-affix{position:fixed;top:1.618em}.wy-menu a:hover{text-decoration:none}.wy-menu-horiz{*zoom:1}.wy-menu-horiz:before,.wy-menu-horiz:after{display:table;content:""}.wy-menu-horiz:after{clear:both}.wy-menu-horiz ul,.wy-menu-horiz li{display:inline-block}.wy-menu-horiz li:hover{background:rgba(255,255,255,0.1)}.wy-menu-horiz li.divide-left{border-left:solid 1px #404040}.wy-menu-horiz li.divide-right{border-right:solid 1px #404040}.wy-menu-horiz a{height:32px;display:inline-block;line-height:32px;padding:0 16px}.wy-menu-vertical{width:300px}.wy-menu-vertical header,.wy-menu-vertical p.caption{color:#3a7ca8;height:32px;display:inline-block;line-height:32px;padding:0 1.618em;margin:12px 0 0 0;display:block;font-weight:bold;text-transform:uppercase;font-size:85%;white-space:nowrap}.wy-menu-vertical ul{margin-bottom:0}.wy-menu-vertical li.divide-top{border-top:solid 1px #404040}.wy-menu-vertical li.divide-bottom{border-bottom:solid 1px #404040}.wy-menu-vertical li.current{background:#e3e3e3}.wy-menu-vertical li.current a{color:gray;border-right:solid 1px #c9c9c9;padding:.4045em 
2.427em}.wy-menu-vertical li.current a:hover{background:#d6d6d6}.wy-menu-vertical li code,.wy-menu-vertical li .rst-content tt,.rst-content .wy-menu-vertical li tt{border:none;background:inherit;color:inherit;padding-left:0;padding-right:0}.wy-menu-vertical li span.toctree-expand{display:block;float:left;margin-left:-1.2em;font-size:.8em;line-height:1.6em;color:#4d4d4d}.wy-menu-vertical li.on a,.wy-menu-vertical li.current>a{color:#404040;padding:.4045em 1.618em;font-weight:bold;position:relative;background:#fcfcfc;border:none;padding-left:1.618em -4px}.wy-menu-vertical li.on a:hover,.wy-menu-vertical li.current>a:hover{background:#fcfcfc}.wy-menu-vertical li.on a:hover span.toctree-expand,.wy-menu-vertical li.current>a:hover span.toctree-expand{color:gray}.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.current>a span.toctree-expand{display:block;font-size:.8em;line-height:1.6em;color:#333}.wy-menu-vertical li.toctree-l1.current>a{border-bottom:solid 1px #c9c9c9;border-top:solid 1px #c9c9c9}.wy-menu-vertical li.toctree-l2 a,.wy-menu-vertical li.toctree-l3 a,.wy-menu-vertical li.toctree-l4 a{color:#404040}.wy-menu-vertical li.toctree-l1.current li.toctree-l2>ul,.wy-menu-vertical li.toctree-l2.current li.toctree-l3>ul{display:none}.wy-menu-vertical li.toctree-l1.current li.toctree-l2.current>ul,.wy-menu-vertical li.toctree-l2.current li.toctree-l3.current>ul{display:block}.wy-menu-vertical li.toctree-l2.current>a{background:#c9c9c9;padding:.4045em 2.427em}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{display:block;background:#c9c9c9;padding:.4045em 4.045em}.wy-menu-vertical li.toctree-l2 a:hover span.toctree-expand{color:gray}.wy-menu-vertical li.toctree-l2 span.toctree-expand{color:#a3a3a3}.wy-menu-vertical li.toctree-l3{font-size:.9em}.wy-menu-vertical li.toctree-l3.current>a{background:#bdbdbd;padding:.4045em 4.045em}.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{display:block;background:#bdbdbd;padding:.4045em 
5.663em}.wy-menu-vertical li.toctree-l3 a:hover span.toctree-expand{color:gray}.wy-menu-vertical li.toctree-l3 span.toctree-expand{color:#969696}.wy-menu-vertical li.toctree-l4{font-size:.9em}.wy-menu-vertical li.current ul{display:block}.wy-menu-vertical li ul{margin-bottom:0;display:none}.wy-menu-vertical li ul li a{margin-bottom:0;color:#d9d9d9;font-weight:normal}.wy-menu-vertical a{display:inline-block;line-height:18px;padding:.4045em 1.618em;display:block;position:relative;font-size:90%;color:#d9d9d9}.wy-menu-vertical a:hover{background-color:#4e4a4a;cursor:pointer}.wy-menu-vertical a:hover span.toctree-expand{color:#d9d9d9}.wy-menu-vertical a:active{background-color:#2980B9;cursor:pointer;color:#fff}.wy-menu-vertical a:active span.toctree-expand{color:#fff}.wy-side-nav-search{display:block;width:300px;padding:.809em;margin-bottom:.809em;z-index:200;background-color:#2980B9;text-align:center;padding:.809em;display:block;color:#fcfcfc;margin-bottom:.809em}.wy-side-nav-search input[type=text]{width:100%;border-radius:50px;padding:6px 12px;border-color:#2472a4}.wy-side-nav-search img{display:block;margin:auto auto .809em auto;height:45px;width:45px;background-color:#2980B9;padding:5px;border-radius:100%}.wy-side-nav-search>a,.wy-side-nav-search .wy-dropdown>a{color:#fcfcfc;font-size:100%;font-weight:bold;display:inline-block;padding:4px 6px;margin-bottom:.809em}.wy-side-nav-search>a:hover,.wy-side-nav-search .wy-dropdown>a:hover{background:rgba(255,255,255,0.1)}.wy-side-nav-search>a img.logo,.wy-side-nav-search .wy-dropdown>a img.logo{display:block;margin:0 auto;height:auto;width:auto;border-radius:0;max-width:100%;background:transparent}.wy-side-nav-search>a.icon img.logo,.wy-side-nav-search .wy-dropdown>a.icon img.logo{margin-top:.85em}.wy-side-nav-search>div.version{margin-top:-.4045em;margin-bottom:.809em;font-weight:normal;color:rgba(255,255,255,0.3)}.wy-nav .wy-menu-vertical header{color:#2980B9}.wy-nav .wy-menu-vertical a{color:#b3b3b3}.wy-nav 
.wy-menu-vertical a:hover{background-color:#2980B9;color:#fff}[data-menu-wrap]{-webkit-transition:all .2s ease-in;-moz-transition:all .2s ease-in;transition:all .2s ease-in;position:absolute;opacity:1;width:100%;opacity:0}[data-menu-wrap].move-center{left:0;right:auto;opacity:1}[data-menu-wrap].move-left{right:auto;left:-100%;opacity:0}[data-menu-wrap].move-right{right:-100%;left:auto;opacity:0}.wy-body-for-nav{background:#fcfcfc}.wy-grid-for-nav{position:absolute;width:100%;height:100%}.wy-nav-side{position:fixed;top:0;bottom:0;left:0;padding-bottom:2em;width:300px;overflow-x:hidden;overflow-y:hidden;min-height:100%;color:#9b9b9b;background:#343131;z-index:200}.wy-side-scroll{width:320px;position:relative;overflow-x:hidden;overflow-y:scroll;height:100%}.wy-nav-top{display:none;background:#2980B9;color:#fff;padding:.4045em .809em;position:relative;line-height:50px;text-align:center;font-size:100%;*zoom:1}.wy-nav-top:before,.wy-nav-top:after{display:table;content:""}.wy-nav-top:after{clear:both}.wy-nav-top a{color:#fff;font-weight:bold}.wy-nav-top img{margin-right:12px;height:45px;width:45px;background-color:#2980B9;padding:5px;border-radius:100%}.wy-nav-top i{font-size:30px;float:left;cursor:pointer;padding-top:inherit}.wy-nav-content-wrap{margin-left:300px;background:#fcfcfc;min-height:100%}.wy-nav-content{padding:1.618em 3.236em;height:100%;max-width:800px;margin:auto}.wy-body-mask{position:fixed;width:100%;height:100%;background:rgba(0,0,0,0.2);display:none;z-index:499}.wy-body-mask.on{display:block}footer{color:gray}footer p{margin-bottom:12px}footer span.commit code,footer span.commit .rst-content tt,.rst-content footer span.commit tt{padding:0px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier 
New",Courier,monospace;font-size:1em;background:none;border:none;color:gray}.rst-footer-buttons{*zoom:1}.rst-footer-buttons:before,.rst-footer-buttons:after{width:100%}.rst-footer-buttons:before,.rst-footer-buttons:after{display:table;content:""}.rst-footer-buttons:after{clear:both}.rst-breadcrumbs-buttons{margin-top:12px;*zoom:1}.rst-breadcrumbs-buttons:before,.rst-breadcrumbs-buttons:after{display:table;content:""}.rst-breadcrumbs-buttons:after{clear:both}#search-results .search li{margin-bottom:24px;border-bottom:solid 1px #e1e4e5;padding-bottom:24px}#search-results .search li:first-child{border-top:solid 1px #e1e4e5;padding-top:24px}#search-results .search li a{font-size:120%;margin-bottom:12px;display:inline-block}#search-results .context{color:gray;font-size:90%}.genindextable li>ul{margin-left:24px}@media screen and (max-width: 768px){.wy-body-for-nav{background:#fcfcfc}.wy-nav-top{display:block}.wy-nav-side{left:-300px}.wy-nav-side.shift{width:85%;left:0}.wy-side-scroll{width:auto}.wy-side-nav-search{width:auto}.wy-menu.wy-menu-vertical{width:auto}.wy-nav-content-wrap{margin-left:0}.wy-nav-content-wrap .wy-nav-content{padding:1.618em}.wy-nav-content-wrap.shift{position:fixed;min-width:100%;left:85%;top:0;height:100%;overflow:hidden}}@media screen and (min-width: 1100px){.wy-nav-content-wrap{background:rgba(0,0,0,0.05)}.wy-nav-content{margin:0;background:#fcfcfc}}@media print{.rst-versions,footer,.wy-nav-side{display:none}.wy-nav-content-wrap{margin-left:0}}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;z-index:400}.rst-versions a{color:#2980B9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27AE60;*zoom:1}.rst-versions .rst-current-version:before,.rst-versions 
.rst-current-version:after{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-versions .rst-current-version .fa,.rst-versions .rst-current-version .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li .rst-versions .rst-current-version span.toctree-expand,.rst-versions .rst-current-version .rst-content .admonition-title,.rst-content .rst-versions .rst-current-version .admonition-title,.rst-versions .rst-current-version .rst-content h1 .headerlink,.rst-content h1 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h2 .headerlink,.rst-content h2 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h3 .headerlink,.rst-content h3 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h4 .headerlink,.rst-content h4 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h5 .headerlink,.rst-content h5 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h6 .headerlink,.rst-content h6 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content dl dt .headerlink,.rst-content dl dt .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content p.caption .headerlink,.rst-content p.caption .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content table>caption .headerlink,.rst-content table>caption .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content .code-block-caption .headerlink,.rst-content .code-block-caption .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content tt.download span:first-child,.rst-content tt.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .rst-content code.download 
span:first-child,.rst-content code.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .icon{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#E74C3C;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#F1C40F;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:gray;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:solid 1px #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .icon-book{float:none}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge .rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width: 768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}}.rst-content img{max-width:100%;height:auto}.rst-content div.figure{margin-bottom:24px}.rst-content div.figure p.caption{font-style:italic}.rst-content div.figure p:last-child.caption{margin-bottom:0px}.rst-content 
div.figure.align-center{text-align:center}.rst-content .section>img,.rst-content .section>a>img{margin-bottom:24px}.rst-content abbr[title]{text-decoration:none}.rst-content.style-external-links a.reference.external:after{font-family:FontAwesome;content:"";color:#b3b3b3;vertical-align:super;font-size:60%;margin:0 .2em}.rst-content blockquote{margin-left:24px;line-height:24px;margin-bottom:24px}.rst-content pre.literal-block{white-space:pre;margin:0;padding:12px 12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;display:block;overflow:auto}.rst-content pre.literal-block,.rst-content div[class^='highlight']{border:1px solid #e1e4e5;overflow-x:auto;margin:1px 0 24px 0}.rst-content pre.literal-block div[class^='highlight'],.rst-content div[class^='highlight'] div[class^='highlight']{padding:0px;border:none;margin:0}.rst-content div[class^='highlight'] td.code{width:100%}.rst-content .linenodiv pre{border-right:solid 1px #e6e9ea;margin:0;padding:12px 12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;user-select:none;pointer-events:none}.rst-content div[class^='highlight'] pre{white-space:pre;margin:0;padding:12px 12px;display:block;overflow:auto}.rst-content div[class^='highlight'] pre .hll{display:block;margin:0 -12px;padding:0 12px}.rst-content pre.literal-block,.rst-content div[class^='highlight'] pre,.rst-content .linenodiv pre{font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;font-size:12px;line-height:1.4}.rst-content .code-block-caption{font-style:italic;font-size:85%;line-height:1;padding:1em 0;text-align:center}@media print{.rst-content .codeblock,.rst-content div[class^='highlight'],.rst-content div[class^='highlight'] pre{white-space:pre-wrap}}.rst-content .note .last,.rst-content .attention .last,.rst-content .caution .last,.rst-content .danger .last,.rst-content .error .last,.rst-content .hint 
.last,.rst-content .important .last,.rst-content .tip .last,.rst-content .warning .last,.rst-content .seealso .last,.rst-content .admonition-todo .last,.rst-content .admonition .last{margin-bottom:0}.rst-content .admonition-title:before{margin-right:4px}.rst-content .admonition table{border-color:rgba(0,0,0,0.1)}.rst-content .admonition table td,.rst-content .admonition table th{background:transparent !important;border-color:rgba(0,0,0,0.1) !important}.rst-content .section ol.loweralpha,.rst-content .section ol.loweralpha li{list-style:lower-alpha}.rst-content .section ol.upperalpha,.rst-content .section ol.upperalpha li{list-style:upper-alpha}.rst-content .section ol p,.rst-content .section ul p{margin-bottom:12px}.rst-content .section ol p:last-child,.rst-content .section ul p:last-child{margin-bottom:24px}.rst-content .line-block{margin-left:0px;margin-bottom:24px;line-height:24px}.rst-content .line-block .line-block{margin-left:24px;margin-bottom:0px}.rst-content .topic-title{font-weight:bold;margin-bottom:12px}.rst-content .toc-backref{color:#404040}.rst-content .align-right{float:right;margin:0px 0px 24px 24px}.rst-content .align-left{float:left;margin:0px 24px 24px 0px}.rst-content .align-center{margin:auto}.rst-content .align-center:not(table){display:block}.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content .toctree-wrapper p.caption .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content dl dt .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink,.rst-content .code-block-caption .headerlink{visibility:hidden;font-size:14px}.rst-content h1 .headerlink:after,.rst-content h2 .headerlink:after,.rst-content .toctree-wrapper p.caption .headerlink:after,.rst-content h3 .headerlink:after,.rst-content h4 .headerlink:after,.rst-content h5 .headerlink:after,.rst-content h6 .headerlink:after,.rst-content dl dt 
.headerlink:after,.rst-content p.caption .headerlink:after,.rst-content table>caption .headerlink:after,.rst-content .code-block-caption .headerlink:after{content:"";font-family:FontAwesome}.rst-content h1:hover .headerlink:after,.rst-content h2:hover .headerlink:after,.rst-content .toctree-wrapper p.caption:hover .headerlink:after,.rst-content h3:hover .headerlink:after,.rst-content h4:hover .headerlink:after,.rst-content h5:hover .headerlink:after,.rst-content h6:hover .headerlink:after,.rst-content dl dt:hover .headerlink:after,.rst-content p.caption:hover .headerlink:after,.rst-content table>caption:hover .headerlink:after,.rst-content .code-block-caption:hover .headerlink:after{visibility:visible}.rst-content table>caption .headerlink:after{font-size:12px}.rst-content .centered{text-align:center}.rst-content .sidebar{float:right;width:40%;display:block;margin:0 0 24px 24px;padding:24px;background:#f3f6f6;border:solid 1px #e1e4e5}.rst-content .sidebar p,.rst-content .sidebar ul,.rst-content .sidebar dl{font-size:90%}.rst-content .sidebar .last{margin-bottom:0}.rst-content .sidebar .sidebar-title{display:block;font-family:"Roboto Slab","ff-tisa-web-pro","Georgia",Arial,sans-serif;font-weight:bold;background:#e1e4e5;padding:6px 12px;margin:-24px;margin-bottom:24px;font-size:100%}.rst-content .highlighted{background:#F1C40F;display:inline-block;font-weight:bold;padding:0 6px}.rst-content .footnote-reference,.rst-content .citation-reference{vertical-align:baseline;position:relative;top:-0.4em;line-height:0;font-size:90%}.rst-content table.docutils.citation,.rst-content table.docutils.footnote{background:none;border:none;color:gray}.rst-content table.docutils.citation td,.rst-content table.docutils.citation tr,.rst-content table.docutils.footnote td,.rst-content table.docutils.footnote tr{border:none;background-color:transparent !important;white-space:normal}.rst-content table.docutils.citation td.label,.rst-content table.docutils.footnote 
td.label{padding-left:0;padding-right:0;vertical-align:top}.rst-content table.docutils.citation tt,.rst-content table.docutils.citation code,.rst-content table.docutils.footnote tt,.rst-content table.docutils.footnote code{color:#555}.rst-content .wy-table-responsive.citation,.rst-content .wy-table-responsive.footnote{margin-bottom:0}.rst-content .wy-table-responsive.citation+:not(.citation),.rst-content .wy-table-responsive.footnote+:not(.footnote){margin-top:24px}.rst-content .wy-table-responsive.citation:last-child,.rst-content .wy-table-responsive.footnote:last-child{margin-bottom:24px}.rst-content table.docutils th{border-color:#e1e4e5}.rst-content table.docutils td .last,.rst-content table.docutils td .last :last-child{margin-bottom:0}.rst-content table.field-list{border:none}.rst-content table.field-list td{border:none}.rst-content table.field-list td p{font-size:inherit;line-height:inherit}.rst-content table.field-list td>strong{display:inline-block}.rst-content table.field-list .field-name{padding-right:10px;text-align:left;white-space:nowrap}.rst-content table.field-list .field-body{text-align:left}.rst-content tt,.rst-content tt,.rst-content code{color:#000;font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;padding:2px 5px}.rst-content tt big,.rst-content tt em,.rst-content tt big,.rst-content code big,.rst-content tt em,.rst-content code em{font-size:100% !important;line-height:normal}.rst-content tt.literal,.rst-content tt.literal,.rst-content code.literal{color:#E74C3C}.rst-content tt.xref,a .rst-content tt,.rst-content tt.xref,.rst-content code.xref,a .rst-content tt,a .rst-content code{font-weight:bold;color:#404040}.rst-content pre,.rst-content kbd,.rst-content samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace}.rst-content a tt,.rst-content a tt,.rst-content a code{color:#2980B9}.rst-content dl{margin-bottom:24px}.rst-content dl 
dt{font-weight:bold;margin-bottom:12px}.rst-content dl p,.rst-content dl table,.rst-content dl ul,.rst-content dl ol{margin-bottom:12px !important}.rst-content dl dd{margin:0 0 12px 24px;line-height:24px}.rst-content dl:not(.docutils){margin-bottom:24px}.rst-content dl:not(.docutils) dt{display:table;margin:6px 0;font-size:90%;line-height:normal;background:#e7f2fa;color:#2980B9;border-top:solid 3px #6ab0de;padding:6px;position:relative}.rst-content dl:not(.docutils) dt:before{color:#6ab0de}.rst-content dl:not(.docutils) dt .headerlink{color:#404040;font-size:100% !important}.rst-content dl:not(.docutils) dl dt{margin-bottom:6px;border:none;border-left:solid 3px #ccc;background:#f0f0f0;color:#555}.rst-content dl:not(.docutils) dl dt .headerlink{color:#404040;font-size:100% !important}.rst-content dl:not(.docutils) dt:first-child{margin-top:0}.rst-content dl:not(.docutils) tt,.rst-content dl:not(.docutils) tt,.rst-content dl:not(.docutils) code{font-weight:bold}.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) tt.descclassname,.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) code.descname,.rst-content dl:not(.docutils) tt.descclassname,.rst-content dl:not(.docutils) code.descclassname{background-color:transparent;border:none;padding:0;font-size:100% !important}.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) code.descname{font-weight:bold}.rst-content dl:not(.docutils) .optional{display:inline-block;padding:0 4px;color:#000;font-weight:bold}.rst-content dl:not(.docutils) .property{display:inline-block;padding-right:8px}.rst-content .viewcode-link,.rst-content .viewcode-back{display:inline-block;color:#27AE60;font-size:80%;padding-left:24px}.rst-content .viewcode-back{display:block;float:right}.rst-content p.rubric{margin-bottom:12px;font-weight:bold}.rst-content tt.download,.rst-content 
code.download{background:inherit;padding:inherit;font-weight:normal;font-family:inherit;font-size:inherit;color:inherit;border:inherit;white-space:inherit}.rst-content tt.download span:first-child,.rst-content code.download span:first-child{-webkit-font-smoothing:subpixel-antialiased}.rst-content tt.download span:first-child:before,.rst-content code.download span:first-child:before{margin-right:4px}.rst-content .guilabel{border:1px solid #7fbbe3;background:#e7f2fa;font-size:80%;font-weight:700;border-radius:4px;padding:2.4px 6px;margin:auto 2px}.rst-content .versionmodified{font-style:italic}@media screen and (max-width: 480px){.rst-content .sidebar{width:100%}}span[id*='MathJax-Span']{color:#404040}.math{text-align:center}@font-face{font-family:"Lato";src:url("../fonts/Lato/lato-regular.eot");src:url("../fonts/Lato/lato-regular.eot?#iefix") format("embedded-opentype"),url("../fonts/Lato/lato-regular.woff2") format("woff2"),url("../fonts/Lato/lato-regular.woff") format("woff"),url("../fonts/Lato/lato-regular.ttf") format("truetype");font-weight:400;font-style:normal}@font-face{font-family:"Lato";src:url("../fonts/Lato/lato-bold.eot");src:url("../fonts/Lato/lato-bold.eot?#iefix") format("embedded-opentype"),url("../fonts/Lato/lato-bold.woff2") format("woff2"),url("../fonts/Lato/lato-bold.woff") format("woff"),url("../fonts/Lato/lato-bold.ttf") format("truetype");font-weight:700;font-style:normal}@font-face{font-family:"Lato";src:url("../fonts/Lato/lato-bolditalic.eot");src:url("../fonts/Lato/lato-bolditalic.eot?#iefix") format("embedded-opentype"),url("../fonts/Lato/lato-bolditalic.woff2") format("woff2"),url("../fonts/Lato/lato-bolditalic.woff") format("woff"),url("../fonts/Lato/lato-bolditalic.ttf") format("truetype");font-weight:700;font-style:italic}@font-face{font-family:"Lato";src:url("../fonts/Lato/lato-italic.eot");src:url("../fonts/Lato/lato-italic.eot?#iefix") format("embedded-opentype"),url("../fonts/Lato/lato-italic.woff2") 
format("woff2"),url("../fonts/Lato/lato-italic.woff") format("woff"),url("../fonts/Lato/lato-italic.ttf") format("truetype");font-weight:400;font-style:italic}@font-face{font-family:"Roboto Slab";font-style:normal;font-weight:400;src:url("../fonts/RobotoSlab/roboto-slab.eot");src:url("../fonts/RobotoSlab/roboto-slab-v7-regular.eot?#iefix") format("embedded-opentype"),url("../fonts/RobotoSlab/roboto-slab-v7-regular.woff2") format("woff2"),url("../fonts/RobotoSlab/roboto-slab-v7-regular.woff") format("woff"),url("../fonts/RobotoSlab/roboto-slab-v7-regular.ttf") format("truetype")}@font-face{font-family:"Roboto Slab";font-style:normal;font-weight:700;src:url("../fonts/RobotoSlab/roboto-slab-v7-bold.eot");src:url("../fonts/RobotoSlab/roboto-slab-v7-bold.eot?#iefix") format("embedded-opentype"),url("../fonts/RobotoSlab/roboto-slab-v7-bold.woff2") format("woff2"),url("../fonts/RobotoSlab/roboto-slab-v7-bold.woff") format("woff"),url("../fonts/RobotoSlab/roboto-slab-v7-bold.ttf") format("truetype")} diff --git a/spitalhygiene/Docs/build/html/_static/doctools.js b/spitalhygiene/Docs/build/html/_static/doctools.js new file mode 100644 index 0000000..b33f87f --- /dev/null +++ b/spitalhygiene/Docs/build/html/_static/doctools.js @@ -0,0 +1,314 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Sphinx JavaScript utilities for all documentation. + * + * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. 
+ * + */ + +/** + * select a different prefix for underscore + */ +$u = _.noConflict(); + +/** + * make the code below compatible with browsers without + * an installed firebug like debugger +if (!window.console || !console.firebug) { + var names = ["log", "debug", "info", "warn", "error", "assert", "dir", + "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", + "profile", "profileEnd"]; + window.console = {}; + for (var i = 0; i < names.length; ++i) + window.console[names[i]] = function() {}; +} + */ + +/** + * small helper function to urldecode strings + */ +jQuery.urldecode = function(x) { + return decodeURIComponent(x).replace(/\+/g, ' '); +}; + +/** + * small helper function to urlencode strings + */ +jQuery.urlencode = encodeURIComponent; + +/** + * This function returns the parsed url parameters of the + * current request. Multiple values per key are supported, + * it will always return arrays of strings for the value parts. + */ +jQuery.getQueryParameters = function(s) { + if (typeof s === 'undefined') + s = document.location.search; + var parts = s.substr(s.indexOf('?') + 1).split('&'); + var result = {}; + for (var i = 0; i < parts.length; i++) { + var tmp = parts[i].split('=', 2); + var key = jQuery.urldecode(tmp[0]); + var value = jQuery.urldecode(tmp[1]); + if (key in result) + result[key].push(value); + else + result[key] = [value]; + } + return result; +}; + +/** + * highlight a given string on a jquery object by wrapping it in + * span elements with the given class name. 
+ */ +jQuery.fn.highlightText = function(text, className) { + function highlight(node, addItems) { + if (node.nodeType === 3) { + var val = node.nodeValue; + var pos = val.toLowerCase().indexOf(text); + if (pos >= 0 && + !jQuery(node.parentNode).hasClass(className) && + !jQuery(node.parentNode).hasClass("nohighlight")) { + var span; + var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.className = className; + } + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + node.parentNode.insertBefore(span, node.parentNode.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling)); + node.nodeValue = val.substr(0, pos); + if (isInSVG) { + var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); + var bbox = node.parentElement.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute('class', className); + addItems.push({ + "parent": node.parentNode, + "target": rect}); + } + } + } + else if (!jQuery(node).is("button, select, textarea")) { + jQuery.each(node.childNodes, function() { + highlight(this, addItems); + }); + } + } + var addItems = []; + var result = this.each(function() { + highlight(this, addItems); + }); + for (var i = 0; i < addItems.length; ++i) { + jQuery(addItems[i].parent).before(addItems[i].target); + } + return result; +}; + +/* + * backward compatibility for jQuery.browser + * This will be supported until firefox bug is fixed. 
+ */ +if (!jQuery.browser) { + jQuery.uaMatch = function(ua) { + ua = ua.toLowerCase(); + + var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || + /(webkit)[ \/]([\w.]+)/.exec(ua) || + /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || + /(msie) ([\w.]+)/.exec(ua) || + ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || + []; + + return { + browser: match[ 1 ] || "", + version: match[ 2 ] || "0" + }; + }; + jQuery.browser = {}; + jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; +} + +/** + * Small JavaScript module for the documentation. + */ +var Documentation = { + + init : function() { + this.fixFirefoxAnchorBug(); + this.highlightSearchWords(); + this.initIndexTable(); + if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) { + this.initOnKeyListeners(); + } + }, + + /** + * i18n support + */ + TRANSLATIONS : {}, + PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; }, + LOCALE : 'unknown', + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext : function(string) { + var translated = Documentation.TRANSLATIONS[string]; + if (typeof translated === 'undefined') + return string; + return (typeof translated === 'string') ? translated : translated[0]; + }, + + ngettext : function(singular, plural, n) { + var translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated === 'undefined') + return (n == 1) ? singular : plural; + return translated[Documentation.PLURALEXPR(n)]; + }, + + addTranslations : function(catalog) { + for (var key in catalog.messages) + this.TRANSLATIONS[key] = catalog.messages[key]; + this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); + this.LOCALE = catalog.locale; + }, + + /** + * add context elements like header anchor links + */ + addContextElements : function() { + $('div[id] > :header:first').each(function() { + $('\u00B6'). + attr('href', '#' + this.id). 
+ attr('title', _('Permalink to this headline')). + appendTo(this); + }); + $('dt[id]').each(function() { + $('\u00B6'). + attr('href', '#' + this.id). + attr('title', _('Permalink to this definition')). + appendTo(this); + }); + }, + + /** + * workaround a firefox stupidity + * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 + */ + fixFirefoxAnchorBug : function() { + if (document.location.hash && $.browser.mozilla) + window.setTimeout(function() { + document.location.href += ''; + }, 10); + }, + + /** + * highlight the search words provided in the url in the text + */ + highlightSearchWords : function() { + var params = $.getQueryParameters(); + var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : []; + if (terms.length) { + var body = $('div.body'); + if (!body.length) { + body = $('body'); + } + window.setTimeout(function() { + $.each(terms, function() { + body.highlightText(this.toLowerCase(), 'highlighted'); + }); + }, 10); + $('') + .appendTo($('#searchbox')); + } + }, + + /** + * init the domain index toggle buttons + */ + initIndexTable : function() { + var togglers = $('img.toggler').click(function() { + var src = $(this).attr('src'); + var idnum = $(this).attr('id').substr(7); + $('tr.cg-' + idnum).toggle(); + if (src.substr(-9) === 'minus.png') + $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); + else + $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); + }).css('display', ''); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { + togglers.click(); + } + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords : function() { + $('#searchbox .highlight-link').fadeOut(300); + $('span.highlighted').removeClass('highlighted'); + }, + + /** + * make the url absolute + */ + makeURL : function(relativeURL) { + return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; + }, + + /** + * get the current relative url + */ + getCurrentURL : function() { + var path = document.location.pathname; 
+ var parts = path.split(/\//); + $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { + if (this === '..') + parts.pop(); + }); + var url = parts.join('/'); + return path.substring(url.lastIndexOf('/') + 1, path.length - 1); + }, + + initOnKeyListeners: function() { + $(document).keyup(function(event) { + var activeElementType = document.activeElement.tagName; + // don't navigate when in search box or textarea + if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { + switch (event.keyCode) { + case 37: // left + var prevHref = $('link[rel="prev"]').prop('href'); + if (prevHref) { + window.location.href = prevHref; + return false; + } + case 39: // right + var nextHref = $('link[rel="next"]').prop('href'); + if (nextHref) { + window.location.href = nextHref; + return false; + } + } + } + }); + } +}; + +// quick alias for translations +_ = Documentation.gettext; + +$(document).ready(function() { + Documentation.init(); +}); diff --git a/spitalhygiene/Docs/build/html/_static/documentation_options.js b/spitalhygiene/Docs/build/html/_static/documentation_options.js new file mode 100644 index 0000000..4f9bc45 --- /dev/null +++ b/spitalhygiene/Docs/build/html/_static/documentation_options.js @@ -0,0 +1,10 @@ +var DOCUMENTATION_OPTIONS = { + URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), + VERSION: '0.0.1', + LANGUAGE: 'None', + COLLAPSE_INDEX: false, + FILE_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt', + NAVIGATION_WITH_KEYS: false +}; \ No newline at end of file diff --git a/spitalhygiene/Docs/build/html/_static/down-pressed.png b/spitalhygiene/Docs/build/html/_static/down-pressed.png new file mode 100644 index 0000000..5756c8c Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/down-pressed.png differ diff --git a/spitalhygiene/Docs/build/html/_static/down.png b/spitalhygiene/Docs/build/html/_static/down.png new file mode 100644 
index 0000000..1b3bdad Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/down.png differ diff --git a/spitalhygiene/Docs/build/html/_static/file.png b/spitalhygiene/Docs/build/html/_static/file.png new file mode 100644 index 0000000..a858a41 Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/file.png differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/Inconsolata-Bold.ttf b/spitalhygiene/Docs/build/html/_static/fonts/Inconsolata-Bold.ttf new file mode 100644 index 0000000..809c1f5 Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/Inconsolata-Bold.ttf differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/Inconsolata-Regular.ttf b/spitalhygiene/Docs/build/html/_static/fonts/Inconsolata-Regular.ttf new file mode 100644 index 0000000..fc981ce Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/Inconsolata-Regular.ttf differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/Inconsolata.ttf b/spitalhygiene/Docs/build/html/_static/fonts/Inconsolata.ttf new file mode 100644 index 0000000..4b8a36d Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/Inconsolata.ttf differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/Lato-Bold.ttf b/spitalhygiene/Docs/build/html/_static/fonts/Lato-Bold.ttf new file mode 100644 index 0000000..1d23c70 Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/Lato-Bold.ttf differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/Lato-Regular.ttf b/spitalhygiene/Docs/build/html/_static/fonts/Lato-Regular.ttf new file mode 100644 index 0000000..0f3d0f8 Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/Lato-Regular.ttf differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-bold.eot b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-bold.eot new file mode 100644 index 0000000..3361183 Binary files /dev/null and 
b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-bold.eot differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-bold.ttf b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-bold.ttf new file mode 100644 index 0000000..29f691d Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-bold.ttf differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-bold.woff b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-bold.woff new file mode 100644 index 0000000..c6dff51 Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-bold.woff differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-bold.woff2 b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-bold.woff2 new file mode 100644 index 0000000..bb19504 Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-bold.woff2 differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-bolditalic.eot b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-bolditalic.eot new file mode 100644 index 0000000..3d41549 Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-bolditalic.eot differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-bolditalic.ttf b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-bolditalic.ttf new file mode 100644 index 0000000..f402040 Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-bolditalic.ttf differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-bolditalic.woff b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-bolditalic.woff new file mode 100644 index 0000000..88ad05b Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-bolditalic.woff differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-bolditalic.woff2 b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-bolditalic.woff2 
new file mode 100644 index 0000000..c4e3d80 Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-bolditalic.woff2 differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-italic.eot b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-italic.eot new file mode 100644 index 0000000..3f82642 Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-italic.eot differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-italic.ttf b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-italic.ttf new file mode 100644 index 0000000..b4bfc9b Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-italic.ttf differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-italic.woff b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-italic.woff new file mode 100644 index 0000000..76114bc Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-italic.woff differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-italic.woff2 b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-italic.woff2 new file mode 100644 index 0000000..3404f37 Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-italic.woff2 differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-regular.eot b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-regular.eot new file mode 100644 index 0000000..11e3f2a Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-regular.eot differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-regular.ttf b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-regular.ttf new file mode 100644 index 0000000..74decd9 Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-regular.ttf differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-regular.woff 
b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-regular.woff new file mode 100644 index 0000000..ae1307f Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-regular.woff differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-regular.woff2 b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-regular.woff2 new file mode 100644 index 0000000..3bf9843 Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/Lato/lato-regular.woff2 differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab-Bold.ttf b/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab-Bold.ttf new file mode 100644 index 0000000..df5d1df Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab-Bold.ttf differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab-Regular.ttf b/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab-Regular.ttf new file mode 100644 index 0000000..eb52a79 Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab-Regular.ttf differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot b/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot new file mode 100644 index 0000000..79dc8ef Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf b/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf new file mode 100644 index 0000000..df5d1df Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff b/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff new file mode 100644 index 0000000..6cb6000 Binary files 
/dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 b/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 new file mode 100644 index 0000000..7059e23 Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot b/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot new file mode 100644 index 0000000..2f7ca78 Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf b/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf new file mode 100644 index 0000000..eb52a79 Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff b/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff new file mode 100644 index 0000000..f815f63 Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 b/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 new file mode 100644 index 0000000..f2c76e5 Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/fontawesome-webfont.eot b/spitalhygiene/Docs/build/html/_static/fonts/fontawesome-webfont.eot new file mode 100644 index 
0000000..e9f60ca Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/fontawesome-webfont.eot differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/fontawesome-webfont.svg b/spitalhygiene/Docs/build/html/_static/fonts/fontawesome-webfont.svg new file mode 100644 index 0000000..855c845 --- /dev/null +++ b/spitalhygiene/Docs/build/html/_static/fonts/fontawesome-webfont.svg @@ -0,0 +1,2671 @@ + + + + +Created by FontForge 20120731 at Mon Oct 24 17:37:40 2016 + By ,,, +Copyright Dave Gandy 2016. All rights reserved. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git 
a/spitalhygiene/Docs/build/html/_static/fonts/fontawesome-webfont.ttf b/spitalhygiene/Docs/build/html/_static/fonts/fontawesome-webfont.ttf new file mode 100644 index 0000000..35acda2 Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/fontawesome-webfont.ttf differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/fontawesome-webfont.woff b/spitalhygiene/Docs/build/html/_static/fonts/fontawesome-webfont.woff new file mode 100644 index 0000000..400014a Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/fontawesome-webfont.woff differ diff --git a/spitalhygiene/Docs/build/html/_static/fonts/fontawesome-webfont.woff2 b/spitalhygiene/Docs/build/html/_static/fonts/fontawesome-webfont.woff2 new file mode 100644 index 0000000..4d13fc6 Binary files /dev/null and b/spitalhygiene/Docs/build/html/_static/fonts/fontawesome-webfont.woff2 differ diff --git a/spitalhygiene/Docs/build/html/_static/jquery-3.2.1.js b/spitalhygiene/Docs/build/html/_static/jquery-3.2.1.js new file mode 100644 index 0000000..d2d8ca4 --- /dev/null +++ b/spitalhygiene/Docs/build/html/_static/jquery-3.2.1.js @@ -0,0 +1,10253 @@ +/*! + * jQuery JavaScript Library v3.2.1 + * https://jquery.com/ + * + * Includes Sizzle.js + * https://sizzlejs.com/ + * + * Copyright JS Foundation and other contributors + * Released under the MIT license + * https://jquery.org/license + * + * Date: 2017-03-20T18:59Z + */ +( function( global, factory ) { + + "use strict"; + + if ( typeof module === "object" && typeof module.exports === "object" ) { + + // For CommonJS and CommonJS-like environments where a proper `window` + // is present, execute the factory and get jQuery. + // For environments that do not have a `window` with a `document` + // (such as Node.js), expose a factory as module.exports. + // This accentuates the need for the creation of a real `window`. + // e.g. var jQuery = require("jquery")(window); + // See ticket #14549 for more info. 
+ module.exports = global.document ? + factory( global, true ) : + function( w ) { + if ( !w.document ) { + throw new Error( "jQuery requires a window with a document" ); + } + return factory( w ); + }; + } else { + factory( global ); + } + +// Pass this if window is not defined yet +} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { + +// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 +// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode +// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common +// enough that all such attempts are guarded in a try block. +"use strict"; + +var arr = []; + +var document = window.document; + +var getProto = Object.getPrototypeOf; + +var slice = arr.slice; + +var concat = arr.concat; + +var push = arr.push; + +var indexOf = arr.indexOf; + +var class2type = {}; + +var toString = class2type.toString; + +var hasOwn = class2type.hasOwnProperty; + +var fnToString = hasOwn.toString; + +var ObjectFunctionString = fnToString.call( Object ); + +var support = {}; + + + + function DOMEval( code, doc ) { + doc = doc || document; + + var script = doc.createElement( "script" ); + + script.text = code; + doc.head.appendChild( script ).parentNode.removeChild( script ); + } +/* global Symbol */ +// Defining this global in .eslintrc.json would create a danger of using the global +// unguarded in another place, it seems safer to define global only for this module + + + +var + version = "3.2.1", + + // Define a local copy of jQuery + jQuery = function( selector, context ) { + + // The jQuery object is actually just the init constructor 'enhanced' + // Need init if jQuery is called (just allow error to be thrown if not included) + return new jQuery.fn.init( selector, context ); + }, + + // Support: Android <=4.0 only + // Make sure we trim BOM and NBSP + rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, + + // Matches dashed 
string for camelizing + rmsPrefix = /^-ms-/, + rdashAlpha = /-([a-z])/g, + + // Used by jQuery.camelCase as callback to replace() + fcamelCase = function( all, letter ) { + return letter.toUpperCase(); + }; + +jQuery.fn = jQuery.prototype = { + + // The current version of jQuery being used + jquery: version, + + constructor: jQuery, + + // The default length of a jQuery object is 0 + length: 0, + + toArray: function() { + return slice.call( this ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + + // Return all the elements in a clean array + if ( num == null ) { + return slice.call( this ); + } + + // Return just the one element from the set + return num < 0 ? this[ num + this.length ] : this[ num ]; + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems ) { + + // Build a new jQuery matched element set + var ret = jQuery.merge( this.constructor(), elems ); + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. + each: function( callback ) { + return jQuery.each( this, callback ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map( this, function( elem, i ) { + return callback.call( elem, i, elem ); + } ) ); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ) ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + eq: function( i ) { + var len = this.length, + j = +i + ( i < 0 ? len : 0 ); + return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(); + }, + + // For internal use only. 
+ // Behaves like an Array's method, not like a jQuery method. + push: push, + sort: arr.sort, + splice: arr.splice +}; + +jQuery.extend = jQuery.fn.extend = function() { + var options, name, src, copy, copyIsArray, clone, + target = arguments[ 0 ] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // Skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !jQuery.isFunction( target ) ) { + target = {}; + } + + // Extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + + // Only deal with non-null/undefined values + if ( ( options = arguments[ i ] ) != null ) { + + // Extend the base object + for ( name in options ) { + src = target[ name ]; + copy = options[ name ]; + + // Prevent never-ending loop + if ( target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject( copy ) || + ( copyIsArray = Array.isArray( copy ) ) ) ) { + + if ( copyIsArray ) { + copyIsArray = false; + clone = src && Array.isArray( src ) ? src : []; + + } else { + clone = src && jQuery.isPlainObject( src ) ? 
src : {}; + } + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend( { + + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new Error( msg ); + }, + + noop: function() {}, + + isFunction: function( obj ) { + return jQuery.type( obj ) === "function"; + }, + + isWindow: function( obj ) { + return obj != null && obj === obj.window; + }, + + isNumeric: function( obj ) { + + // As of jQuery 3.0, isNumeric is limited to + // strings and numbers (primitives or objects) + // that can be coerced to finite numbers (gh-2662) + var type = jQuery.type( obj ); + return ( type === "number" || type === "string" ) && + + // parseFloat NaNs numeric-cast false positives ("") + // ...but misinterprets leading-number strings, particularly hex literals ("0x...") + // subtraction forces infinities to NaN + !isNaN( obj - parseFloat( obj ) ); + }, + + isPlainObject: function( obj ) { + var proto, Ctor; + + // Detect obvious negatives + // Use toString instead of jQuery.type to catch host objects + if ( !obj || toString.call( obj ) !== "[object Object]" ) { + return false; + } + + proto = getProto( obj ); + + // Objects with no prototype (e.g., `Object.create( null )`) are plain + if ( !proto ) { + return true; + } + + // Objects with prototype are plain iff they were constructed by a global Object function + Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; + return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; + }, + + isEmptyObject: function( obj ) { + + /* eslint-disable no-unused-vars */ + // See 
https://github.com/eslint/eslint/issues/6125 + var name; + + for ( name in obj ) { + return false; + } + return true; + }, + + type: function( obj ) { + if ( obj == null ) { + return obj + ""; + } + + // Support: Android <=2.3 only (functionish RegExp) + return typeof obj === "object" || typeof obj === "function" ? + class2type[ toString.call( obj ) ] || "object" : + typeof obj; + }, + + // Evaluates a script in a global context + globalEval: function( code ) { + DOMEval( code ); + }, + + // Convert dashed to camelCase; used by the css and data modules + // Support: IE <=9 - 11, Edge 12 - 13 + // Microsoft forgot to hump their vendor prefix (#9572) + camelCase: function( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); + }, + + each: function( obj, callback ) { + var length, i = 0; + + if ( isArrayLike( obj ) ) { + length = obj.length; + for ( ; i < length; i++ ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } else { + for ( i in obj ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } + + return obj; + }, + + // Support: Android <=4.0 only + trim: function( text ) { + return text == null ? + "" : + ( text + "" ).replace( rtrim, "" ); + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArrayLike( Object( arr ) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? + [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + return arr == null ? 
-1 : indexOf.call( arr, elem, i ); + }, + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + for ( ; j < len; j++ ) { + first[ i++ ] = second[ j ]; + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var length, value, + i = 0, + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArrayLike( elems ) ) { + length = elems.length; + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + + // Go through every key on the object, + } else { + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return concat.apply( [], ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // Bind a function to a context, optionally partially applying any + // arguments. + proxy: function( fn, context ) { + var tmp, args, proxy; + + if ( typeof context === "string" ) { + tmp = fn[ context ]; + context = fn; + fn = tmp; + } + + // Quick check to determine if target is callable, in the spec + // this throws a TypeError, but we will just return undefined. 
+ if ( !jQuery.isFunction( fn ) ) { + return undefined; + } + + // Simulated bind + args = slice.call( arguments, 2 ); + proxy = function() { + return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); + }; + + // Set the guid of unique handler to the same of original handler, so it can be removed + proxy.guid = fn.guid = fn.guid || jQuery.guid++; + + return proxy; + }, + + now: Date.now, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. + support: support +} ); + +if ( typeof Symbol === "function" ) { + jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; +} + +// Populate the class2type map +jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), +function( i, name ) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); +} ); + +function isArrayLike( obj ) { + + // Support: real iOS 8.2 only (not reproducible in simulator) + // `in` check used to prevent JIT error (gh-2145) + // hasOwn isn't used here due to false negatives + // regarding Nodelist length in IE + var length = !!obj && "length" in obj && obj.length, + type = jQuery.type( obj ); + + if ( type === "function" || jQuery.isWindow( obj ) ) { + return false; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; +} +var Sizzle = +/*! 
+ * Sizzle CSS Selector Engine v2.3.3 + * https://sizzlejs.com/ + * + * Copyright jQuery Foundation and other contributors + * Released under the MIT license + * http://jquery.org/license + * + * Date: 2016-08-08 + */ +(function( window ) { + +var i, + support, + Expr, + getText, + isXML, + tokenize, + compile, + select, + outermostContext, + sortInput, + hasDuplicate, + + // Local document vars + setDocument, + document, + docElem, + documentIsHTML, + rbuggyQSA, + rbuggyMatches, + matches, + contains, + + // Instance-specific data + expando = "sizzle" + 1 * new Date(), + preferredDoc = window.document, + dirruns = 0, + done = 0, + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + } + return 0; + }, + + // Instance methods + hasOwn = ({}).hasOwnProperty, + arr = [], + pop = arr.pop, + push_native = arr.push, + push = arr.push, + slice = arr.slice, + // Use a stripped-down indexOf as it's faster than native + // https://jsperf.com/thor-indexof-vs-for/5 + indexOf = function( list, elem ) { + var i = 0, + len = list.length; + for ( ; i < len; i++ ) { + if ( list[i] === elem ) { + return i; + } + } + return -1; + }, + + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", + + // Regular expressions + + // http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + + // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier + identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", + + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" 
+ identifier + "))|)" + whitespace + + "*\\]", + + pseudos = ":(" + identifier + ")(?:\\((" + + // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: + // 1. quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + // 3. anything else (capture 2) + ".*" + + ")\\)|)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rwhitespace = new RegExp( whitespace + "+", "g" ), + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), + + rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), + + rpseudo = new RegExp( pseudos ), + ridentifier = new RegExp( "^" + identifier + "$" ), + + matchExpr = { + "ID": new RegExp( "^#(" + identifier + ")" ), + "CLASS": new RegExp( "^\\.(" + identifier + ")" ), + "TAG": new RegExp( "^(" + identifier + "|[*])" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + // For use in libraries implementing .is() + // We use this for POS matching in `select` + "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + + whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + }, + + rinputs = /^(?:input|select|textarea|button)$/i, + rheader = /^h\d$/i, + + rnative = /^[^{]+\{\s*\[native \w/, + + // 
Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, + + rsibling = /[+~]/, + + // CSS escapes + // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), + funescape = function( _, escaped, escapedWhitespace ) { + var high = "0x" + escaped - 0x10000; + // NaN means non-codepoint + // Support: Firefox<24 + // Workaround erroneous numeric interpretation of +"0x" + return high !== high || escapedWhitespace ? + escaped : + high < 0 ? + // BMP codepoint + String.fromCharCode( high + 0x10000 ) : + // Supplemental Plane codepoint (surrogate pair) + String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); + }, + + // CSS string/identifier serialization + // https://drafts.csswg.org/cssom/#common-serializing-idioms + rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, + fcssescape = function( ch, asCodePoint ) { + if ( asCodePoint ) { + + // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER + if ( ch === "\0" ) { + return "\uFFFD"; + } + + // Control characters and (dependent upon position) numbers get escaped as code points + return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; + } + + // Other potentially-special ASCII characters get backslash-escaped + return "\\" + ch; + }, + + // Used for iframes + // See setDocument() + // Removing the function wrapper causes a "Permission Denied" + // error in IE + unloadHandler = function() { + setDocument(); + }, + + disabledAncestor = addCombinator( + function( elem ) { + return elem.disabled === true && ("form" in elem || "label" in elem); + }, + { dir: "parentNode", next: "legend" } + ); + +// Optimize for push.apply( _, NodeList ) +try { + push.apply( + (arr = slice.call( preferredDoc.childNodes )), + preferredDoc.childNodes + ); + // Support: Android<4.0 + // Detect silently failing push.apply + arr[ 
preferredDoc.childNodes.length ].nodeType; +} catch ( e ) { + push = { apply: arr.length ? + + // Leverage slice if possible + function( target, els ) { + push_native.apply( target, slice.call(els) ); + } : + + // Support: IE<9 + // Otherwise append directly + function( target, els ) { + var j = target.length, + i = 0; + // Can't trust NodeList.length + while ( (target[j++] = els[i++]) ) {} + target.length = j - 1; + } + }; +} + +function Sizzle( selector, context, results, seed ) { + var m, i, elem, nid, match, groups, newSelector, + newContext = context && context.ownerDocument, + + // nodeType defaults to 9, since context defaults to document + nodeType = context ? context.nodeType : 9; + + results = results || []; + + // Return early from calls with invalid selector or context + if ( typeof selector !== "string" || !selector || + nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { + + return results; + } + + // Try to shortcut find operations (as opposed to filters) in HTML documents + if ( !seed ) { + + if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { + setDocument( context ); + } + context = context || document; + + if ( documentIsHTML ) { + + // If the selector is sufficiently simple, try using a "get*By*" DOM method + // (excepting DocumentFragment context, where the methods don't exist) + if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { + + // ID selector + if ( (m = match[1]) ) { + + // Document context + if ( nodeType === 9 ) { + if ( (elem = context.getElementById( m )) ) { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + + // Element context + } else { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( newContext && (elem = newContext.getElementById( m )) && + contains( context, elem ) && + elem.id === m ) { + + results.push( elem ); + return results; + } + } + + // Type selector + } else if ( match[2] ) { + push.apply( results, context.getElementsByTagName( selector ) ); + return results; + + // Class selector + } else if ( (m = match[3]) && support.getElementsByClassName && + context.getElementsByClassName ) { + + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + } + + // Take advantage of querySelectorAll + if ( support.qsa && + !compilerCache[ selector + " " ] && + (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { + + if ( nodeType !== 1 ) { + newContext = context; + newSelector = selector; + + // qSA looks outside Element context, which is not what we want + // Thanks to Andrew Dupont for this workaround technique + // Support: IE <=8 + // Exclude object elements + } else if ( context.nodeName.toLowerCase() !== "object" ) { + + // Capture the context ID, setting it first if necessary + if ( (nid = context.getAttribute( "id" )) ) 
{ + nid = nid.replace( rcssescape, fcssescape ); + } else { + context.setAttribute( "id", (nid = expando) ); + } + + // Prefix every selector in the list + groups = tokenize( selector ); + i = groups.length; + while ( i-- ) { + groups[i] = "#" + nid + " " + toSelector( groups[i] ); + } + newSelector = groups.join( "," ); + + // Expand context for sibling selectors + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || + context; + } + + if ( newSelector ) { + try { + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch ( qsaError ) { + } finally { + if ( nid === expando ) { + context.removeAttribute( "id" ); + } + } + } + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed ); +} + +/** + * Create key-value caches of limited size + * @returns {function(string, object)} Returns the Object data after storing it on itself with + * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) + * deleting the oldest entry + */ +function createCache() { + var keys = []; + + function cache( key, value ) { + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) + if ( keys.push( key + " " ) > Expr.cacheLength ) { + // Only keep the most recent entries + delete cache[ keys.shift() ]; + } + return (cache[ key + " " ] = value); + } + return cache; +} + +/** + * Mark a function for special use by Sizzle + * @param {Function} fn The function to mark + */ +function markFunction( fn ) { + fn[ expando ] = true; + return fn; +} + +/** + * Support testing using an element + * @param {Function} fn Passed the created element and returns a boolean result + */ +function assert( fn ) { + var el = document.createElement("fieldset"); + + try { + return !!fn( el ); + } catch (e) { + return false; + } finally { + // Remove from its parent by default + if ( el.parentNode ) { + el.parentNode.removeChild( el ); 
+ } + // release memory in IE + el = null; + } +} + +/** + * Adds the same handler for all of the specified attrs + * @param {String} attrs Pipe-separated list of attributes + * @param {Function} handler The method that will be applied + */ +function addHandle( attrs, handler ) { + var arr = attrs.split("|"), + i = arr.length; + + while ( i-- ) { + Expr.attrHandle[ arr[i] ] = handler; + } +} + +/** + * Checks document order of two siblings + * @param {Element} a + * @param {Element} b + * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b + */ +function siblingCheck( a, b ) { + var cur = b && a, + diff = cur && a.nodeType === 1 && b.nodeType === 1 && + a.sourceIndex - b.sourceIndex; + + // Use IE sourceIndex if available on both nodes + if ( diff ) { + return diff; + } + + // Check if b follows a + if ( cur ) { + while ( (cur = cur.nextSibling) ) { + if ( cur === b ) { + return -1; + } + } + } + + return a ? 1 : -1; +} + +/** + * Returns a function to use in pseudos for input types + * @param {String} type + */ +function createInputPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for buttons + * @param {String} type + */ +function createButtonPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return (name === "input" || name === "button") && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for :enabled/:disabled + * @param {Boolean} disabled true for :disabled; false for :enabled + */ +function createDisabledPseudo( disabled ) { + + // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable + return function( elem ) { + + // Only certain elements can match :enabled or :disabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled + // 
https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled + if ( "form" in elem ) { + + // Check for inherited disabledness on relevant non-disabled elements: + // * listed form-associated elements in a disabled fieldset + // https://html.spec.whatwg.org/multipage/forms.html#category-listed + // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled + // * option elements in a disabled optgroup + // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled + // All such elements have a "form" property. + if ( elem.parentNode && elem.disabled === false ) { + + // Option elements defer to a parent optgroup if present + if ( "label" in elem ) { + if ( "label" in elem.parentNode ) { + return elem.parentNode.disabled === disabled; + } else { + return elem.disabled === disabled; + } + } + + // Support: IE 6 - 11 + // Use the isDisabled shortcut property to check for disabled fieldset ancestors + return elem.isDisabled === disabled || + + // Where there is no isDisabled, check manually + /* jshint -W018 */ + elem.isDisabled !== !disabled && + disabledAncestor( elem ) === disabled; + } + + return elem.disabled === disabled; + + // Try to winnow out elements that can't be disabled before trusting the disabled property. + // Some victims get caught in our net (label, legend, menu, track), but it shouldn't + // even exist on them, let alone have a boolean value. 
+ } else if ( "label" in elem ) { + return elem.disabled === disabled; + } + + // Remaining elements are neither :enabled nor :disabled + return false; + }; +} + +/** + * Returns a function to use in pseudos for positionals + * @param {Function} fn + */ +function createPositionalPseudo( fn ) { + return markFunction(function( argument ) { + argument = +argument; + return markFunction(function( seed, matches ) { + var j, + matchIndexes = fn( [], seed.length, argument ), + i = matchIndexes.length; + + // Match elements found at the specified indexes + while ( i-- ) { + if ( seed[ (j = matchIndexes[i]) ] ) { + seed[j] = !(matches[j] = seed[j]); + } + } + }); + }); +} + +/** + * Checks a node for validity as a Sizzle context + * @param {Element|Object=} context + * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value + */ +function testContext( context ) { + return context && typeof context.getElementsByTagName !== "undefined" && context; +} + +// Expose support vars for convenience +support = Sizzle.support = {}; + +/** + * Detects XML nodes + * @param {Element|Object} elem An element or a document + * @returns {Boolean} True iff elem is a non-HTML XML node + */ +isXML = Sizzle.isXML = function( elem ) { + // documentElement is verified for cases where it doesn't yet exist + // (such as loading iframes in IE - #4833) + var documentElement = elem && (elem.ownerDocument || elem).documentElement; + return documentElement ? documentElement.nodeName !== "HTML" : false; +}; + +/** + * Sets document-related variables once based on the current document + * @param {Element|Object} [doc] An element or document object to use to set the document + * @returns {Object} Returns the current document + */ +setDocument = Sizzle.setDocument = function( node ) { + var hasCompare, subWindow, + doc = node ? 
node.ownerDocument || node : preferredDoc; + + // Return early if doc is invalid or already selected + if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { + return document; + } + + // Update global variables + document = doc; + docElem = document.documentElement; + documentIsHTML = !isXML( document ); + + // Support: IE 9-11, Edge + // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) + if ( preferredDoc !== document && + (subWindow = document.defaultView) && subWindow.top !== subWindow ) { + + // Support: IE 11, Edge + if ( subWindow.addEventListener ) { + subWindow.addEventListener( "unload", unloadHandler, false ); + + // Support: IE 9 - 10 only + } else if ( subWindow.attachEvent ) { + subWindow.attachEvent( "onunload", unloadHandler ); + } + } + + /* Attributes + ---------------------------------------------------------------------- */ + + // Support: IE<8 + // Verify that getAttribute really returns attributes and not properties + // (excepting IE8 booleans) + support.attributes = assert(function( el ) { + el.className = "i"; + return !el.getAttribute("className"); + }); + + /* getElement(s)By* + ---------------------------------------------------------------------- */ + + // Check if getElementsByTagName("*") returns only elements + support.getElementsByTagName = assert(function( el ) { + el.appendChild( document.createComment("") ); + return !el.getElementsByTagName("*").length; + }); + + // Support: IE<9 + support.getElementsByClassName = rnative.test( document.getElementsByClassName ); + + // Support: IE<10 + // Check if getElementById returns elements by name + // The broken getElementById methods don't pick up programmatically-set names, + // so use a roundabout getElementsByName test + support.getById = assert(function( el ) { + docElem.appendChild( el ).id = expando; + return !document.getElementsByName || !document.getElementsByName( expando ).length; + }); + + // ID filter and find + if ( 
support.getById ) { + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + return elem.getAttribute("id") === attrId; + }; + }; + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var elem = context.getElementById( id ); + return elem ? [ elem ] : []; + } + }; + } else { + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + var node = typeof elem.getAttributeNode !== "undefined" && + elem.getAttributeNode("id"); + return node && node.value === attrId; + }; + }; + + // Support: IE 6 - 7 only + // getElementById is not reliable as a find shortcut + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var node, i, elems, + elem = context.getElementById( id ); + + if ( elem ) { + + // Verify the id attribute + node = elem.getAttributeNode("id"); + if ( node && node.value === id ) { + return [ elem ]; + } + + // Fall back on getElementsByName + elems = context.getElementsByName( id ); + i = 0; + while ( (elem = elems[i++]) ) { + node = elem.getAttributeNode("id"); + if ( node && node.value === id ) { + return [ elem ]; + } + } + } + + return []; + } + }; + } + + // Tag + Expr.find["TAG"] = support.getElementsByTagName ? 
+ function( tag, context ) { + if ( typeof context.getElementsByTagName !== "undefined" ) { + return context.getElementsByTagName( tag ); + + // DocumentFragment nodes don't have gEBTN + } else if ( support.qsa ) { + return context.querySelectorAll( tag ); + } + } : + + function( tag, context ) { + var elem, + tmp = [], + i = 0, + // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too + results = context.getElementsByTagName( tag ); + + // Filter out possible comments + if ( tag === "*" ) { + while ( (elem = results[i++]) ) { + if ( elem.nodeType === 1 ) { + tmp.push( elem ); + } + } + + return tmp; + } + return results; + }; + + // Class + Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) { + if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { + return context.getElementsByClassName( className ); + } + }; + + /* QSA/matchesSelector + ---------------------------------------------------------------------- */ + + // QSA and matchesSelector support + + // matchesSelector(:active) reports false when true (IE9/Opera 11.5) + rbuggyMatches = []; + + // qSa(:focus) reports false when true (Chrome 21) + // We allow this because of a bug in IE8/9 that throws an error + // whenever `document.activeElement` is accessed on an iframe + // So, we allow :focus to pass through QSA all the time to avoid the IE error + // See https://bugs.jquery.com/ticket/13378 + rbuggyQSA = []; + + if ( (support.qsa = rnative.test( document.querySelectorAll )) ) { + // Build QSA regex + // Regex strategy adopted from Diego Perini + assert(function( el ) { + // Select is set to empty string on purpose + // This is to test IE's treatment of not explicitly + // setting a boolean content attribute, + // since its presence should be enough + // https://bugs.jquery.com/ticket/12359 + docElem.appendChild( el ).innerHTML = "" + + ""; + + // Support: IE8, Opera 11-12.16 + // Nothing should be selected when empty 
strings follow ^= or $= or *= + // The test attribute must be unknown in Opera but "safe" for WinRT + // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section + if ( el.querySelectorAll("[msallowcapture^='']").length ) { + rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); + } + + // Support: IE8 + // Boolean attributes and "value" are not treated correctly + if ( !el.querySelectorAll("[selected]").length ) { + rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); + } + + // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ + if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { + rbuggyQSA.push("~="); + } + + // Webkit/Opera - :checked should return selected option elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + // IE8 throws error here and will not see later tests + if ( !el.querySelectorAll(":checked").length ) { + rbuggyQSA.push(":checked"); + } + + // Support: Safari 8+, iOS 8+ + // https://bugs.webkit.org/show_bug.cgi?id=136851 + // In-page `selector#id sibling-combinator selector` fails + if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { + rbuggyQSA.push(".#.+[+~]"); + } + }); + + assert(function( el ) { + el.innerHTML = "" + + ""; + + // Support: Windows 8 Native Apps + // The type and name attributes are restricted during .innerHTML assignment + var input = document.createElement("input"); + input.setAttribute( "type", "hidden" ); + el.appendChild( input ).setAttribute( "name", "D" ); + + // Support: IE8 + // Enforce case-sensitivity of name attribute + if ( el.querySelectorAll("[name=d]").length ) { + rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); + } + + // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) + // IE8 throws error here and will not see later tests + if ( el.querySelectorAll(":enabled").length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Support: IE9-11+ + // 
IE's :disabled selector does not pick up the children of disabled fieldsets + docElem.appendChild( el ).disabled = true; + if ( el.querySelectorAll(":disabled").length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Opera 10-11 does not throw on post-comma invalid pseudos + el.querySelectorAll("*,:x"); + rbuggyQSA.push(",.*:"); + }); + } + + if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || + docElem.webkitMatchesSelector || + docElem.mozMatchesSelector || + docElem.oMatchesSelector || + docElem.msMatchesSelector) )) ) { + + assert(function( el ) { + // Check to see if it's possible to do matchesSelector + // on a disconnected node (IE 9) + support.disconnectedMatch = matches.call( el, "*" ); + + // This should fail with an exception + // Gecko does not error, returns false instead + matches.call( el, "[s!='']:x" ); + rbuggyMatches.push( "!=", pseudos ); + }); + } + + rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); + rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); + + /* Contains + ---------------------------------------------------------------------- */ + hasCompare = rnative.test( docElem.compareDocumentPosition ); + + // Element contains another + // Purposefully self-exclusive + // As in, an element does not contain itself + contains = hasCompare || rnative.test( docElem.contains ) ? + function( a, b ) { + var adown = a.nodeType === 9 ? a.documentElement : a, + bup = b && b.parentNode; + return a === bup || !!( bup && bup.nodeType === 1 && ( + adown.contains ? + adown.contains( bup ) : + a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 + )); + } : + function( a, b ) { + if ( b ) { + while ( (b = b.parentNode) ) { + if ( b === a ) { + return true; + } + } + } + return false; + }; + + /* Sorting + ---------------------------------------------------------------------- */ + + // Document order sorting + sortOrder = hasCompare ? 
+ function( a, b ) { + + // Flag for duplicate removal + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + // Sort on method existence if only one input has compareDocumentPosition + var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; + if ( compare ) { + return compare; + } + + // Calculate position if both inputs belong to the same document + compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? + a.compareDocumentPosition( b ) : + + // Otherwise we know they are disconnected + 1; + + // Disconnected nodes + if ( compare & 1 || + (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { + + // Choose the first element that is related to our preferred document + if ( a === document || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { + return -1; + } + if ( b === document || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { + return 1; + } + + // Maintain original order + return sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + } + + return compare & 4 ? -1 : 1; + } : + function( a, b ) { + // Exit early if the nodes are identical + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + var cur, + i = 0, + aup = a.parentNode, + bup = b.parentNode, + ap = [ a ], + bp = [ b ]; + + // Parentless nodes are either documents or disconnected + if ( !aup || !bup ) { + return a === document ? -1 : + b === document ? 1 : + aup ? -1 : + bup ? 1 : + sortInput ? 
+ ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + + // If the nodes are siblings, we can do a quick check + } else if ( aup === bup ) { + return siblingCheck( a, b ); + } + + // Otherwise we need full lists of their ancestors for comparison + cur = a; + while ( (cur = cur.parentNode) ) { + ap.unshift( cur ); + } + cur = b; + while ( (cur = cur.parentNode) ) { + bp.unshift( cur ); + } + + // Walk down the tree looking for a discrepancy + while ( ap[i] === bp[i] ) { + i++; + } + + return i ? + // Do a sibling check if the nodes have a common ancestor + siblingCheck( ap[i], bp[i] ) : + + // Otherwise nodes in our document sort first + ap[i] === preferredDoc ? -1 : + bp[i] === preferredDoc ? 1 : + 0; + }; + + return document; +}; + +Sizzle.matches = function( expr, elements ) { + return Sizzle( expr, null, null, elements ); +}; + +Sizzle.matchesSelector = function( elem, expr ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + // Make sure that attribute selectors are quoted + expr = expr.replace( rattributeQuotes, "='$1']" ); + + if ( support.matchesSelector && documentIsHTML && + !compilerCache[ expr + " " ] && + ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && + ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { + + try { + var ret = matches.call( elem, expr ); + + // IE 9's matchesSelector returns false on disconnected nodes + if ( ret || support.disconnectedMatch || + // As well, disconnected nodes are said to be in a document + // fragment in IE 9 + elem.document && elem.document.nodeType !== 11 ) { + return ret; + } + } catch (e) {} + } + + return Sizzle( expr, document, null, [ elem ] ).length > 0; +}; + +Sizzle.contains = function( context, elem ) { + // Set document vars if needed + if ( ( context.ownerDocument || context ) !== document ) { + setDocument( context ); + } + return contains( context, elem ); +}; + +Sizzle.attr = function( elem, name ) { + // Set document vars if 
needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + var fn = Expr.attrHandle[ name.toLowerCase() ], + // Don't get fooled by Object.prototype properties (jQuery #13807) + val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? + fn( elem, name, !documentIsHTML ) : + undefined; + + return val !== undefined ? + val : + support.attributes || !documentIsHTML ? + elem.getAttribute( name ) : + (val = elem.getAttributeNode(name)) && val.specified ? + val.value : + null; +}; + +Sizzle.escape = function( sel ) { + return (sel + "").replace( rcssescape, fcssescape ); +}; + +Sizzle.error = function( msg ) { + throw new Error( "Syntax error, unrecognized expression: " + msg ); +}; + +/** + * Document sorting and removing duplicates + * @param {ArrayLike} results + */ +Sizzle.uniqueSort = function( results ) { + var elem, + duplicates = [], + j = 0, + i = 0; + + // Unless we *know* we can detect duplicates, assume their presence + hasDuplicate = !support.detectDuplicates; + sortInput = !support.sortStable && results.slice( 0 ); + results.sort( sortOrder ); + + if ( hasDuplicate ) { + while ( (elem = results[i++]) ) { + if ( elem === results[ i ] ) { + j = duplicates.push( i ); + } + } + while ( j-- ) { + results.splice( duplicates[ j ], 1 ); + } + } + + // Clear input after sorting to release objects + // See https://github.com/jquery/sizzle/pull/225 + sortInput = null; + + return results; +}; + +/** + * Utility function for retrieving the text value of an array of DOM nodes + * @param {Array|Element} elem + */ +getText = Sizzle.getText = function( elem ) { + var node, + ret = "", + i = 0, + nodeType = elem.nodeType; + + if ( !nodeType ) { + // If no nodeType, this is expected to be an array + while ( (node = elem[i++]) ) { + // Do not traverse comment nodes + ret += getText( node ); + } + } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { + // Use textContent for elements + // innerText usage removed for 
consistency of new lines (jQuery #11153) + if ( typeof elem.textContent === "string" ) { + return elem.textContent; + } else { + // Traverse its children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + ret += getText( elem ); + } + } + } else if ( nodeType === 3 || nodeType === 4 ) { + return elem.nodeValue; + } + // Do not include comment or processing instruction nodes + + return ret; +}; + +Expr = Sizzle.selectors = { + + // Can be adjusted by the user + cacheLength: 50, + + createPseudo: markFunction, + + match: matchExpr, + + attrHandle: {}, + + find: {}, + + relative: { + ">": { dir: "parentNode", first: true }, + " ": { dir: "parentNode" }, + "+": { dir: "previousSibling", first: true }, + "~": { dir: "previousSibling" } + }, + + preFilter: { + "ATTR": function( match ) { + match[1] = match[1].replace( runescape, funescape ); + + // Move the given value to match[3] whether quoted or unquoted + match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); + + if ( match[2] === "~=" ) { + match[3] = " " + match[3] + " "; + } + + return match.slice( 0, 4 ); + }, + + "CHILD": function( match ) { + /* matches from matchExpr["CHILD"] + 1 type (only|nth|...) + 2 what (child|of-type) + 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) + 4 xn-component of xn+y argument ([+-]?\d*n|) + 5 sign of xn-component + 6 x of xn-component + 7 sign of y-component + 8 y of y-component + */ + match[1] = match[1].toLowerCase(); + + if ( match[1].slice( 0, 3 ) === "nth" ) { + // nth-* requires argument + if ( !match[3] ) { + Sizzle.error( match[0] ); + } + + // numeric x and y parameters for Expr.filter.CHILD + // remember that false/true cast respectively to 0/1 + match[4] = +( match[4] ? 
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); + match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); + + // other types prohibit arguments + } else if ( match[3] ) { + Sizzle.error( match[0] ); + } + + return match; + }, + + "PSEUDO": function( match ) { + var excess, + unquoted = !match[6] && match[2]; + + if ( matchExpr["CHILD"].test( match[0] ) ) { + return null; + } + + // Accept quoted arguments as-is + if ( match[3] ) { + match[2] = match[4] || match[5] || ""; + + // Strip excess characters from unquoted arguments + } else if ( unquoted && rpseudo.test( unquoted ) && + // Get excess from tokenize (recursively) + (excess = tokenize( unquoted, true )) && + // advance to the next closing parenthesis + (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { + + // excess is a negative index + match[0] = match[0].slice( 0, excess ); + match[2] = unquoted.slice( 0, excess ); + } + + // Return only captures needed by the pseudo filter method (type and argument) + return match.slice( 0, 3 ); + } + }, + + filter: { + + "TAG": function( nodeNameSelector ) { + var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); + return nodeNameSelector === "*" ? 
+ function() { return true; } : + function( elem ) { + return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; + }; + }, + + "CLASS": function( className ) { + var pattern = classCache[ className + " " ]; + + return pattern || + (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && + classCache( className, function( elem ) { + return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== "undefined" && elem.getAttribute("class") || "" ); + }); + }, + + "ATTR": function( name, operator, check ) { + return function( elem ) { + var result = Sizzle.attr( elem, name ); + + if ( result == null ) { + return operator === "!="; + } + if ( !operator ) { + return true; + } + + result += ""; + + return operator === "=" ? result === check : + operator === "!=" ? result !== check : + operator === "^=" ? check && result.indexOf( check ) === 0 : + operator === "*=" ? check && result.indexOf( check ) > -1 : + operator === "$=" ? check && result.slice( -check.length ) === check : + operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : + operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : + false; + }; + }, + + "CHILD": function( type, what, argument, first, last ) { + var simple = type.slice( 0, 3 ) !== "nth", + forward = type.slice( -4 ) !== "last", + ofType = what === "of-type"; + + return first === 1 && last === 0 ? + + // Shortcut for :nth-*(n) + function( elem ) { + return !!elem.parentNode; + } : + + function( elem, context, xml ) { + var cache, uniqueCache, outerCache, node, nodeIndex, start, + dir = simple !== forward ? 
"nextSibling" : "previousSibling", + parent = elem.parentNode, + name = ofType && elem.nodeName.toLowerCase(), + useCache = !xml && !ofType, + diff = false; + + if ( parent ) { + + // :(first|last|only)-(child|of-type) + if ( simple ) { + while ( dir ) { + node = elem; + while ( (node = node[ dir ]) ) { + if ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) { + + return false; + } + } + // Reverse direction for :only-* (if we haven't yet done so) + start = dir = type === "only" && !start && "nextSibling"; + } + return true; + } + + start = [ forward ? parent.firstChild : parent.lastChild ]; + + // non-xml :nth-child(...) stores cache data on `parent` + if ( forward && useCache ) { + + // Seek `elem` from a previously-cached index + + // ...in a gzip-friendly way + node = parent; + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex && cache[ 2 ]; + node = nodeIndex && parent.childNodes[ nodeIndex ]; + + while ( (node = ++nodeIndex && node && node[ dir ] || + + // Fallback to seeking `elem` from the start + (diff = nodeIndex = 0) || start.pop()) ) { + + // When found, cache indexes on `parent` and break + if ( node.nodeType === 1 && ++diff && node === elem ) { + uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; + break; + } + } + + } else { + // Use previously-cached element index if available + if ( useCache ) { + // ...in a gzip-friendly way + node = elem; + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && 
cache[ 1 ]; + diff = nodeIndex; + } + + // xml :nth-child(...) + // or :nth-last-child(...) or :nth(-last)?-of-type(...) + if ( diff === false ) { + // Use the same loop as above to seek `elem` from the start + while ( (node = ++nodeIndex && node && node[ dir ] || + (diff = nodeIndex = 0) || start.pop()) ) { + + if ( ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) && + ++diff ) { + + // Cache the index of each encountered element + if ( useCache ) { + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + uniqueCache[ type ] = [ dirruns, diff ]; + } + + if ( node === elem ) { + break; + } + } + } + } + } + + // Incorporate the offset, then check against cycle size + diff -= last; + return diff === first || ( diff % first === 0 && diff / first >= 0 ); + } + }; + }, + + "PSEUDO": function( pseudo, argument ) { + // pseudo-class names are case-insensitive + // http://www.w3.org/TR/selectors/#pseudo-classes + // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters + // Remember that setFilters inherits from pseudos + var args, + fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || + Sizzle.error( "unsupported pseudo: " + pseudo ); + + // The user may use createPseudo to indicate that + // arguments are needed to create the filter function + // just as Sizzle does + if ( fn[ expando ] ) { + return fn( argument ); + } + + // But maintain support for old signatures + if ( fn.length > 1 ) { + args = [ pseudo, pseudo, "", argument ]; + return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
+ markFunction(function( seed, matches ) { + var idx, + matched = fn( seed, argument ), + i = matched.length; + while ( i-- ) { + idx = indexOf( seed, matched[i] ); + seed[ idx ] = !( matches[ idx ] = matched[i] ); + } + }) : + function( elem ) { + return fn( elem, 0, args ); + }; + } + + return fn; + } + }, + + pseudos: { + // Potentially complex pseudos + "not": markFunction(function( selector ) { + // Trim the selector passed to compile + // to avoid treating leading and trailing + // spaces as combinators + var input = [], + results = [], + matcher = compile( selector.replace( rtrim, "$1" ) ); + + return matcher[ expando ] ? + markFunction(function( seed, matches, context, xml ) { + var elem, + unmatched = matcher( seed, null, xml, [] ), + i = seed.length; + + // Match elements unmatched by `matcher` + while ( i-- ) { + if ( (elem = unmatched[i]) ) { + seed[i] = !(matches[i] = elem); + } + } + }) : + function( elem, context, xml ) { + input[0] = elem; + matcher( input, null, xml, results ); + // Don't keep the element (issue #299) + input[0] = null; + return !results.pop(); + }; + }), + + "has": markFunction(function( selector ) { + return function( elem ) { + return Sizzle( selector, elem ).length > 0; + }; + }), + + "contains": markFunction(function( text ) { + text = text.replace( runescape, funescape ); + return function( elem ) { + return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; + }; + }), + + // "Whether an element is represented by a :lang() selector + // is based solely on the element's language value + // being equal to the identifier C, + // or beginning with the identifier C immediately followed by "-". + // The matching of C against the element's language value is performed case-insensitively. + // The identifier C does not have to be a valid language name." 
+ // http://www.w3.org/TR/selectors/#lang-pseudo + "lang": markFunction( function( lang ) { + // lang value must be a valid identifier + if ( !ridentifier.test(lang || "") ) { + Sizzle.error( "unsupported lang: " + lang ); + } + lang = lang.replace( runescape, funescape ).toLowerCase(); + return function( elem ) { + var elemLang; + do { + if ( (elemLang = documentIsHTML ? + elem.lang : + elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { + + elemLang = elemLang.toLowerCase(); + return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; + } + } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); + return false; + }; + }), + + // Miscellaneous + "target": function( elem ) { + var hash = window.location && window.location.hash; + return hash && hash.slice( 1 ) === elem.id; + }, + + "root": function( elem ) { + return elem === docElem; + }, + + "focus": function( elem ) { + return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); + }, + + // Boolean properties + "enabled": createDisabledPseudo( false ), + "disabled": createDisabledPseudo( true ), + + "checked": function( elem ) { + // In CSS3, :checked should return both checked and selected elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + var nodeName = elem.nodeName.toLowerCase(); + return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); + }, + + "selected": function( elem ) { + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + // Contents + "empty": function( elem ) { + // http://www.w3.org/TR/selectors/#empty-pseudo + // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), + // but not by others (comment: 8; processing instruction: 7; etc.) 
+ // nodeType < 6 works because attributes (2) do not appear as children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + if ( elem.nodeType < 6 ) { + return false; + } + } + return true; + }, + + "parent": function( elem ) { + return !Expr.pseudos["empty"]( elem ); + }, + + // Element/input types + "header": function( elem ) { + return rheader.test( elem.nodeName ); + }, + + "input": function( elem ) { + return rinputs.test( elem.nodeName ); + }, + + "button": function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === "button" || name === "button"; + }, + + "text": function( elem ) { + var attr; + return elem.nodeName.toLowerCase() === "input" && + elem.type === "text" && + + // Support: IE<8 + // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" + ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); + }, + + // Position-in-collection + "first": createPositionalPseudo(function() { + return [ 0 ]; + }), + + "last": createPositionalPseudo(function( matchIndexes, length ) { + return [ length - 1 ]; + }), + + "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { + return [ argument < 0 ? argument + length : argument ]; + }), + + "even": createPositionalPseudo(function( matchIndexes, length ) { + var i = 0; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "odd": createPositionalPseudo(function( matchIndexes, length ) { + var i = 1; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; --i >= 0; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? 
argument + length : argument; + for ( ; ++i < length; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }) + } +}; + +Expr.pseudos["nth"] = Expr.pseudos["eq"]; + +// Add button/input type pseudos +for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { + Expr.pseudos[ i ] = createInputPseudo( i ); +} +for ( i in { submit: true, reset: true } ) { + Expr.pseudos[ i ] = createButtonPseudo( i ); +} + +// Easy API for creating new setFilters +function setFilters() {} +setFilters.prototype = Expr.filters = Expr.pseudos; +Expr.setFilters = new setFilters(); + +tokenize = Sizzle.tokenize = function( selector, parseOnly ) { + var matched, match, tokens, type, + soFar, groups, preFilters, + cached = tokenCache[ selector + " " ]; + + if ( cached ) { + return parseOnly ? 0 : cached.slice( 0 ); + } + + soFar = selector; + groups = []; + preFilters = Expr.preFilter; + + while ( soFar ) { + + // Comma and first run + if ( !matched || (match = rcomma.exec( soFar )) ) { + if ( match ) { + // Don't consume trailing commas as valid + soFar = soFar.slice( match[0].length ) || soFar; + } + groups.push( (tokens = []) ); + } + + matched = false; + + // Combinators + if ( (match = rcombinators.exec( soFar )) ) { + matched = match.shift(); + tokens.push({ + value: matched, + // Cast descendant combinators to space + type: match[0].replace( rtrim, " " ) + }); + soFar = soFar.slice( matched.length ); + } + + // Filters + for ( type in Expr.filter ) { + if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || + (match = preFilters[ type ]( match ))) ) { + matched = match.shift(); + tokens.push({ + value: matched, + type: type, + matches: match + }); + soFar = soFar.slice( matched.length ); + } + } + + if ( !matched ) { + break; + } + } + + // Return the length of the invalid excess + // if we're just parsing + // Otherwise, throw an error or return tokens + return parseOnly ? + soFar.length : + soFar ? 
+ Sizzle.error( selector ) : + // Cache the tokens + tokenCache( selector, groups ).slice( 0 ); +}; + +function toSelector( tokens ) { + var i = 0, + len = tokens.length, + selector = ""; + for ( ; i < len; i++ ) { + selector += tokens[i].value; + } + return selector; +} + +function addCombinator( matcher, combinator, base ) { + var dir = combinator.dir, + skip = combinator.next, + key = skip || dir, + checkNonElements = base && key === "parentNode", + doneName = done++; + + return combinator.first ? + // Check against closest ancestor/preceding element + function( elem, context, xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + return matcher( elem, context, xml ); + } + } + return false; + } : + + // Check against all ancestor/preceding elements + function( elem, context, xml ) { + var oldCache, uniqueCache, outerCache, + newCache = [ dirruns, doneName ]; + + // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching + if ( xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + if ( matcher( elem, context, xml ) ) { + return true; + } + } + } + } else { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + outerCache = elem[ expando ] || (elem[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ elem.uniqueID ] || (outerCache[ elem.uniqueID ] = {}); + + if ( skip && skip === elem.nodeName.toLowerCase() ) { + elem = elem[ dir ] || elem; + } else if ( (oldCache = uniqueCache[ key ]) && + oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { + + // Assign to newCache so results back-propagate to previous elements + return (newCache[ 2 ] = oldCache[ 2 ]); + } else { + // Reuse newcache so results back-propagate to previous elements + uniqueCache[ key ] = newCache; + + // A match means we're done; a fail means we have to keep checking + 
if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { + return true; + } + } + } + } + } + return false; + }; +} + +function elementMatcher( matchers ) { + return matchers.length > 1 ? + function( elem, context, xml ) { + var i = matchers.length; + while ( i-- ) { + if ( !matchers[i]( elem, context, xml ) ) { + return false; + } + } + return true; + } : + matchers[0]; +} + +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[i], results ); + } + return results; +} + +function condense( unmatched, map, filter, context, xml ) { + var elem, + newUnmatched = [], + i = 0, + len = unmatched.length, + mapped = map != null; + + for ( ; i < len; i++ ) { + if ( (elem = unmatched[i]) ) { + if ( !filter || filter( elem, context, xml ) ) { + newUnmatched.push( elem ); + if ( mapped ) { + map.push( i ); + } + } + } + } + + return newUnmatched; +} + +function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { + if ( postFilter && !postFilter[ expando ] ) { + postFilter = setMatcher( postFilter ); + } + if ( postFinder && !postFinder[ expando ] ) { + postFinder = setMatcher( postFinder, postSelector ); + } + return markFunction(function( seed, results, context, xml ) { + var temp, i, elem, + preMap = [], + postMap = [], + preexisting = results.length, + + // Get initial elements from seed or context + elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), + + // Prefilter to get matcher input, preserving a map for seed-results synchronization + matcherIn = preFilter && ( seed || !selector ) ? + condense( elems, preMap, preFilter, context, xml ) : + elems, + + matcherOut = matcher ? + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, + postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
+ + // ...intermediate processing is necessary + [] : + + // ...otherwise use results directly + results : + matcherIn; + + // Find primary matches + if ( matcher ) { + matcher( matcherIn, matcherOut, context, xml ); + } + + // Apply postFilter + if ( postFilter ) { + temp = condense( matcherOut, postMap ); + postFilter( temp, [], context, xml ); + + // Un-match failing elements by moving them back to matcherIn + i = temp.length; + while ( i-- ) { + if ( (elem = temp[i]) ) { + matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); + } + } + } + + if ( seed ) { + if ( postFinder || preFilter ) { + if ( postFinder ) { + // Get the final matcherOut by condensing this intermediate into postFinder contexts + temp = []; + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) ) { + // Restore matcherIn since elem is not yet a final match + temp.push( (matcherIn[i] = elem) ); + } + } + postFinder( null, (matcherOut = []), temp, xml ); + } + + // Move matched elements from seed to results to keep them synchronized + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) && + (temp = postFinder ? indexOf( seed, elem ) : preMap[i]) > -1 ) { + + seed[temp] = !(results[temp] = elem); + } + } + } + + // Add elements to results, through postFinder if defined + } else { + matcherOut = condense( + matcherOut === results ? + matcherOut.splice( preexisting, matcherOut.length ) : + matcherOut + ); + if ( postFinder ) { + postFinder( null, results, matcherOut, xml ); + } else { + push.apply( results, matcherOut ); + } + } + }); +} + +function matcherFromTokens( tokens ) { + var checkContext, matcher, j, + len = tokens.length, + leadingRelative = Expr.relative[ tokens[0].type ], + implicitRelative = leadingRelative || Expr.relative[" "], + i = leadingRelative ? 
1 : 0, + + // The foundational matcher ensures that elements are reachable from top-level context(s) + matchContext = addCombinator( function( elem ) { + return elem === checkContext; + }, implicitRelative, true ), + matchAnyContext = addCombinator( function( elem ) { + return indexOf( checkContext, elem ) > -1; + }, implicitRelative, true ), + matchers = [ function( elem, context, xml ) { + var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + (checkContext = context).nodeType ? + matchContext( elem, context, xml ) : + matchAnyContext( elem, context, xml ) ); + // Avoid hanging onto element (issue #299) + checkContext = null; + return ret; + } ]; + + for ( ; i < len; i++ ) { + if ( (matcher = Expr.relative[ tokens[i].type ]) ) { + matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; + } else { + matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); + + // Return special upon seeing a positional matcher + if ( matcher[ expando ] ) { + // Find the next relative operator (if any) for proper handling + j = ++i; + for ( ; j < len; j++ ) { + if ( Expr.relative[ tokens[j].type ] ) { + break; + } + } + return setMatcher( + i > 1 && elementMatcher( matchers ), + i > 1 && toSelector( + // If the preceding token was a descendant combinator, insert an implicit any-element `*` + tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? 
"*" : "" }) + ).replace( rtrim, "$1" ), + matcher, + i < j && matcherFromTokens( tokens.slice( i, j ) ), + j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), + j < len && toSelector( tokens ) + ); + } + matchers.push( matcher ); + } + } + + return elementMatcher( matchers ); +} + +function matcherFromGroupMatchers( elementMatchers, setMatchers ) { + var bySet = setMatchers.length > 0, + byElement = elementMatchers.length > 0, + superMatcher = function( seed, context, xml, results, outermost ) { + var elem, j, matcher, + matchedCount = 0, + i = "0", + unmatched = seed && [], + setMatched = [], + contextBackup = outermostContext, + // We must always have either seed elements or outermost context + elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), + // Use integer dirruns iff this is the outermost matcher + dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1), + len = elems.length; + + if ( outermost ) { + outermostContext = context === document || context || outermost; + } + + // Add elements passing elementMatchers directly to results + // Support: IE<9, Safari + // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id + for ( ; i !== len && (elem = elems[i]) != null; i++ ) { + if ( byElement && elem ) { + j = 0; + if ( !context && elem.ownerDocument !== document ) { + setDocument( elem ); + xml = !documentIsHTML; + } + while ( (matcher = elementMatchers[j++]) ) { + if ( matcher( elem, context || document, xml) ) { + results.push( elem ); + break; + } + } + if ( outermost ) { + dirruns = dirrunsUnique; + } + } + + // Track unmatched elements for set filters + if ( bySet ) { + // They will have gone through all possible matchers + if ( (elem = !matcher && elem) ) { + matchedCount--; + } + + // Lengthen the array for every element, matched or not + if ( seed ) { + unmatched.push( elem ); + } + } + } + + // `i` is now the count of elements visited above, and adding it to `matchedCount` + // 
makes the latter nonnegative. + matchedCount += i; + + // Apply set filters to unmatched elements + // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` + // equals `i`), unless we didn't visit _any_ elements in the above loop because we have + // no element matchers and no seed. + // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that + // case, which will result in a "00" `matchedCount` that differs from `i` but is also + // numerically zero. + if ( bySet && i !== matchedCount ) { + j = 0; + while ( (matcher = setMatchers[j++]) ) { + matcher( unmatched, setMatched, context, xml ); + } + + if ( seed ) { + // Reintegrate element matches to eliminate the need for sorting + if ( matchedCount > 0 ) { + while ( i-- ) { + if ( !(unmatched[i] || setMatched[i]) ) { + setMatched[i] = pop.call( results ); + } + } + } + + // Discard index placeholder values to get only actual matches + setMatched = condense( setMatched ); + } + + // Add matches to results + push.apply( results, setMatched ); + + // Seedless set matches succeeding multiple successful matchers stipulate sorting + if ( outermost && !seed && setMatched.length > 0 && + ( matchedCount + setMatchers.length ) > 1 ) { + + Sizzle.uniqueSort( results ); + } + } + + // Override manipulation of globals by nested matchers + if ( outermost ) { + dirruns = dirrunsUnique; + outermostContext = contextBackup; + } + + return unmatched; + }; + + return bySet ? 
+ markFunction( superMatcher ) : + superMatcher; +} + +compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { + var i, + setMatchers = [], + elementMatchers = [], + cached = compilerCache[ selector + " " ]; + + if ( !cached ) { + // Generate a function of recursive functions that can be used to check each element + if ( !match ) { + match = tokenize( selector ); + } + i = match.length; + while ( i-- ) { + cached = matcherFromTokens( match[i] ); + if ( cached[ expando ] ) { + setMatchers.push( cached ); + } else { + elementMatchers.push( cached ); + } + } + + // Cache the compiled function + cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); + + // Save selector and tokenization + cached.selector = selector; + } + return cached; +}; + +/** + * A low-level selection function that works with Sizzle's compiled + * selector functions + * @param {String|Function} selector A selector or a pre-compiled + * selector function built with Sizzle.compile + * @param {Element} context + * @param {Array} [results] + * @param {Array} [seed] A set of elements to match against + */ +select = Sizzle.select = function( selector, context, results, seed ) { + var i, tokens, token, type, find, + compiled = typeof selector === "function" && selector, + match = !seed && tokenize( (selector = compiled.selector || selector) ); + + results = results || []; + + // Try to minimize operations if there is only one selector in the list and no seed + // (the latter of which guarantees us context) + if ( match.length === 1 ) { + + // Reduce context if the leading compound selector is an ID + tokens = match[0] = match[0].slice( 0 ); + if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && + context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[1].type ] ) { + + context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; + if ( !context ) { + return results; + + // Precompiled 
matchers will still verify ancestry, so step up a level + } else if ( compiled ) { + context = context.parentNode; + } + + selector = selector.slice( tokens.shift().value.length ); + } + + // Fetch a seed set for right-to-left matching + i = matchExpr["needsContext"].test( selector ) ? 0 : tokens.length; + while ( i-- ) { + token = tokens[i]; + + // Abort if we hit a combinator + if ( Expr.relative[ (type = token.type) ] ) { + break; + } + if ( (find = Expr.find[ type ]) ) { + // Search, expanding context for leading sibling combinators + if ( (seed = find( + token.matches[0].replace( runescape, funescape ), + rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context + )) ) { + + // If seed is empty or no tokens remain, we can return early + tokens.splice( i, 1 ); + selector = seed.length && toSelector( tokens ); + if ( !selector ) { + push.apply( results, seed ); + return results; + } + + break; + } + } + } + } + + // Compile and execute a filtering function if one is not provided + // Provide `match` to avoid retokenization if we modified the selector above + ( compiled || compile( selector, match ) )( + seed, + context, + !documentIsHTML, + results, + !context || rsibling.test( selector ) && testContext( context.parentNode ) || context + ); + return results; +}; + +// One-time assignments + +// Sort stability +support.sortStable = expando.split("").sort( sortOrder ).join("") === expando; + +// Support: Chrome 14-35+ +// Always assume duplicates if they aren't passed to the comparison function +support.detectDuplicates = !!hasDuplicate; + +// Initialize against the default document +setDocument(); + +// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) +// Detached nodes confoundingly follow *each other* +support.sortDetached = assert(function( el ) { + // Should return 1, but returns 4 (following) + return el.compareDocumentPosition( document.createElement("fieldset") ) & 1; +}); + +// Support: IE<8 +// Prevent 
attribute/property "interpolation" +// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx +if ( !assert(function( el ) { + el.innerHTML = ""; + return el.firstChild.getAttribute("href") === "#" ; +}) ) { + addHandle( "type|href|height|width", function( elem, name, isXML ) { + if ( !isXML ) { + return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 ); + } + }); +} + +// Support: IE<9 +// Use defaultValue in place of getAttribute("value") +if ( !support.attributes || !assert(function( el ) { + el.innerHTML = ""; + el.firstChild.setAttribute( "value", "" ); + return el.firstChild.getAttribute( "value" ) === ""; +}) ) { + addHandle( "value", function( elem, name, isXML ) { + if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { + return elem.defaultValue; + } + }); +} + +// Support: IE<9 +// Use getAttributeNode to fetch booleans when getAttribute lies +if ( !assert(function( el ) { + return el.getAttribute("disabled") == null; +}) ) { + addHandle( booleans, function( elem, name, isXML ) { + var val; + if ( !isXML ) { + return elem[ name ] === true ? name.toLowerCase() : + (val = elem.getAttributeNode( name )) && val.specified ? 
+ val.value : + null; + } + }); +} + +return Sizzle; + +})( window ); + + + +jQuery.find = Sizzle; +jQuery.expr = Sizzle.selectors; + +// Deprecated +jQuery.expr[ ":" ] = jQuery.expr.pseudos; +jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; +jQuery.text = Sizzle.getText; +jQuery.isXMLDoc = Sizzle.isXML; +jQuery.contains = Sizzle.contains; +jQuery.escapeSelector = Sizzle.escape; + + + + +var dir = function( elem, dir, until ) { + var matched = [], + truncate = until !== undefined; + + while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { + if ( elem.nodeType === 1 ) { + if ( truncate && jQuery( elem ).is( until ) ) { + break; + } + matched.push( elem ); + } + } + return matched; +}; + + +var siblings = function( n, elem ) { + var matched = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType === 1 && n !== elem ) { + matched.push( n ); + } + } + + return matched; +}; + + +var rneedsContext = jQuery.expr.match.needsContext; + + + +function nodeName( elem, name ) { + + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + +}; +var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); + + + +var risSimple = /^.[^:#\[\.,]*$/; + +// Implement the identical functionality for filter and not +function winnow( elements, qualifier, not ) { + if ( jQuery.isFunction( qualifier ) ) { + return jQuery.grep( elements, function( elem, i ) { + return !!qualifier.call( elem, i, elem ) !== not; + } ); + } + + // Single element + if ( qualifier.nodeType ) { + return jQuery.grep( elements, function( elem ) { + return ( elem === qualifier ) !== not; + } ); + } + + // Arraylike of elements (jQuery, arguments, Array) + if ( typeof qualifier !== "string" ) { + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not; + } ); + } + + // Simple selector that can be filtered directly, removing non-Elements + if ( risSimple.test( qualifier ) ) { + return jQuery.filter( 
qualifier, elements, not ); + } + + // Complex selector, compare the two sets, removing non-Elements + qualifier = jQuery.filter( qualifier, elements ); + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not && elem.nodeType === 1; + } ); +} + +jQuery.filter = function( expr, elems, not ) { + var elem = elems[ 0 ]; + + if ( not ) { + expr = ":not(" + expr + ")"; + } + + if ( elems.length === 1 && elem.nodeType === 1 ) { + return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; + } + + return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { + return elem.nodeType === 1; + } ) ); +}; + +jQuery.fn.extend( { + find: function( selector ) { + var i, ret, + len = this.length, + self = this; + + if ( typeof selector !== "string" ) { + return this.pushStack( jQuery( selector ).filter( function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( self[ i ], this ) ) { + return true; + } + } + } ) ); + } + + ret = this.pushStack( [] ); + + for ( i = 0; i < len; i++ ) { + jQuery.find( selector, self[ i ], ret ); + } + + return len > 1 ? jQuery.uniqueSort( ret ) : ret; + }, + filter: function( selector ) { + return this.pushStack( winnow( this, selector || [], false ) ); + }, + not: function( selector ) { + return this.pushStack( winnow( this, selector || [], true ) ); + }, + is: function( selector ) { + return !!winnow( + this, + + // If this is a positional/relative selector, check membership in the returned set + // so $("p:first").is("p:last") won't return true for a doc with two "p". + typeof selector === "string" && rneedsContext.test( selector ) ? 
+ jQuery( selector ) : + selector || [], + false + ).length; + } +} ); + + +// Initialize a jQuery object + + +// A central reference to the root jQuery(document) +var rootjQuery, + + // A simple way to check for HTML strings + // Prioritize #id over to avoid XSS via location.hash (#9521) + // Strict HTML recognition (#11290: must start with <) + // Shortcut simple #id case for speed + rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/, + + init = jQuery.fn.init = function( selector, context, root ) { + var match, elem; + + // HANDLE: $(""), $(null), $(undefined), $(false) + if ( !selector ) { + return this; + } + + // Method init() accepts an alternate rootjQuery + // so migrate can support jQuery.sub (gh-2101) + root = root || rootjQuery; + + // Handle HTML strings + if ( typeof selector === "string" ) { + if ( selector[ 0 ] === "<" && + selector[ selector.length - 1 ] === ">" && + selector.length >= 3 ) { + + // Assume that strings that start and end with <> are HTML and skip the regex check + match = [ null, selector, null ]; + + } else { + match = rquickExpr.exec( selector ); + } + + // Match html or make sure no context is specified for #id + if ( match && ( match[ 1 ] || !context ) ) { + + // HANDLE: $(html) -> $(array) + if ( match[ 1 ] ) { + context = context instanceof jQuery ? context[ 0 ] : context; + + // Option to run scripts is true for back-compat + // Intentionally let the error be thrown if parseHTML is not present + jQuery.merge( this, jQuery.parseHTML( + match[ 1 ], + context && context.nodeType ? 
context.ownerDocument || context : document, + true + ) ); + + // HANDLE: $(html, props) + if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) { + for ( match in context ) { + + // Properties of context are called as methods if possible + if ( jQuery.isFunction( this[ match ] ) ) { + this[ match ]( context[ match ] ); + + // ...and otherwise set as attributes + } else { + this.attr( match, context[ match ] ); + } + } + } + + return this; + + // HANDLE: $(#id) + } else { + elem = document.getElementById( match[ 2 ] ); + + if ( elem ) { + + // Inject the element directly into the jQuery object + this[ 0 ] = elem; + this.length = 1; + } + return this; + } + + // HANDLE: $(expr, $(...)) + } else if ( !context || context.jquery ) { + return ( context || root ).find( selector ); + + // HANDLE: $(expr, context) + // (which is just equivalent to: $(context).find(expr) + } else { + return this.constructor( context ).find( selector ); + } + + // HANDLE: $(DOMElement) + } else if ( selector.nodeType ) { + this[ 0 ] = selector; + this.length = 1; + return this; + + // HANDLE: $(function) + // Shortcut for document ready + } else if ( jQuery.isFunction( selector ) ) { + return root.ready !== undefined ? 
+ root.ready( selector ) : + + // Execute immediately if ready is not present + selector( jQuery ); + } + + return jQuery.makeArray( selector, this ); + }; + +// Give the init function the jQuery prototype for later instantiation +init.prototype = jQuery.fn; + +// Initialize central reference +rootjQuery = jQuery( document ); + + +var rparentsprev = /^(?:parents|prev(?:Until|All))/, + + // Methods guaranteed to produce a unique set when starting from a unique set + guaranteedUnique = { + children: true, + contents: true, + next: true, + prev: true + }; + +jQuery.fn.extend( { + has: function( target ) { + var targets = jQuery( target, this ), + l = targets.length; + + return this.filter( function() { + var i = 0; + for ( ; i < l; i++ ) { + if ( jQuery.contains( this, targets[ i ] ) ) { + return true; + } + } + } ); + }, + + closest: function( selectors, context ) { + var cur, + i = 0, + l = this.length, + matched = [], + targets = typeof selectors !== "string" && jQuery( selectors ); + + // Positional selectors never match, since there's no _selection_ context + if ( !rneedsContext.test( selectors ) ) { + for ( ; i < l; i++ ) { + for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { + + // Always skip document fragments + if ( cur.nodeType < 11 && ( targets ? + targets.index( cur ) > -1 : + + // Don't pass non-elements to Sizzle + cur.nodeType === 1 && + jQuery.find.matchesSelector( cur, selectors ) ) ) { + + matched.push( cur ); + break; + } + } + } + } + + return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); + }, + + // Determine the position of an element within the set + index: function( elem ) { + + // No argument, return index in parent + if ( !elem ) { + return ( this[ 0 ] && this[ 0 ].parentNode ) ? 
this.first().prevAll().length : -1; + } + + // Index in selector + if ( typeof elem === "string" ) { + return indexOf.call( jQuery( elem ), this[ 0 ] ); + } + + // Locate the position of the desired element + return indexOf.call( this, + + // If it receives a jQuery object, the first element is used + elem.jquery ? elem[ 0 ] : elem + ); + }, + + add: function( selector, context ) { + return this.pushStack( + jQuery.uniqueSort( + jQuery.merge( this.get(), jQuery( selector, context ) ) + ) + ); + }, + + addBack: function( selector ) { + return this.add( selector == null ? + this.prevObject : this.prevObject.filter( selector ) + ); + } +} ); + +function sibling( cur, dir ) { + while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} + return cur; +} + +jQuery.each( { + parent: function( elem ) { + var parent = elem.parentNode; + return parent && parent.nodeType !== 11 ? parent : null; + }, + parents: function( elem ) { + return dir( elem, "parentNode" ); + }, + parentsUntil: function( elem, i, until ) { + return dir( elem, "parentNode", until ); + }, + next: function( elem ) { + return sibling( elem, "nextSibling" ); + }, + prev: function( elem ) { + return sibling( elem, "previousSibling" ); + }, + nextAll: function( elem ) { + return dir( elem, "nextSibling" ); + }, + prevAll: function( elem ) { + return dir( elem, "previousSibling" ); + }, + nextUntil: function( elem, i, until ) { + return dir( elem, "nextSibling", until ); + }, + prevUntil: function( elem, i, until ) { + return dir( elem, "previousSibling", until ); + }, + siblings: function( elem ) { + return siblings( ( elem.parentNode || {} ).firstChild, elem ); + }, + children: function( elem ) { + return siblings( elem.firstChild ); + }, + contents: function( elem ) { + if ( nodeName( elem, "iframe" ) ) { + return elem.contentDocument; + } + + // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only + // Treat the template element as a regular one in browsers that + // don't support it. 
+ if ( nodeName( elem, "template" ) ) { + elem = elem.content || elem; + } + + return jQuery.merge( [], elem.childNodes ); + } +}, function( name, fn ) { + jQuery.fn[ name ] = function( until, selector ) { + var matched = jQuery.map( this, fn, until ); + + if ( name.slice( -5 ) !== "Until" ) { + selector = until; + } + + if ( selector && typeof selector === "string" ) { + matched = jQuery.filter( selector, matched ); + } + + if ( this.length > 1 ) { + + // Remove duplicates + if ( !guaranteedUnique[ name ] ) { + jQuery.uniqueSort( matched ); + } + + // Reverse order for parents* and prev-derivatives + if ( rparentsprev.test( name ) ) { + matched.reverse(); + } + } + + return this.pushStack( matched ); + }; +} ); +var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); + + + +// Convert String-formatted options into Object-formatted ones +function createOptions( options ) { + var object = {}; + jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { + object[ flag ] = true; + } ); + return object; +} + +/* + * Create a callback list using the following parameters: + * + * options: an optional list of space-separated options that will change how + * the callback list behaves or a more traditional option object + * + * By default a callback list will act like an event callback list and can be + * "fired" multiple times. + * + * Possible options: + * + * once: will ensure the callback list can only be fired once (like a Deferred) + * + * memory: will keep track of previous values and will call any callback added + * after the list has been fired right away with the latest "memorized" + * values (like a Deferred) + * + * unique: will ensure a callback can only be added once (no duplicate in the list) + * + * stopOnFalse: interrupt callings when a callback returns false + * + */ +jQuery.Callbacks = function( options ) { + + // Convert options from String-formatted to Object-formatted if needed + // (we check in cache first) + options = typeof options === "string" ? 
+ createOptions( options ) : + jQuery.extend( {}, options ); + + var // Flag to know if list is currently firing + firing, + + // Last fire value for non-forgettable lists + memory, + + // Flag to know if list was already fired + fired, + + // Flag to prevent firing + locked, + + // Actual callback list + list = [], + + // Queue of execution data for repeatable lists + queue = [], + + // Index of currently firing callback (modified by add/remove as needed) + firingIndex = -1, + + // Fire callbacks + fire = function() { + + // Enforce single-firing + locked = locked || options.once; + + // Execute callbacks for all pending executions, + // respecting firingIndex overrides and runtime changes + fired = firing = true; + for ( ; queue.length; firingIndex = -1 ) { + memory = queue.shift(); + while ( ++firingIndex < list.length ) { + + // Run callback and check for early termination + if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && + options.stopOnFalse ) { + + // Jump to end and forget the data so .add doesn't re-fire + firingIndex = list.length; + memory = false; + } + } + } + + // Forget the data if we're done with it + if ( !options.memory ) { + memory = false; + } + + firing = false; + + // Clean up if we're done firing for good + if ( locked ) { + + // Keep an empty list if we have data for future add calls + if ( memory ) { + list = []; + + // Otherwise, this object is spent + } else { + list = ""; + } + } + }, + + // Actual Callbacks object + self = { + + // Add a callback or a collection of callbacks to the list + add: function() { + if ( list ) { + + // If we have memory from a past run, we should fire after adding + if ( memory && !firing ) { + firingIndex = list.length - 1; + queue.push( memory ); + } + + ( function add( args ) { + jQuery.each( args, function( _, arg ) { + if ( jQuery.isFunction( arg ) ) { + if ( !options.unique || !self.has( arg ) ) { + list.push( arg ); + } + } else if ( arg && arg.length && jQuery.type( arg ) !== 
"string" ) { + + // Inspect recursively + add( arg ); + } + } ); + } )( arguments ); + + if ( memory && !firing ) { + fire(); + } + } + return this; + }, + + // Remove a callback from the list + remove: function() { + jQuery.each( arguments, function( _, arg ) { + var index; + while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { + list.splice( index, 1 ); + + // Handle firing indexes + if ( index <= firingIndex ) { + firingIndex--; + } + } + } ); + return this; + }, + + // Check if a given callback is in the list. + // If no argument is given, return whether or not list has callbacks attached. + has: function( fn ) { + return fn ? + jQuery.inArray( fn, list ) > -1 : + list.length > 0; + }, + + // Remove all callbacks from the list + empty: function() { + if ( list ) { + list = []; + } + return this; + }, + + // Disable .fire and .add + // Abort any current/pending executions + // Clear all callbacks and values + disable: function() { + locked = queue = []; + list = memory = ""; + return this; + }, + disabled: function() { + return !list; + }, + + // Disable .fire + // Also disable .add unless we have memory (since it would have no effect) + // Abort any pending executions + lock: function() { + locked = queue = []; + if ( !memory && !firing ) { + list = memory = ""; + } + return this; + }, + locked: function() { + return !!locked; + }, + + // Call all callbacks with the given context and arguments + fireWith: function( context, args ) { + if ( !locked ) { + args = args || []; + args = [ context, args.slice ? 
args.slice() : args ]; + queue.push( args ); + if ( !firing ) { + fire(); + } + } + return this; + }, + + // Call all the callbacks with the given arguments + fire: function() { + self.fireWith( this, arguments ); + return this; + }, + + // To know if the callbacks have already been called at least once + fired: function() { + return !!fired; + } + }; + + return self; +}; + + +function Identity( v ) { + return v; +} +function Thrower( ex ) { + throw ex; +} + +function adoptValue( value, resolve, reject, noValue ) { + var method; + + try { + + // Check for promise aspect first to privilege synchronous behavior + if ( value && jQuery.isFunction( ( method = value.promise ) ) ) { + method.call( value ).done( resolve ).fail( reject ); + + // Other thenables + } else if ( value && jQuery.isFunction( ( method = value.then ) ) ) { + method.call( value, resolve, reject ); + + // Other non-thenables + } else { + + // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: + // * false: [ value ].slice( 0 ) => resolve( value ) + // * true: [ value ].slice( 1 ) => resolve() + resolve.apply( undefined, [ value ].slice( noValue ) ); + } + + // For Promises/A+, convert exceptions into rejections + // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in + // Deferred#then to conditionally suppress rejection. + } catch ( value ) { + + // Support: Android 4.0 only + // Strict mode functions invoked without .call/.apply get global-object context + reject.apply( undefined, [ value ] ); + } +} + +jQuery.extend( { + + Deferred: function( func ) { + var tuples = [ + + // action, add listener, callbacks, + // ... 
.then handlers, argument index, [final state] + [ "notify", "progress", jQuery.Callbacks( "memory" ), + jQuery.Callbacks( "memory" ), 2 ], + [ "resolve", "done", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 0, "resolved" ], + [ "reject", "fail", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 1, "rejected" ] + ], + state = "pending", + promise = { + state: function() { + return state; + }, + always: function() { + deferred.done( arguments ).fail( arguments ); + return this; + }, + "catch": function( fn ) { + return promise.then( null, fn ); + }, + + // Keep pipe for back-compat + pipe: function( /* fnDone, fnFail, fnProgress */ ) { + var fns = arguments; + + return jQuery.Deferred( function( newDefer ) { + jQuery.each( tuples, function( i, tuple ) { + + // Map tuples (progress, done, fail) to arguments (done, fail, progress) + var fn = jQuery.isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; + + // deferred.progress(function() { bind to newDefer or newDefer.notify }) + // deferred.done(function() { bind to newDefer or newDefer.resolve }) + // deferred.fail(function() { bind to newDefer or newDefer.reject }) + deferred[ tuple[ 1 ] ]( function() { + var returned = fn && fn.apply( this, arguments ); + if ( returned && jQuery.isFunction( returned.promise ) ) { + returned.promise() + .progress( newDefer.notify ) + .done( newDefer.resolve ) + .fail( newDefer.reject ); + } else { + newDefer[ tuple[ 0 ] + "With" ]( + this, + fn ? 
[ returned ] : arguments + ); + } + } ); + } ); + fns = null; + } ).promise(); + }, + then: function( onFulfilled, onRejected, onProgress ) { + var maxDepth = 0; + function resolve( depth, deferred, handler, special ) { + return function() { + var that = this, + args = arguments, + mightThrow = function() { + var returned, then; + + // Support: Promises/A+ section 2.3.3.3.3 + // https://promisesaplus.com/#point-59 + // Ignore double-resolution attempts + if ( depth < maxDepth ) { + return; + } + + returned = handler.apply( that, args ); + + // Support: Promises/A+ section 2.3.1 + // https://promisesaplus.com/#point-48 + if ( returned === deferred.promise() ) { + throw new TypeError( "Thenable self-resolution" ); + } + + // Support: Promises/A+ sections 2.3.3.1, 3.5 + // https://promisesaplus.com/#point-54 + // https://promisesaplus.com/#point-75 + // Retrieve `then` only once + then = returned && + + // Support: Promises/A+ section 2.3.4 + // https://promisesaplus.com/#point-64 + // Only check objects and functions for thenability + ( typeof returned === "object" || + typeof returned === "function" ) && + returned.then; + + // Handle a returned thenable + if ( jQuery.isFunction( then ) ) { + + // Special processors (notify) just wait for resolution + if ( special ) { + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ) + ); + + // Normal processors (resolve) also hook into progress + } else { + + // ...and disregard older resolution values + maxDepth++; + + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ), + resolve( maxDepth, deferred, Identity, + deferred.notifyWith ) + ); + } + + // Handle all other returned values + } else { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Identity ) { + that = undefined; + args = [ returned ]; + } + + // Process the 
value(s) + // Default process is resolve + ( special || deferred.resolveWith )( that, args ); + } + }, + + // Only normal processors (resolve) catch and reject exceptions + process = special ? + mightThrow : + function() { + try { + mightThrow(); + } catch ( e ) { + + if ( jQuery.Deferred.exceptionHook ) { + jQuery.Deferred.exceptionHook( e, + process.stackTrace ); + } + + // Support: Promises/A+ section 2.3.3.3.4.1 + // https://promisesaplus.com/#point-61 + // Ignore post-resolution exceptions + if ( depth + 1 >= maxDepth ) { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Thrower ) { + that = undefined; + args = [ e ]; + } + + deferred.rejectWith( that, args ); + } + } + }; + + // Support: Promises/A+ section 2.3.3.3.1 + // https://promisesaplus.com/#point-57 + // Re-resolve promises immediately to dodge false rejection from + // subsequent errors + if ( depth ) { + process(); + } else { + + // Call an optional hook to record the stack, in case of exception + // since it's otherwise lost when execution goes async + if ( jQuery.Deferred.getStackHook ) { + process.stackTrace = jQuery.Deferred.getStackHook(); + } + window.setTimeout( process ); + } + }; + } + + return jQuery.Deferred( function( newDefer ) { + + // progress_handlers.add( ... ) + tuples[ 0 ][ 3 ].add( + resolve( + 0, + newDefer, + jQuery.isFunction( onProgress ) ? + onProgress : + Identity, + newDefer.notifyWith + ) + ); + + // fulfilled_handlers.add( ... ) + tuples[ 1 ][ 3 ].add( + resolve( + 0, + newDefer, + jQuery.isFunction( onFulfilled ) ? + onFulfilled : + Identity + ) + ); + + // rejected_handlers.add( ... ) + tuples[ 2 ][ 3 ].add( + resolve( + 0, + newDefer, + jQuery.isFunction( onRejected ) ? + onRejected : + Thrower + ) + ); + } ).promise(); + }, + + // Get a promise for this deferred + // If obj is provided, the promise aspect is added to the object + promise: function( obj ) { + return obj != null ? 
jQuery.extend( obj, promise ) : promise; + } + }, + deferred = {}; + + // Add list-specific methods + jQuery.each( tuples, function( i, tuple ) { + var list = tuple[ 2 ], + stateString = tuple[ 5 ]; + + // promise.progress = list.add + // promise.done = list.add + // promise.fail = list.add + promise[ tuple[ 1 ] ] = list.add; + + // Handle state + if ( stateString ) { + list.add( + function() { + + // state = "resolved" (i.e., fulfilled) + // state = "rejected" + state = stateString; + }, + + // rejected_callbacks.disable + // fulfilled_callbacks.disable + tuples[ 3 - i ][ 2 ].disable, + + // progress_callbacks.lock + tuples[ 0 ][ 2 ].lock + ); + } + + // progress_handlers.fire + // fulfilled_handlers.fire + // rejected_handlers.fire + list.add( tuple[ 3 ].fire ); + + // deferred.notify = function() { deferred.notifyWith(...) } + // deferred.resolve = function() { deferred.resolveWith(...) } + // deferred.reject = function() { deferred.rejectWith(...) } + deferred[ tuple[ 0 ] ] = function() { + deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); + return this; + }; + + // deferred.notifyWith = list.fireWith + // deferred.resolveWith = list.fireWith + // deferred.rejectWith = list.fireWith + deferred[ tuple[ 0 ] + "With" ] = list.fireWith; + } ); + + // Make the deferred a promise + promise.promise( deferred ); + + // Call given func if any + if ( func ) { + func.call( deferred, deferred ); + } + + // All done! 
+ return deferred; + }, + + // Deferred helper + when: function( singleValue ) { + var + + // count of uncompleted subordinates + remaining = arguments.length, + + // count of unprocessed arguments + i = remaining, + + // subordinate fulfillment data + resolveContexts = Array( i ), + resolveValues = slice.call( arguments ), + + // the master Deferred + master = jQuery.Deferred(), + + // subordinate callback factory + updateFunc = function( i ) { + return function( value ) { + resolveContexts[ i ] = this; + resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; + if ( !( --remaining ) ) { + master.resolveWith( resolveContexts, resolveValues ); + } + }; + }; + + // Single- and empty arguments are adopted like Promise.resolve + if ( remaining <= 1 ) { + adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, + !remaining ); + + // Use .then() to unwrap secondary thenables (cf. gh-3000) + if ( master.state() === "pending" || + jQuery.isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { + + return master.then(); + } + } + + // Multiple arguments are aggregated like Promise.all array elements + while ( i-- ) { + adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); + } + + return master.promise(); + } +} ); + + +// These usually indicate a programmer mistake during development, +// warn about them ASAP rather than swallowing them by default. 
+var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; + +jQuery.Deferred.exceptionHook = function( error, stack ) { + + // Support: IE 8 - 9 only + // Console exists when dev tools are open, which can happen at any time + if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { + window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); + } +}; + + + + +jQuery.readyException = function( error ) { + window.setTimeout( function() { + throw error; + } ); +}; + + + + +// The deferred used on DOM ready +var readyList = jQuery.Deferred(); + +jQuery.fn.ready = function( fn ) { + + readyList + .then( fn ) + + // Wrap jQuery.readyException in a function so that the lookup + // happens at the time of error handling instead of callback + // registration. + .catch( function( error ) { + jQuery.readyException( error ); + } ); + + return this; +}; + +jQuery.extend( { + + // Is the DOM ready to be used? Set to true once it occurs. + isReady: false, + + // A counter to track how many items to wait for before + // the ready event fires. See #6781 + readyWait: 1, + + // Handle when the DOM is ready + ready: function( wait ) { + + // Abort if there are pending holds or we're already ready + if ( wait === true ? 
--jQuery.readyWait : jQuery.isReady ) { + return; + } + + // Remember that the DOM is ready + jQuery.isReady = true; + + // If a normal DOM Ready event fired, decrement, and wait if need be + if ( wait !== true && --jQuery.readyWait > 0 ) { + return; + } + + // If there are functions bound, to execute + readyList.resolveWith( document, [ jQuery ] ); + } +} ); + +jQuery.ready.then = readyList.then; + +// The ready event handler and self cleanup method +function completed() { + document.removeEventListener( "DOMContentLoaded", completed ); + window.removeEventListener( "load", completed ); + jQuery.ready(); +} + +// Catch cases where $(document).ready() is called +// after the browser event has already occurred. +// Support: IE <=9 - 10 only +// Older IE sometimes signals "interactive" too soon +if ( document.readyState === "complete" || + ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { + + // Handle it asynchronously to allow scripts the opportunity to delay ready + window.setTimeout( jQuery.ready ); + +} else { + + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", completed ); + + // A fallback to window.onload, that will always work + window.addEventListener( "load", completed ); +} + + + + +// Multifunctional method to get and set values of a collection +// The value/s can optionally be executed if it's a function +var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { + var i = 0, + len = elems.length, + bulk = key == null; + + // Sets many values + if ( jQuery.type( key ) === "object" ) { + chainable = true; + for ( i in key ) { + access( elems, fn, i, key[ i ], true, emptyGet, raw ); + } + + // Sets one value + } else if ( value !== undefined ) { + chainable = true; + + if ( !jQuery.isFunction( value ) ) { + raw = true; + } + + if ( bulk ) { + + // Bulk operations run against the entire set + if ( raw ) { + fn.call( elems, value ); + fn = null; + + // ...except when executing 
function values + } else { + bulk = fn; + fn = function( elem, key, value ) { + return bulk.call( jQuery( elem ), value ); + }; + } + } + + if ( fn ) { + for ( ; i < len; i++ ) { + fn( + elems[ i ], key, raw ? + value : + value.call( elems[ i ], i, fn( elems[ i ], key ) ) + ); + } + } + } + + if ( chainable ) { + return elems; + } + + // Gets + if ( bulk ) { + return fn.call( elems ); + } + + return len ? fn( elems[ 0 ], key ) : emptyGet; +}; +var acceptData = function( owner ) { + + // Accepts only: + // - Node + // - Node.ELEMENT_NODE + // - Node.DOCUMENT_NODE + // - Object + // - Any + return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); +}; + + + + +function Data() { + this.expando = jQuery.expando + Data.uid++; +} + +Data.uid = 1; + +Data.prototype = { + + cache: function( owner ) { + + // Check if the owner object already has a cache + var value = owner[ this.expando ]; + + // If not, create one + if ( !value ) { + value = {}; + + // We can accept data for non-element nodes in modern browsers, + // but we should not, see #8335. + // Always return an empty object. 
+ if ( acceptData( owner ) ) { + + // If it is a node unlikely to be stringify-ed or looped over + // use plain assignment + if ( owner.nodeType ) { + owner[ this.expando ] = value; + + // Otherwise secure it in a non-enumerable property + // configurable must be true to allow the property to be + // deleted when data is removed + } else { + Object.defineProperty( owner, this.expando, { + value: value, + configurable: true + } ); + } + } + } + + return value; + }, + set: function( owner, data, value ) { + var prop, + cache = this.cache( owner ); + + // Handle: [ owner, key, value ] args + // Always use camelCase key (gh-2257) + if ( typeof data === "string" ) { + cache[ jQuery.camelCase( data ) ] = value; + + // Handle: [ owner, { properties } ] args + } else { + + // Copy the properties one-by-one to the cache object + for ( prop in data ) { + cache[ jQuery.camelCase( prop ) ] = data[ prop ]; + } + } + return cache; + }, + get: function( owner, key ) { + return key === undefined ? + this.cache( owner ) : + + // Always use camelCase key (gh-2257) + owner[ this.expando ] && owner[ this.expando ][ jQuery.camelCase( key ) ]; + }, + access: function( owner, key, value ) { + + // In cases where either: + // + // 1. No key was specified + // 2. A string key was specified, but no value provided + // + // Take the "read" path and allow the get method to determine + // which value to return, respectively either: + // + // 1. The entire cache object + // 2. The data stored at the key + // + if ( key === undefined || + ( ( key && typeof key === "string" ) && value === undefined ) ) { + + return this.get( owner, key ); + } + + // When the key is not a string, or both a key and value + // are specified, set or extend (existing objects) with either: + // + // 1. An object of properties + // 2. 
A key and value + // + this.set( owner, key, value ); + + // Since the "set" path can have two possible entry points + // return the expected data based on which path was taken[*] + return value !== undefined ? value : key; + }, + remove: function( owner, key ) { + var i, + cache = owner[ this.expando ]; + + if ( cache === undefined ) { + return; + } + + if ( key !== undefined ) { + + // Support array or space separated string of keys + if ( Array.isArray( key ) ) { + + // If key is an array of keys... + // We always set camelCase keys, so remove that. + key = key.map( jQuery.camelCase ); + } else { + key = jQuery.camelCase( key ); + + // If a key with the spaces exists, use it. + // Otherwise, create an array by matching non-whitespace + key = key in cache ? + [ key ] : + ( key.match( rnothtmlwhite ) || [] ); + } + + i = key.length; + + while ( i-- ) { + delete cache[ key[ i ] ]; + } + } + + // Remove the expando if there's no more data + if ( key === undefined || jQuery.isEmptyObject( cache ) ) { + + // Support: Chrome <=35 - 45 + // Webkit & Blink performance suffers when deleting properties + // from DOM nodes, so set to undefined instead + // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) + if ( owner.nodeType ) { + owner[ this.expando ] = undefined; + } else { + delete owner[ this.expando ]; + } + } + }, + hasData: function( owner ) { + var cache = owner[ this.expando ]; + return cache !== undefined && !jQuery.isEmptyObject( cache ); + } +}; +var dataPriv = new Data(); + +var dataUser = new Data(); + + + +// Implementation Summary +// +// 1. Enforce API surface and semantic compatibility with 1.9.x branch +// 2. Improve the module's maintainability by reducing the storage +// paths to a single mechanism. +// 3. Use the same single mechanism to support "private" and "user" data. +// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) +// 5. Avoid exposing implementation details on user objects (eg. 
expando properties) +// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 + +var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, + rmultiDash = /[A-Z]/g; + +function getData( data ) { + if ( data === "true" ) { + return true; + } + + if ( data === "false" ) { + return false; + } + + if ( data === "null" ) { + return null; + } + + // Only convert to a number if it doesn't change the string + if ( data === +data + "" ) { + return +data; + } + + if ( rbrace.test( data ) ) { + return JSON.parse( data ); + } + + return data; +} + +function dataAttr( elem, key, data ) { + var name; + + // If nothing was found internally, try to fetch any + // data from the HTML5 data-* attribute + if ( data === undefined && elem.nodeType === 1 ) { + name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); + data = elem.getAttribute( name ); + + if ( typeof data === "string" ) { + try { + data = getData( data ); + } catch ( e ) {} + + // Make sure we set the data so it isn't changed later + dataUser.set( elem, key, data ); + } else { + data = undefined; + } + } + return data; +} + +jQuery.extend( { + hasData: function( elem ) { + return dataUser.hasData( elem ) || dataPriv.hasData( elem ); + }, + + data: function( elem, name, data ) { + return dataUser.access( elem, name, data ); + }, + + removeData: function( elem, name ) { + dataUser.remove( elem, name ); + }, + + // TODO: Now that all calls to _data and _removeData have been replaced + // with direct calls to dataPriv methods, these can be deprecated. 
+ _data: function( elem, name, data ) { + return dataPriv.access( elem, name, data ); + }, + + _removeData: function( elem, name ) { + dataPriv.remove( elem, name ); + } +} ); + +jQuery.fn.extend( { + data: function( key, value ) { + var i, name, data, + elem = this[ 0 ], + attrs = elem && elem.attributes; + + // Gets all values + if ( key === undefined ) { + if ( this.length ) { + data = dataUser.get( elem ); + + if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { + i = attrs.length; + while ( i-- ) { + + // Support: IE 11 only + // The attrs elements can be null (#14894) + if ( attrs[ i ] ) { + name = attrs[ i ].name; + if ( name.indexOf( "data-" ) === 0 ) { + name = jQuery.camelCase( name.slice( 5 ) ); + dataAttr( elem, name, data[ name ] ); + } + } + } + dataPriv.set( elem, "hasDataAttrs", true ); + } + } + + return data; + } + + // Sets multiple values + if ( typeof key === "object" ) { + return this.each( function() { + dataUser.set( this, key ); + } ); + } + + return access( this, function( value ) { + var data; + + // The calling jQuery object (element matches) is not empty + // (and therefore has an element appears at this[ 0 ]) and the + // `value` parameter was not undefined. An empty jQuery object + // will result in `undefined` for elem = this[ 0 ] which will + // throw an exception if an attempt to read a data cache is made. + if ( elem && value === undefined ) { + + // Attempt to get data from the cache + // The key will always be camelCased in Data + data = dataUser.get( elem, key ); + if ( data !== undefined ) { + return data; + } + + // Attempt to "discover" the data in + // HTML5 custom data-* attrs + data = dataAttr( elem, key ); + if ( data !== undefined ) { + return data; + } + + // We tried really hard, but the data doesn't exist. + return; + } + + // Set the data... 
+ this.each( function() { + + // We always store the camelCased key + dataUser.set( this, key, value ); + } ); + }, null, value, arguments.length > 1, null, true ); + }, + + removeData: function( key ) { + return this.each( function() { + dataUser.remove( this, key ); + } ); + } +} ); + + +jQuery.extend( { + queue: function( elem, type, data ) { + var queue; + + if ( elem ) { + type = ( type || "fx" ) + "queue"; + queue = dataPriv.get( elem, type ); + + // Speed up dequeue by getting out quickly if this is just a lookup + if ( data ) { + if ( !queue || Array.isArray( data ) ) { + queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); + } else { + queue.push( data ); + } + } + return queue || []; + } + }, + + dequeue: function( elem, type ) { + type = type || "fx"; + + var queue = jQuery.queue( elem, type ), + startLength = queue.length, + fn = queue.shift(), + hooks = jQuery._queueHooks( elem, type ), + next = function() { + jQuery.dequeue( elem, type ); + }; + + // If the fx queue is dequeued, always remove the progress sentinel + if ( fn === "inprogress" ) { + fn = queue.shift(); + startLength--; + } + + if ( fn ) { + + // Add a progress sentinel to prevent the fx queue from being + // automatically dequeued + if ( type === "fx" ) { + queue.unshift( "inprogress" ); + } + + // Clear up the last queue stop function + delete hooks.stop; + fn.call( elem, next, hooks ); + } + + if ( !startLength && hooks ) { + hooks.empty.fire(); + } + }, + + // Not public - generate a queueHooks object, or return the current one + _queueHooks: function( elem, type ) { + var key = type + "queueHooks"; + return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { + empty: jQuery.Callbacks( "once memory" ).add( function() { + dataPriv.remove( elem, [ type + "queue", key ] ); + } ) + } ); + } +} ); + +jQuery.fn.extend( { + queue: function( type, data ) { + var setter = 2; + + if ( typeof type !== "string" ) { + data = type; + type = "fx"; + setter--; + } + + if ( 
arguments.length < setter ) { + return jQuery.queue( this[ 0 ], type ); + } + + return data === undefined ? + this : + this.each( function() { + var queue = jQuery.queue( this, type, data ); + + // Ensure a hooks for this queue + jQuery._queueHooks( this, type ); + + if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { + jQuery.dequeue( this, type ); + } + } ); + }, + dequeue: function( type ) { + return this.each( function() { + jQuery.dequeue( this, type ); + } ); + }, + clearQueue: function( type ) { + return this.queue( type || "fx", [] ); + }, + + // Get a promise resolved when queues of a certain type + // are emptied (fx is the type by default) + promise: function( type, obj ) { + var tmp, + count = 1, + defer = jQuery.Deferred(), + elements = this, + i = this.length, + resolve = function() { + if ( !( --count ) ) { + defer.resolveWith( elements, [ elements ] ); + } + }; + + if ( typeof type !== "string" ) { + obj = type; + type = undefined; + } + type = type || "fx"; + + while ( i-- ) { + tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); + if ( tmp && tmp.empty ) { + count++; + tmp.empty.add( resolve ); + } + } + resolve(); + return defer.promise( obj ); + } +} ); +var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; + +var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); + + +var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; + +var isHiddenWithinTree = function( elem, el ) { + + // isHiddenWithinTree might be called from jQuery#filter function; + // in that case, element will be second argument + elem = el || elem; + + // Inline style trumps all + return elem.style.display === "none" || + elem.style.display === "" && + + // Otherwise, check computed style + // Support: Firefox <=43 - 45 + // Disconnected elements can have computed display: none, so first confirm that elem is + // in the document. 
+ jQuery.contains( elem.ownerDocument, elem ) && + + jQuery.css( elem, "display" ) === "none"; + }; + +var swap = function( elem, options, callback, args ) { + var ret, name, + old = {}; + + // Remember the old values, and insert the new ones + for ( name in options ) { + old[ name ] = elem.style[ name ]; + elem.style[ name ] = options[ name ]; + } + + ret = callback.apply( elem, args || [] ); + + // Revert the old values + for ( name in options ) { + elem.style[ name ] = old[ name ]; + } + + return ret; +}; + + + + +function adjustCSS( elem, prop, valueParts, tween ) { + var adjusted, + scale = 1, + maxIterations = 20, + currentValue = tween ? + function() { + return tween.cur(); + } : + function() { + return jQuery.css( elem, prop, "" ); + }, + initial = currentValue(), + unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? "" : "px" ), + + // Starting value computation is required for potential unit mismatches + initialInUnit = ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && + rcssNum.exec( jQuery.css( elem, prop ) ); + + if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { + + // Trust units reported by jQuery.css + unit = unit || initialInUnit[ 3 ]; + + // Make sure we update the tween properties later on + valueParts = valueParts || []; + + // Iteratively approximate from a nonzero starting point + initialInUnit = +initial || 1; + + do { + + // If previous iteration zeroed out, double until we get *something*. + // Use string for doubling so we don't accidentally see scale as unchanged below + scale = scale || ".5"; + + // Adjust and apply + initialInUnit = initialInUnit / scale; + jQuery.style( elem, prop, initialInUnit + unit ); + + // Update scale, tolerating zero or NaN from tween.cur() + // Break the loop if scale is unchanged or perfect, or if we've just had enough. 
+ } while ( + scale !== ( scale = currentValue() / initial ) && scale !== 1 && --maxIterations + ); + } + + if ( valueParts ) { + initialInUnit = +initialInUnit || +initial || 0; + + // Apply relative offset (+=/-=) if specified + adjusted = valueParts[ 1 ] ? + initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : + +valueParts[ 2 ]; + if ( tween ) { + tween.unit = unit; + tween.start = initialInUnit; + tween.end = adjusted; + } + } + return adjusted; +} + + +var defaultDisplayMap = {}; + +function getDefaultDisplay( elem ) { + var temp, + doc = elem.ownerDocument, + nodeName = elem.nodeName, + display = defaultDisplayMap[ nodeName ]; + + if ( display ) { + return display; + } + + temp = doc.body.appendChild( doc.createElement( nodeName ) ); + display = jQuery.css( temp, "display" ); + + temp.parentNode.removeChild( temp ); + + if ( display === "none" ) { + display = "block"; + } + defaultDisplayMap[ nodeName ] = display; + + return display; +} + +function showHide( elements, show ) { + var display, elem, + values = [], + index = 0, + length = elements.length; + + // Determine new display value for elements that need to change + for ( ; index < length; index++ ) { + elem = elements[ index ]; + if ( !elem.style ) { + continue; + } + + display = elem.style.display; + if ( show ) { + + // Since we force visibility upon cascade-hidden elements, an immediate (and slow) + // check is required in this first loop unless we have a nonempty display value (either + // inline or about-to-be-restored) + if ( display === "none" ) { + values[ index ] = dataPriv.get( elem, "display" ) || null; + if ( !values[ index ] ) { + elem.style.display = ""; + } + } + if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { + values[ index ] = getDefaultDisplay( elem ); + } + } else { + if ( display !== "none" ) { + values[ index ] = "none"; + + // Remember what we're overwriting + dataPriv.set( elem, "display", display ); + } + } + } + + // Set the display of the elements in a 
second loop to avoid constant reflow + for ( index = 0; index < length; index++ ) { + if ( values[ index ] != null ) { + elements[ index ].style.display = values[ index ]; + } + } + + return elements; +} + +jQuery.fn.extend( { + show: function() { + return showHide( this, true ); + }, + hide: function() { + return showHide( this ); + }, + toggle: function( state ) { + if ( typeof state === "boolean" ) { + return state ? this.show() : this.hide(); + } + + return this.each( function() { + if ( isHiddenWithinTree( this ) ) { + jQuery( this ).show(); + } else { + jQuery( this ).hide(); + } + } ); + } +} ); +var rcheckableType = ( /^(?:checkbox|radio)$/i ); + +var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]+)/i ); + +var rscriptType = ( /^$|\/(?:java|ecma)script/i ); + + + +// We have to close these tags to support XHTML (#13200) +var wrapMap = { + + // Support: IE <=9 only + option: [ 1, "" ], + + // XHTML parsers do not magically insert elements in the + // same way that tag soup parsers do. So we cannot shorten + // this by omitting or other required elements. + thead: [ 1, "", "
" ], + col: [ 2, "", "
" ], + tr: [ 2, "", "
" ], + td: [ 3, "", "
" ], + + _default: [ 0, "", "" ] +}; + +// Support: IE <=9 only +wrapMap.optgroup = wrapMap.option; + +wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; +wrapMap.th = wrapMap.td; + + +function getAll( context, tag ) { + + // Support: IE <=9 - 11 only + // Use typeof to avoid zero-argument method invocation on host objects (#15151) + var ret; + + if ( typeof context.getElementsByTagName !== "undefined" ) { + ret = context.getElementsByTagName( tag || "*" ); + + } else if ( typeof context.querySelectorAll !== "undefined" ) { + ret = context.querySelectorAll( tag || "*" ); + + } else { + ret = []; + } + + if ( tag === undefined || tag && nodeName( context, tag ) ) { + return jQuery.merge( [ context ], ret ); + } + + return ret; +} + + +// Mark scripts as having already been evaluated +function setGlobalEval( elems, refElements ) { + var i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + dataPriv.set( + elems[ i ], + "globalEval", + !refElements || dataPriv.get( refElements[ i ], "globalEval" ) + ); + } +} + + +var rhtml = /<|&#?\w+;/; + +function buildFragment( elems, context, scripts, selection, ignored ) { + var elem, tmp, tag, wrap, contains, j, + fragment = context.createDocumentFragment(), + nodes = [], + i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + elem = elems[ i ]; + + if ( elem || elem === 0 ) { + + // Add nodes directly + if ( jQuery.type( elem ) === "object" ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, elem.nodeType ? 
[ elem ] : elem ); + + // Convert non-html into a text node + } else if ( !rhtml.test( elem ) ) { + nodes.push( context.createTextNode( elem ) ); + + // Convert html into DOM nodes + } else { + tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); + + // Deserialize a standard representation + tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); + wrap = wrapMap[ tag ] || wrapMap._default; + tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; + + // Descend through wrappers to the right content + j = wrap[ 0 ]; + while ( j-- ) { + tmp = tmp.lastChild; + } + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, tmp.childNodes ); + + // Remember the top-level container + tmp = fragment.firstChild; + + // Ensure the created nodes are orphaned (#12392) + tmp.textContent = ""; + } + } + } + + // Remove wrapper from fragment + fragment.textContent = ""; + + i = 0; + while ( ( elem = nodes[ i++ ] ) ) { + + // Skip elements already in the context collection (trac-4087) + if ( selection && jQuery.inArray( elem, selection ) > -1 ) { + if ( ignored ) { + ignored.push( elem ); + } + continue; + } + + contains = jQuery.contains( elem.ownerDocument, elem ); + + // Append to fragment + tmp = getAll( fragment.appendChild( elem ), "script" ); + + // Preserve script evaluation history + if ( contains ) { + setGlobalEval( tmp ); + } + + // Capture executables + if ( scripts ) { + j = 0; + while ( ( elem = tmp[ j++ ] ) ) { + if ( rscriptType.test( elem.type || "" ) ) { + scripts.push( elem ); + } + } + } + } + + return fragment; +} + + +( function() { + var fragment = document.createDocumentFragment(), + div = fragment.appendChild( document.createElement( "div" ) ), + input = document.createElement( "input" ); + + // Support: Android 4.0 - 4.3 only + // Check state lost if the name is set (#11217) + // Support: Windows Web Apps (WWA) + // `name` and `type` must use 
.setAttribute for WWA (#14901) + input.setAttribute( "type", "radio" ); + input.setAttribute( "checked", "checked" ); + input.setAttribute( "name", "t" ); + + div.appendChild( input ); + + // Support: Android <=4.1 only + // Older WebKit doesn't clone checked state correctly in fragments + support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; + + // Support: IE <=11 only + // Make sure textarea (and checkbox) defaultValue is properly cloned + div.innerHTML = ""; + support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; +} )(); +var documentElement = document.documentElement; + + + +var + rkeyEvent = /^key/, + rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/, + rtypenamespace = /^([^.]*)(?:\.(.+)|)/; + +function returnTrue() { + return true; +} + +function returnFalse() { + return false; +} + +// Support: IE <=9 only +// See #13393 for more info +function safeActiveElement() { + try { + return document.activeElement; + } catch ( err ) { } +} + +function on( elem, types, selector, data, fn, one ) { + var origFn, type; + + // Types can be a map of types/handlers + if ( typeof types === "object" ) { + + // ( types-Object, selector, data ) + if ( typeof selector !== "string" ) { + + // ( types-Object, data ) + data = data || selector; + selector = undefined; + } + for ( type in types ) { + on( elem, type, selector, data, types[ type ], one ); + } + return elem; + } + + if ( data == null && fn == null ) { + + // ( types, fn ) + fn = selector; + data = selector = undefined; + } else if ( fn == null ) { + if ( typeof selector === "string" ) { + + // ( types, selector, fn ) + fn = data; + data = undefined; + } else { + + // ( types, data, fn ) + fn = data; + data = selector; + selector = undefined; + } + } + if ( fn === false ) { + fn = returnFalse; + } else if ( !fn ) { + return elem; + } + + if ( one === 1 ) { + origFn = fn; + fn = function( event ) { + + // Can use an empty set, since event contains the info + 
jQuery().off( event ); + return origFn.apply( this, arguments ); + }; + + // Use same guid so caller can remove using origFn + fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); + } + return elem.each( function() { + jQuery.event.add( this, types, fn, data, selector ); + } ); +} + +/* + * Helper functions for managing events -- not part of the public interface. + * Props to Dean Edwards' addEvent library for many of the ideas. + */ +jQuery.event = { + + global: {}, + + add: function( elem, types, handler, data, selector ) { + + var handleObjIn, eventHandle, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.get( elem ); + + // Don't attach events to noData or text/comment nodes (but allow plain objects) + if ( !elemData ) { + return; + } + + // Caller can pass in an object of custom data in lieu of the handler + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + selector = handleObjIn.selector; + } + + // Ensure that invalid selectors throw exceptions at attach time + // Evaluate against documentElement in case elem is a non-element node (e.g., document) + if ( selector ) { + jQuery.find.matchesSelector( documentElement, selector ); + } + + // Make sure that the handler has a unique ID, used to find/remove it later + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure and main handler, if this is the first + if ( !( events = elemData.events ) ) { + events = elemData.events = {}; + } + if ( !( eventHandle = elemData.handle ) ) { + eventHandle = elemData.handle = function( e ) { + + // Discard the second event of a jQuery.event.trigger() and + // when an event is called after a page has unloaded + return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? 
+ jQuery.event.dispatch.apply( elem, arguments ) : undefined; + }; + } + + // Handle multiple events separated by a space + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // There *must* be a type, no attaching namespace-only handlers + if ( !type ) { + continue; + } + + // If event changes its type, use the special event handlers for the changed type + special = jQuery.event.special[ type ] || {}; + + // If selector defined, determine special event api type, otherwise given type + type = ( selector ? special.delegateType : special.bindType ) || type; + + // Update special based on newly reset type + special = jQuery.event.special[ type ] || {}; + + // handleObj is passed to all event handlers + handleObj = jQuery.extend( { + type: type, + origType: origType, + data: data, + handler: handler, + guid: handler.guid, + selector: selector, + needsContext: selector && jQuery.expr.match.needsContext.test( selector ), + namespace: namespaces.join( "." 
) + }, handleObjIn ); + + // Init the event handler queue if we're the first + if ( !( handlers = events[ type ] ) ) { + handlers = events[ type ] = []; + handlers.delegateCount = 0; + + // Only use addEventListener if the special events handler returns false + if ( !special.setup || + special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add to the element's handler list, delegates in front + if ( selector ) { + handlers.splice( handlers.delegateCount++, 0, handleObj ); + } else { + handlers.push( handleObj ); + } + + // Keep track of which events have ever been used, for event optimization + jQuery.event.global[ type ] = true; + } + + }, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, selector, mappedTypes ) { + + var j, origCount, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); + + if ( !elemData || !( events = elemData.events ) ) { + return; + } + + // Once for each type.namespace in types; type may be omitted + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // Unbind all events (on this namespace, if provided) for the element + if ( !type ) { + for ( type in events ) { + jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); + } + continue; + } + + special = jQuery.event.special[ type ] || {}; + type = ( selector ? 
special.delegateType : special.bindType ) || type; + handlers = events[ type ] || []; + tmp = tmp[ 2 ] && + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); + + // Remove matching events + origCount = j = handlers.length; + while ( j-- ) { + handleObj = handlers[ j ]; + + if ( ( mappedTypes || origType === handleObj.origType ) && + ( !handler || handler.guid === handleObj.guid ) && + ( !tmp || tmp.test( handleObj.namespace ) ) && + ( !selector || selector === handleObj.selector || + selector === "**" && handleObj.selector ) ) { + handlers.splice( j, 1 ); + + if ( handleObj.selector ) { + handlers.delegateCount--; + } + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + } + + // Remove generic event handler if we removed something and no more handlers exist + // (avoids potential for endless recursion during removal of special event handlers) + if ( origCount && !handlers.length ) { + if ( !special.teardown || + special.teardown.call( elem, namespaces, elemData.handle ) === false ) { + + jQuery.removeEvent( elem, type, elemData.handle ); + } + + delete events[ type ]; + } + } + + // Remove data and the expando if it's no longer used + if ( jQuery.isEmptyObject( events ) ) { + dataPriv.remove( elem, "handle events" ); + } + }, + + dispatch: function( nativeEvent ) { + + // Make a writable jQuery.Event from the native event object + var event = jQuery.event.fix( nativeEvent ); + + var i, j, ret, matched, handleObj, handlerQueue, + args = new Array( arguments.length ), + handlers = ( dataPriv.get( this, "events" ) || {} )[ event.type ] || [], + special = jQuery.event.special[ event.type ] || {}; + + // Use the fix-ed jQuery.Event rather than the (read-only) native event + args[ 0 ] = event; + + for ( i = 1; i < arguments.length; i++ ) { + args[ i ] = arguments[ i ]; + } + + event.delegateTarget = this; + + // Call the preDispatch hook for the mapped type, and let it bail if desired + if ( special.preDispatch && 
special.preDispatch.call( this, event ) === false ) { + return; + } + + // Determine handlers + handlerQueue = jQuery.event.handlers.call( this, event, handlers ); + + // Run delegates first; they may want to stop propagation beneath us + i = 0; + while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { + event.currentTarget = matched.elem; + + j = 0; + while ( ( handleObj = matched.handlers[ j++ ] ) && + !event.isImmediatePropagationStopped() ) { + + // Triggered event must either 1) have no namespace, or 2) have namespace(s) + // a subset or equal to those in the bound event (both can have no namespace). + if ( !event.rnamespace || event.rnamespace.test( handleObj.namespace ) ) { + + event.handleObj = handleObj; + event.data = handleObj.data; + + ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || + handleObj.handler ).apply( matched.elem, args ); + + if ( ret !== undefined ) { + if ( ( event.result = ret ) === false ) { + event.preventDefault(); + event.stopPropagation(); + } + } + } + } + } + + // Call the postDispatch hook for the mapped type + if ( special.postDispatch ) { + special.postDispatch.call( this, event ); + } + + return event.result; + }, + + handlers: function( event, handlers ) { + var i, handleObj, sel, matchedHandlers, matchedSelectors, + handlerQueue = [], + delegateCount = handlers.delegateCount, + cur = event.target; + + // Find delegate handlers + if ( delegateCount && + + // Support: IE <=9 + // Black-hole SVG instance trees (trac-13180) + cur.nodeType && + + // Support: Firefox <=42 + // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) + // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click + // Support: IE 11 only + // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) + !( event.type === "click" && event.button >= 1 ) ) { + + for ( ; cur !== this; cur = cur.parentNode || this ) { + + // Don't check non-elements (#13208) + 
// Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) + if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { + matchedHandlers = []; + matchedSelectors = {}; + for ( i = 0; i < delegateCount; i++ ) { + handleObj = handlers[ i ]; + + // Don't conflict with Object.prototype properties (#13203) + sel = handleObj.selector + " "; + + if ( matchedSelectors[ sel ] === undefined ) { + matchedSelectors[ sel ] = handleObj.needsContext ? + jQuery( sel, this ).index( cur ) > -1 : + jQuery.find( sel, this, null, [ cur ] ).length; + } + if ( matchedSelectors[ sel ] ) { + matchedHandlers.push( handleObj ); + } + } + if ( matchedHandlers.length ) { + handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); + } + } + } + } + + // Add the remaining (directly-bound) handlers + cur = this; + if ( delegateCount < handlers.length ) { + handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); + } + + return handlerQueue; + }, + + addProp: function( name, hook ) { + Object.defineProperty( jQuery.Event.prototype, name, { + enumerable: true, + configurable: true, + + get: jQuery.isFunction( hook ) ? + function() { + if ( this.originalEvent ) { + return hook( this.originalEvent ); + } + } : + function() { + if ( this.originalEvent ) { + return this.originalEvent[ name ]; + } + }, + + set: function( value ) { + Object.defineProperty( this, name, { + enumerable: true, + configurable: true, + writable: true, + value: value + } ); + } + } ); + }, + + fix: function( originalEvent ) { + return originalEvent[ jQuery.expando ] ? 
+ originalEvent : + new jQuery.Event( originalEvent ); + }, + + special: { + load: { + + // Prevent triggered image.load events from bubbling to window.load + noBubble: true + }, + focus: { + + // Fire native event if possible so blur/focus sequence is correct + trigger: function() { + if ( this !== safeActiveElement() && this.focus ) { + this.focus(); + return false; + } + }, + delegateType: "focusin" + }, + blur: { + trigger: function() { + if ( this === safeActiveElement() && this.blur ) { + this.blur(); + return false; + } + }, + delegateType: "focusout" + }, + click: { + + // For checkbox, fire native event so checked state will be right + trigger: function() { + if ( this.type === "checkbox" && this.click && nodeName( this, "input" ) ) { + this.click(); + return false; + } + }, + + // For cross-browser consistency, don't fire native .click() on links + _default: function( event ) { + return nodeName( event.target, "a" ); + } + }, + + beforeunload: { + postDispatch: function( event ) { + + // Support: Firefox 20+ + // Firefox doesn't alert if the returnValue field is not set. + if ( event.result !== undefined && event.originalEvent ) { + event.originalEvent.returnValue = event.result; + } + } + } + } +}; + +jQuery.removeEvent = function( elem, type, handle ) { + + // This "if" is needed for plain objects + if ( elem.removeEventListener ) { + elem.removeEventListener( type, handle ); + } +}; + +jQuery.Event = function( src, props ) { + + // Allow instantiation without the 'new' keyword + if ( !( this instanceof jQuery.Event ) ) { + return new jQuery.Event( src, props ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + + // Events bubbling up the document may have been marked as prevented + // by a handler lower down the tree; reflect the correct value. + this.isDefaultPrevented = src.defaultPrevented || + src.defaultPrevented === undefined && + + // Support: Android <=2.3 only + src.returnValue === false ? 
+ returnTrue : + returnFalse; + + // Create target properties + // Support: Safari <=6 - 7 only + // Target should not be a text node (#504, #13143) + this.target = ( src.target && src.target.nodeType === 3 ) ? + src.target.parentNode : + src.target; + + this.currentTarget = src.currentTarget; + this.relatedTarget = src.relatedTarget; + + // Event type + } else { + this.type = src; + } + + // Put explicitly provided properties onto the event object + if ( props ) { + jQuery.extend( this, props ); + } + + // Create a timestamp if incoming event doesn't have one + this.timeStamp = src && src.timeStamp || jQuery.now(); + + // Mark it as fixed + this[ jQuery.expando ] = true; +}; + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + constructor: jQuery.Event, + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse, + isSimulated: false, + + preventDefault: function() { + var e = this.originalEvent; + + this.isDefaultPrevented = returnTrue; + + if ( e && !this.isSimulated ) { + e.preventDefault(); + } + }, + stopPropagation: function() { + var e = this.originalEvent; + + this.isPropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopPropagation(); + } + }, + stopImmediatePropagation: function() { + var e = this.originalEvent; + + this.isImmediatePropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopImmediatePropagation(); + } + + this.stopPropagation(); + } +}; + +// Includes all common event props including KeyEvent and MouseEvent specific props +jQuery.each( { + altKey: true, + bubbles: true, + cancelable: true, + changedTouches: true, + ctrlKey: true, + detail: true, + eventPhase: true, + metaKey: true, + pageX: true, + pageY: true, + shiftKey: true, + view: true, + "char": true, + charCode: true, + key: true, + 
keyCode: true, + button: true, + buttons: true, + clientX: true, + clientY: true, + offsetX: true, + offsetY: true, + pointerId: true, + pointerType: true, + screenX: true, + screenY: true, + targetTouches: true, + toElement: true, + touches: true, + + which: function( event ) { + var button = event.button; + + // Add which for key events + if ( event.which == null && rkeyEvent.test( event.type ) ) { + return event.charCode != null ? event.charCode : event.keyCode; + } + + // Add which for click: 1 === left; 2 === middle; 3 === right + if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { + if ( button & 1 ) { + return 1; + } + + if ( button & 2 ) { + return 3; + } + + if ( button & 4 ) { + return 2; + } + + return 0; + } + + return event.which; + } +}, jQuery.event.addProp ); + +// Create mouseenter/leave events using mouseover/out and event-time checks +// so that event delegation works in jQuery. +// Do the same for pointerenter/pointerleave and pointerover/pointerout +// +// Support: Safari 7 only +// Safari sends mouseenter too often; see: +// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 +// for the description of the bug (it existed in older Chrome versions as well). +jQuery.each( { + mouseenter: "mouseover", + mouseleave: "mouseout", + pointerenter: "pointerover", + pointerleave: "pointerout" +}, function( orig, fix ) { + jQuery.event.special[ orig ] = { + delegateType: fix, + bindType: fix, + + handle: function( event ) { + var ret, + target = this, + related = event.relatedTarget, + handleObj = event.handleObj; + + // For mouseenter/leave call the handler if related is outside the target. 
+ // NB: No relatedTarget if the mouse left/entered the browser window + if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { + event.type = handleObj.origType; + ret = handleObj.handler.apply( this, arguments ); + event.type = fix; + } + return ret; + } + }; +} ); + +jQuery.fn.extend( { + + on: function( types, selector, data, fn ) { + return on( this, types, selector, data, fn ); + }, + one: function( types, selector, data, fn ) { + return on( this, types, selector, data, fn, 1 ); + }, + off: function( types, selector, fn ) { + var handleObj, type; + if ( types && types.preventDefault && types.handleObj ) { + + // ( event ) dispatched jQuery.Event + handleObj = types.handleObj; + jQuery( types.delegateTarget ).off( + handleObj.namespace ? + handleObj.origType + "." + handleObj.namespace : + handleObj.origType, + handleObj.selector, + handleObj.handler + ); + return this; + } + if ( typeof types === "object" ) { + + // ( types-object [, selector] ) + for ( type in types ) { + this.off( type, selector, types[ type ] ); + } + return this; + } + if ( selector === false || typeof selector === "function" ) { + + // ( types [, fn] ) + fn = selector; + selector = undefined; + } + if ( fn === false ) { + fn = returnFalse; + } + return this.each( function() { + jQuery.event.remove( this, types, fn, selector ); + } ); + } +} ); + + +var + + /* eslint-disable max-len */ + + // See https://github.com/eslint/eslint/issues/3229 + rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi, + + /* eslint-enable */ + + // Support: IE <=10 - 11, Edge 12 - 13 + // In IE/Edge using regex groups here causes severe slowdowns. + // See https://connect.microsoft.com/IE/feedback/details/1736512/ + rnoInnerhtml = /\s*$/g; + +// Prefer a tbody over its parent table for containing new rows +function manipulationTarget( elem, content ) { + if ( nodeName( elem, "table" ) && + nodeName( content.nodeType !== 11 ? 
content : content.firstChild, "tr" ) ) { + + return jQuery( ">tbody", elem )[ 0 ] || elem; + } + + return elem; +} + +// Replace/restore the type attribute of script elements for safe DOM manipulation +function disableScript( elem ) { + elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; + return elem; +} +function restoreScript( elem ) { + var match = rscriptTypeMasked.exec( elem.type ); + + if ( match ) { + elem.type = match[ 1 ]; + } else { + elem.removeAttribute( "type" ); + } + + return elem; +} + +function cloneCopyEvent( src, dest ) { + var i, l, type, pdataOld, pdataCur, udataOld, udataCur, events; + + if ( dest.nodeType !== 1 ) { + return; + } + + // 1. Copy private data: events, handlers, etc. + if ( dataPriv.hasData( src ) ) { + pdataOld = dataPriv.access( src ); + pdataCur = dataPriv.set( dest, pdataOld ); + events = pdataOld.events; + + if ( events ) { + delete pdataCur.handle; + pdataCur.events = {}; + + for ( type in events ) { + for ( i = 0, l = events[ type ].length; i < l; i++ ) { + jQuery.event.add( dest, type, events[ type ][ i ] ); + } + } + } + } + + // 2. Copy user data + if ( dataUser.hasData( src ) ) { + udataOld = dataUser.access( src ); + udataCur = jQuery.extend( {}, udataOld ); + + dataUser.set( dest, udataCur ); + } +} + +// Fix IE bugs, see support tests +function fixInput( src, dest ) { + var nodeName = dest.nodeName.toLowerCase(); + + // Fails to persist the checked state of a cloned checkbox or radio button. 
+ if ( nodeName === "input" && rcheckableType.test( src.type ) ) { + dest.checked = src.checked; + + // Fails to return the selected option to the default selected state when cloning options + } else if ( nodeName === "input" || nodeName === "textarea" ) { + dest.defaultValue = src.defaultValue; + } +} + +function domManip( collection, args, callback, ignored ) { + + // Flatten any nested arrays + args = concat.apply( [], args ); + + var fragment, first, scripts, hasScripts, node, doc, + i = 0, + l = collection.length, + iNoClone = l - 1, + value = args[ 0 ], + isFunction = jQuery.isFunction( value ); + + // We can't cloneNode fragments that contain checked, in WebKit + if ( isFunction || + ( l > 1 && typeof value === "string" && + !support.checkClone && rchecked.test( value ) ) ) { + return collection.each( function( index ) { + var self = collection.eq( index ); + if ( isFunction ) { + args[ 0 ] = value.call( this, index, self.html() ); + } + domManip( self, args, callback, ignored ); + } ); + } + + if ( l ) { + fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); + first = fragment.firstChild; + + if ( fragment.childNodes.length === 1 ) { + fragment = first; + } + + // Require either new content or an interest in ignored elements to invoke the callback + if ( first || ignored ) { + scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); + hasScripts = scripts.length; + + // Use the original fragment for the last item + // instead of the first because it can end up + // being emptied incorrectly in certain situations (#8070). 
+ for ( ; i < l; i++ ) { + node = fragment; + + if ( i !== iNoClone ) { + node = jQuery.clone( node, true, true ); + + // Keep references to cloned scripts for later restoration + if ( hasScripts ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( scripts, getAll( node, "script" ) ); + } + } + + callback.call( collection[ i ], node, i ); + } + + if ( hasScripts ) { + doc = scripts[ scripts.length - 1 ].ownerDocument; + + // Reenable scripts + jQuery.map( scripts, restoreScript ); + + // Evaluate executable scripts on first document insertion + for ( i = 0; i < hasScripts; i++ ) { + node = scripts[ i ]; + if ( rscriptType.test( node.type || "" ) && + !dataPriv.access( node, "globalEval" ) && + jQuery.contains( doc, node ) ) { + + if ( node.src ) { + + // Optional AJAX dependency, but won't run scripts if not present + if ( jQuery._evalUrl ) { + jQuery._evalUrl( node.src ); + } + } else { + DOMEval( node.textContent.replace( rcleanScript, "" ), doc ); + } + } + } + } + } + } + + return collection; +} + +function remove( elem, selector, keepData ) { + var node, + nodes = selector ? 
jQuery.filter( selector, elem ) : elem, + i = 0; + + for ( ; ( node = nodes[ i ] ) != null; i++ ) { + if ( !keepData && node.nodeType === 1 ) { + jQuery.cleanData( getAll( node ) ); + } + + if ( node.parentNode ) { + if ( keepData && jQuery.contains( node.ownerDocument, node ) ) { + setGlobalEval( getAll( node, "script" ) ); + } + node.parentNode.removeChild( node ); + } + } + + return elem; +} + +jQuery.extend( { + htmlPrefilter: function( html ) { + return html.replace( rxhtmlTag, "<$1>" ); + }, + + clone: function( elem, dataAndEvents, deepDataAndEvents ) { + var i, l, srcElements, destElements, + clone = elem.cloneNode( true ), + inPage = jQuery.contains( elem.ownerDocument, elem ); + + // Fix IE cloning issues + if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && + !jQuery.isXMLDoc( elem ) ) { + + // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 + destElements = getAll( clone ); + srcElements = getAll( elem ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + fixInput( srcElements[ i ], destElements[ i ] ); + } + } + + // Copy the events from the original to the clone + if ( dataAndEvents ) { + if ( deepDataAndEvents ) { + srcElements = srcElements || getAll( elem ); + destElements = destElements || getAll( clone ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + cloneCopyEvent( srcElements[ i ], destElements[ i ] ); + } + } else { + cloneCopyEvent( elem, clone ); + } + } + + // Preserve script evaluation history + destElements = getAll( clone, "script" ); + if ( destElements.length > 0 ) { + setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); + } + + // Return the cloned set + return clone; + }, + + cleanData: function( elems ) { + var data, elem, type, + special = jQuery.event.special, + i = 0; + + for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { + if ( acceptData( elem ) ) { + if ( ( data = elem[ dataPriv.expando ] ) ) { + if ( data.events ) { + for ( 
type in data.events ) { + if ( special[ type ] ) { + jQuery.event.remove( elem, type ); + + // This is a shortcut to avoid jQuery.event.remove's overhead + } else { + jQuery.removeEvent( elem, type, data.handle ); + } + } + } + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataPriv.expando ] = undefined; + } + if ( elem[ dataUser.expando ] ) { + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataUser.expando ] = undefined; + } + } + } + } +} ); + +jQuery.fn.extend( { + detach: function( selector ) { + return remove( this, selector, true ); + }, + + remove: function( selector ) { + return remove( this, selector ); + }, + + text: function( value ) { + return access( this, function( value ) { + return value === undefined ? + jQuery.text( this ) : + this.empty().each( function() { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + this.textContent = value; + } + } ); + }, null, value, arguments.length ); + }, + + append: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.appendChild( elem ); + } + } ); + }, + + prepend: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.insertBefore( elem, target.firstChild ); + } + } ); + }, + + before: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this ); + } + } ); + }, + + after: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this.nextSibling ); + } + } ); + }, + + empty: function() { + var elem, + i = 0; + + for ( ; ( 
elem = this[ i ] ) != null; i++ ) { + if ( elem.nodeType === 1 ) { + + // Prevent memory leaks + jQuery.cleanData( getAll( elem, false ) ); + + // Remove any remaining nodes + elem.textContent = ""; + } + } + + return this; + }, + + clone: function( dataAndEvents, deepDataAndEvents ) { + dataAndEvents = dataAndEvents == null ? false : dataAndEvents; + deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; + + return this.map( function() { + return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); + } ); + }, + + html: function( value ) { + return access( this, function( value ) { + var elem = this[ 0 ] || {}, + i = 0, + l = this.length; + + if ( value === undefined && elem.nodeType === 1 ) { + return elem.innerHTML; + } + + // See if we can take a shortcut and just use innerHTML + if ( typeof value === "string" && !rnoInnerhtml.test( value ) && + !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { + + value = jQuery.htmlPrefilter( value ); + + try { + for ( ; i < l; i++ ) { + elem = this[ i ] || {}; + + // Remove element nodes and prevent memory leaks + if ( elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem, false ) ); + elem.innerHTML = value; + } + } + + elem = 0; + + // If using innerHTML throws an exception, use the fallback method + } catch ( e ) {} + } + + if ( elem ) { + this.empty().append( value ); + } + }, null, value, arguments.length ); + }, + + replaceWith: function() { + var ignored = []; + + // Make the changes, replacing each non-ignored context element with the new content + return domManip( this, arguments, function( elem ) { + var parent = this.parentNode; + + if ( jQuery.inArray( this, ignored ) < 0 ) { + jQuery.cleanData( getAll( this ) ); + if ( parent ) { + parent.replaceChild( elem, this ); + } + } + + // Force callback invocation + }, ignored ); + } +} ); + +jQuery.each( { + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after", + 
replaceAll: "replaceWith" +}, function( name, original ) { + jQuery.fn[ name ] = function( selector ) { + var elems, + ret = [], + insert = jQuery( selector ), + last = insert.length - 1, + i = 0; + + for ( ; i <= last; i++ ) { + elems = i === last ? this : this.clone( true ); + jQuery( insert[ i ] )[ original ]( elems ); + + // Support: Android <=4.0 only, PhantomJS 1 only + // .get() because push.apply(_, arraylike) throws on ancient WebKit + push.apply( ret, elems.get() ); + } + + return this.pushStack( ret ); + }; +} ); +var rmargin = ( /^margin/ ); + +var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); + +var getStyles = function( elem ) { + + // Support: IE <=11 only, Firefox <=30 (#15098, #14150) + // IE throws on elements created in popups + // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" + var view = elem.ownerDocument.defaultView; + + if ( !view || !view.opener ) { + view = window; + } + + return view.getComputedStyle( elem ); + }; + + + +( function() { + + // Executing both pixelPosition & boxSizingReliable tests require only one layout + // so they're executed at the same time to save the second computation. 
// Run the layout-dependent support tests (pixelPosition, boxSizingReliable,
// pixelMarginRight, reliableMarginLeft) in a single forced layout.
// Executed lazily, on first access of any of the support.* accessors below.
function computeStyleTests() {

	// This is a singleton, we need to execute it only once
	if ( !div ) {
		return;
	}

	// Deliberately tiny fractional sizes: an engine that reports them back
	// unrounded ("1%", non-"4px") reveals which computed-style quirks apply.
	div.style.cssText =
		"box-sizing:border-box;" +
		"position:relative;display:block;" +
		"margin:auto;border:1px;padding:1px;" +
		"top:1%;width:50%";
	div.innerHTML = "";
	documentElement.appendChild( container );

	var divStyle = window.getComputedStyle( div );
	pixelPositionVal = divStyle.top !== "1%";

	// Support: Android 4.0 - 4.3 only, Firefox <=3 - 44
	reliableMarginLeftVal = divStyle.marginLeft === "2px";
	boxSizingReliableVal = divStyle.width === "4px";

	// Support: Android 4.0 - 4.3 only
	// Some styles come back with percentage values, even though they shouldn't
	div.style.marginRight = "50%";
	pixelMarginRightVal = divStyle.marginRight === "4px";

	documentElement.removeChild( container );

	// Nullify the div so it wouldn't be stored in the memory and
	// it will also be a sign that checks already performed
	div = null;
}

var pixelPositionVal, boxSizingReliableVal, pixelMarginRightVal, reliableMarginLeftVal,
	container = document.createElement( "div" ),
	div = document.createElement( "div" );

// Finish early in limited (non-browser) environments
if ( !div.style ) {
	return;
}

// Support: IE <=9 - 11 only
// Style of cloned element affects source element cloned (#8908)
div.style.backgroundClip = "content-box";
div.cloneNode( true ).style.backgroundClip = "";
support.clearCloneStyle = div.style.backgroundClip === "content-box";

// Park the test container off-screen so the forced layout is invisible.
container.style.cssText = "border:0;width:8px;height:0;top:0;left:-9999px;" +
	"padding:0;margin-top:1px;position:absolute";
container.appendChild( div );

// Each accessor triggers the (one-shot) style tests, then returns the
// cached flag on every subsequent call.
jQuery.extend( support, {
	pixelPosition: function() {
		computeStyleTests();
		return pixelPositionVal;
	},
	boxSizingReliable: function() {
		computeStyleTests();
		return boxSizingReliableVal;
	},
	pixelMarginRight: function() {
		computeStyleTests();
		return pixelMarginRightVal;
	},
	reliableMarginLeft: function() {
		computeStyleTests();
		return reliableMarginLeftVal;
	}
} );
} )();


// Read the computed value of a single CSS property, working around engine
// quirks. `computed` (optional) is a pre-fetched CSSStyleDeclaration; when
// omitted it is obtained via getStyles( elem ). Always returns a string
// (or undefined when no computed style is available).
function curCSS( elem, name, computed ) {
	var width, minWidth, maxWidth, ret,

		// Support: Firefox 51+
		// Retrieving style before computed somehow
		// fixes an issue with getting wrong values
		// on detached elements
		style = elem.style;

	computed = computed || getStyles( elem );

	// getPropertyValue is needed for:
	//   .css('filter') (IE 9 only, #12537)
	//   .css('--customProperty') (#3144)
	if ( computed ) {
		ret = computed.getPropertyValue( name ) || computed[ name ];

		// Detached elements have no computed style; fall back to inline style.
		if ( ret === "" && !jQuery.contains( elem.ownerDocument, elem ) ) {
			ret = jQuery.style( elem, name );
		}

		// A tribute to the "awesome hack by Dean Edwards"
		// Android Browser returns percentage for some values,
		// but width seems to be reliably pixels.
		// This is against the CSSOM draft spec:
		// https://drafts.csswg.org/cssom/#resolved-values
		if ( !support.pixelMarginRight() && rnumnonpx.test( ret ) && rmargin.test( name ) ) {

			// Remember the original values
			width = style.width;
			minWidth = style.minWidth;
			maxWidth = style.maxWidth;

			// Put in the new values to get a computed value out
			style.minWidth = style.maxWidth = style.width = ret;
			ret = computed.width;

			// Revert the changed values
			style.width = width;
			style.minWidth = minWidth;
			style.maxWidth = maxWidth;
		}
	}

	return ret !== undefined ?

		// Support: IE <=9 - 11 only
		// IE returns zIndex value as an integer.
		ret + "" :
		ret;
}


// Build a lazy cssHooks getter: on first use, conditionFn (a support test)
// decides whether the hook is needed; if not, the getter deletes itself,
// otherwise it rebinds itself to hookFn so the test never runs again.
function addGetHookIf( conditionFn, hookFn ) {

	// Define the hook, we'll check on the first run if it's really needed.
	return {
		get: function() {
			if ( conditionFn() ) {

				// Hook not needed (or it's not possible to use it due
				// to missing dependency), remove it.
				delete this.get;
				return;
			}

			// Hook needed; redefine it so that the support test is not executed again.
			return ( this.get = hookFn ).apply( this, arguments );
		}
	};
}


var

	// Swappable if display is none or starts with table
	// except "table", "table-cell", or "table-caption"
	// See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display
	rdisplayswap = /^(none|table(?!-c[ea]).+)/,

	// Matches CSS custom properties ("--foo"), which bypass name fix-ups.
	rcustomProp = /^--/,

	// Styles applied via swap() to measure hidden elements.
	cssShow = { position: "absolute", visibility: "hidden", display: "block" },

	// Computed values that "normal" resolves to for these properties.
	cssNormalTransform = {
		letterSpacing: "0",
		fontWeight: "400"
	},

	cssPrefixes = [ "Webkit", "Moz", "ms" ],

	// Detached style object used purely to probe property-name support.
	emptyStyle = document.createElement( "div" ).style;

// Return a css property mapped to a potentially vendor prefixed property
function vendorPropName( name ) {

	// Shortcut for names that are not vendor prefixed
	if ( name in emptyStyle ) {
		return name;
	}

	// Check for vendor prefixed names
	var capName = name[ 0 ].toUpperCase() + name.slice( 1 ),
		i = cssPrefixes.length;

	while ( i-- ) {
		name = cssPrefixes[ i ] + capName;
		if ( name in emptyStyle ) {
			return name;
		}
	}
}

// Return a property mapped along what jQuery.cssProps suggests or to
// a vendor prefixed property. Results are memoized on jQuery.cssProps.
function finalPropName( name ) {
	var ret = jQuery.cssProps[ name ];
	if ( !ret ) {
		ret = jQuery.cssProps[ name ] = vendorPropName( name ) || name;
	}
	return ret;
}

// Clamp a dimension value at zero after subtracting `subtract` pixels,
// preserving the original unit (defaulting to "px"). Non-numeric values
// pass through untouched.
function setPositiveNumber( elem, value, subtract ) {

	// Any relative (+/-) values have already been
	// normalized at this point
	var matches = rcssNum.exec( value );
	return matches ?

		// Guard against undefined "subtract", e.g., when used as in cssHooks
		Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) :
		value;
}

// Sum the padding/border/margin adjustments needed to convert between the
// element's box-sizing model and the requested `extra` box
// ("content", "padding", "border", or "margin").
function augmentWidthOrHeight( elem, name, extra, isBorderBox, styles ) {
	var i,
		val = 0;

	// If we already have the right measurement, avoid augmentation
	if ( extra === ( isBorderBox ?
"border" : "content" ) ) {
		i = 4;

	// Otherwise initialize for horizontal or vertical properties
	} else {
		i = name === "width" ? 1 : 0;
	}

	// cssExpand order is Top/Right/Bottom/Left; stepping by 2 walks the
	// pair of sides relevant to this dimension (T/B for height, R/L for width).
	for ( ; i < 4; i += 2 ) {

		// Both box models exclude margin, so add it if we want it
		if ( extra === "margin" ) {
			val += jQuery.css( elem, extra + cssExpand[ i ], true, styles );
		}

		if ( isBorderBox ) {

			// border-box includes padding, so remove it if we want content
			if ( extra === "content" ) {
				val -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles );
			}

			// At this point, extra isn't border nor margin, so remove border
			if ( extra !== "margin" ) {
				val -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles );
			}
		} else {

			// At this point, extra isn't content, so add padding
			val += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles );

			// At this point, extra isn't content nor padding, so add border
			if ( extra !== "padding" ) {
				val += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles );
			}
		}
	}

	return val;
}

// Measure an element's width or height (name: "width"|"height") as a pixel
// string, adjusted to the box requested via `extra`.
function getWidthOrHeight( elem, name, extra ) {

	// Start with computed style
	var valueIsBorderBox,
		styles = getStyles( elem ),
		val = curCSS( elem, name, styles ),
		isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box";

	// Computed unit is not pixels. Stop here and return.
+ if ( rnumnonpx.test( val ) ) { + return val; + } + + // Check for style in case a browser which returns unreliable values + // for getComputedStyle silently falls back to the reliable elem.style + valueIsBorderBox = isBorderBox && + ( support.boxSizingReliable() || val === elem.style[ name ] ); + + // Fall back to offsetWidth/Height when value is "auto" + // This happens for inline elements with no explicit setting (gh-3571) + if ( val === "auto" ) { + val = elem[ "offset" + name[ 0 ].toUpperCase() + name.slice( 1 ) ]; + } + + // Normalize "", auto, and prepare for extra + val = parseFloat( val ) || 0; + + // Use the active box-sizing model to add/subtract irrelevant styles + return ( val + + augmentWidthOrHeight( + elem, + name, + extra || ( isBorderBox ? "border" : "content" ), + valueIsBorderBox, + styles + ) + ) + "px"; +} + +jQuery.extend( { + + // Add in style property hooks for overriding the default + // behavior of getting and setting a style property + cssHooks: { + opacity: { + get: function( elem, computed ) { + if ( computed ) { + + // We should always get a number back from opacity + var ret = curCSS( elem, "opacity" ); + return ret === "" ? 
"1" : ret; + } + } + } + }, + + // Don't automatically add "px" to these possibly-unitless properties + cssNumber: { + "animationIterationCount": true, + "columnCount": true, + "fillOpacity": true, + "flexGrow": true, + "flexShrink": true, + "fontWeight": true, + "lineHeight": true, + "opacity": true, + "order": true, + "orphans": true, + "widows": true, + "zIndex": true, + "zoom": true + }, + + // Add in properties whose names you wish to fix before + // setting or getting the value + cssProps: { + "float": "cssFloat" + }, + + // Get and set the style property on a DOM Node + style: function( elem, name, value, extra ) { + + // Don't set styles on text and comment nodes + if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { + return; + } + + // Make sure that we're working with the right name + var ret, type, hooks, + origName = jQuery.camelCase( name ), + isCustomProp = rcustomProp.test( name ), + style = elem.style; + + // Make sure that we're working with the right name. We don't + // want to query the value if it is a CSS custom property + // since they are user-defined. + if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Gets hook for the prefixed version, then unprefixed version + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // Check if we're setting a value + if ( value !== undefined ) { + type = typeof value; + + // Convert "+=" or "-=" to relative numbers (#7345) + if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { + value = adjustCSS( elem, name, ret ); + + // Fixes bug #9237 + type = "number"; + } + + // Make sure that null and NaN values aren't set (#7116) + if ( value == null || value !== value ) { + return; + } + + // If a number was passed in, add the unit (except for certain CSS properties) + if ( type === "number" ) { + value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? 
"" : "px" ); + } + + // background-* props affect original clone's values + if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { + style[ name ] = "inherit"; + } + + // If a hook was provided, use that value, otherwise just set the specified value + if ( !hooks || !( "set" in hooks ) || + ( value = hooks.set( elem, value, extra ) ) !== undefined ) { + + if ( isCustomProp ) { + style.setProperty( name, value ); + } else { + style[ name ] = value; + } + } + + } else { + + // If a hook was provided get the non-computed value from there + if ( hooks && "get" in hooks && + ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { + + return ret; + } + + // Otherwise just get the value from the style object + return style[ name ]; + } + }, + + css: function( elem, name, extra, styles ) { + var val, num, hooks, + origName = jQuery.camelCase( name ), + isCustomProp = rcustomProp.test( name ); + + // Make sure that we're working with the right name. We don't + // want to modify the value if it is a CSS custom property + // since they are user-defined. + if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Try prefixed name followed by the unprefixed name + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // If a hook was provided get the computed value from there + if ( hooks && "get" in hooks ) { + val = hooks.get( elem, true, extra ); + } + + // Otherwise, if a way to get the computed value exists, use that + if ( val === undefined ) { + val = curCSS( elem, name, styles ); + } + + // Convert "normal" to computed value + if ( val === "normal" && name in cssNormalTransform ) { + val = cssNormalTransform[ name ]; + } + + // Make numeric if forced or a qualifier was provided and val looks numeric + if ( extra === "" || extra ) { + num = parseFloat( val ); + return extra === true || isFinite( num ) ? 
num || 0 : val; + } + + return val; + } +} ); + +jQuery.each( [ "height", "width" ], function( i, name ) { + jQuery.cssHooks[ name ] = { + get: function( elem, computed, extra ) { + if ( computed ) { + + // Certain elements can have dimension info if we invisibly show them + // but it must have a current display style that would benefit + return rdisplayswap.test( jQuery.css( elem, "display" ) ) && + + // Support: Safari 8+ + // Table columns in Safari have non-zero offsetWidth & zero + // getBoundingClientRect().width unless display is changed. + // Support: IE <=11 only + // Running getBoundingClientRect on a disconnected node + // in IE throws an error. + ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? + swap( elem, cssShow, function() { + return getWidthOrHeight( elem, name, extra ); + } ) : + getWidthOrHeight( elem, name, extra ); + } + }, + + set: function( elem, value, extra ) { + var matches, + styles = extra && getStyles( elem ), + subtract = extra && augmentWidthOrHeight( + elem, + name, + extra, + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + styles + ); + + // Convert to pixels if value adjustment is needed + if ( subtract && ( matches = rcssNum.exec( value ) ) && + ( matches[ 3 ] || "px" ) !== "px" ) { + + elem.style[ name ] = value; + value = jQuery.css( elem, name ); + } + + return setPositiveNumber( elem, value, subtract ); + } + }; +} ); + +jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, + function( elem, computed ) { + if ( computed ) { + return ( parseFloat( curCSS( elem, "marginLeft" ) ) || + elem.getBoundingClientRect().left - + swap( elem, { marginLeft: 0 }, function() { + return elem.getBoundingClientRect().left; + } ) + ) + "px"; + } + } +); + +// These hooks are used by animate to expand properties +jQuery.each( { + margin: "", + padding: "", + border: "Width" +}, function( prefix, suffix ) { + jQuery.cssHooks[ prefix + suffix ] = { + expand: function( value ) { + var i 
= 0, + expanded = {}, + + // Assumes a single number if not a string + parts = typeof value === "string" ? value.split( " " ) : [ value ]; + + for ( ; i < 4; i++ ) { + expanded[ prefix + cssExpand[ i ] + suffix ] = + parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; + } + + return expanded; + } + }; + + if ( !rmargin.test( prefix ) ) { + jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; + } +} ); + +jQuery.fn.extend( { + css: function( name, value ) { + return access( this, function( elem, name, value ) { + var styles, len, + map = {}, + i = 0; + + if ( Array.isArray( name ) ) { + styles = getStyles( elem ); + len = name.length; + + for ( ; i < len; i++ ) { + map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); + } + + return map; + } + + return value !== undefined ? + jQuery.style( elem, name, value ) : + jQuery.css( elem, name ); + }, name, value, arguments.length > 1 ); + } +} ); + + +function Tween( elem, options, prop, end, easing ) { + return new Tween.prototype.init( elem, options, prop, end, easing ); +} +jQuery.Tween = Tween; + +Tween.prototype = { + constructor: Tween, + init: function( elem, options, prop, end, easing, unit ) { + this.elem = elem; + this.prop = prop; + this.easing = easing || jQuery.easing._default; + this.options = options; + this.start = this.now = this.cur(); + this.end = end; + this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); + }, + cur: function() { + var hooks = Tween.propHooks[ this.prop ]; + + return hooks && hooks.get ? 
+ hooks.get( this ) : + Tween.propHooks._default.get( this ); + }, + run: function( percent ) { + var eased, + hooks = Tween.propHooks[ this.prop ]; + + if ( this.options.duration ) { + this.pos = eased = jQuery.easing[ this.easing ]( + percent, this.options.duration * percent, 0, 1, this.options.duration + ); + } else { + this.pos = eased = percent; + } + this.now = ( this.end - this.start ) * eased + this.start; + + if ( this.options.step ) { + this.options.step.call( this.elem, this.now, this ); + } + + if ( hooks && hooks.set ) { + hooks.set( this ); + } else { + Tween.propHooks._default.set( this ); + } + return this; + } +}; + +Tween.prototype.init.prototype = Tween.prototype; + +Tween.propHooks = { + _default: { + get: function( tween ) { + var result; + + // Use a property on the element directly when it is not a DOM element, + // or when there is no matching style property that exists. + if ( tween.elem.nodeType !== 1 || + tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { + return tween.elem[ tween.prop ]; + } + + // Passing an empty string as a 3rd parameter to .css will automatically + // attempt a parseFloat and fallback to a string if the parse fails. + // Simple values such as "10px" are parsed to Float; + // complex values such as "rotate(1rad)" are returned as-is. + result = jQuery.css( tween.elem, tween.prop, "" ); + + // Empty strings, null, undefined and "auto" are converted to 0. + return !result || result === "auto" ? 0 : result; + }, + set: function( tween ) { + + // Use step hook for back compat. + // Use cssHook if its there. + // Use .style if available and use plain properties where available. 
			if ( jQuery.fx.step[ tween.prop ] ) {

				// Legacy per-property step hook (pre-1.8 extension point).
				jQuery.fx.step[ tween.prop ]( tween );
			} else if ( tween.elem.nodeType === 1 &&
				( tween.elem.style[ jQuery.cssProps[ tween.prop ] ] != null ||
					jQuery.cssHooks[ tween.prop ] ) ) {

				// Style-backed property: route through jQuery.style so
				// cssHooks and units apply.
				jQuery.style( tween.elem, tween.prop, tween.now + tween.unit );
			} else {

				// Plain (non-style) property: assign directly.
				tween.elem[ tween.prop ] = tween.now;
			}
		}
	}
};

// Support: IE <=9 only
// Panic based approach to setting things on disconnected nodes
Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = {
	set: function( tween ) {
		if ( tween.elem.nodeType && tween.elem.parentNode ) {
			tween.elem[ tween.prop ] = tween.now;
		}
	}
};

// Built-in easing functions; `p` is animation progress in [0, 1].
jQuery.easing = {
	linear: function( p ) {
		return p;
	},
	swing: function( p ) {
		return 0.5 - Math.cos( p * Math.PI ) / 2;
	},
	_default: "swing"
};

jQuery.fx = Tween.prototype.init;

// Back compat <1.8 extension point
jQuery.fx.step = {};




var
	fxNow, inProgress,
	rfxtypes = /^(?:toggle|show|hide)$/,
	rrun = /queueHooks$/;

// Drive the global fx timer: while animations are in progress, re-arm via
// requestAnimationFrame when the page is visible (falling back to
// setTimeout) and advance all timers with jQuery.fx.tick().
function schedule() {
	if ( inProgress ) {
		if ( document.hidden === false && window.requestAnimationFrame ) {
			window.requestAnimationFrame( schedule );
		} else {
			window.setTimeout( schedule, jQuery.fx.interval );
		}

		jQuery.fx.tick();
	}
}

// Animations created synchronously will run synchronously
// (fxNow is shared for the current tick and cleared on the next turn).
function createFxNow() {
	window.setTimeout( function() {
		fxNow = undefined;
	} );
	return ( fxNow = jQuery.now() );
}

// Generate parameters to create a standard animation
// (height/margin/padding, plus width/opacity when includeWidth is truthy);
// `type` is the target end state, e.g. "show", "hide", or "toggle".
function genFx( type, includeWidth ) {
	var which,
		i = 0,
		attrs = { height: type };

	// If we include width, step value is 1 to do all cssExpand values,
	// otherwise step value is 2 to skip over Left and Right
	includeWidth = includeWidth ?
1 : 0; + for ( ; i < 4; i += 2 - includeWidth ) { + which = cssExpand[ i ]; + attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; + } + + if ( includeWidth ) { + attrs.opacity = attrs.width = type; + } + + return attrs; +} + +function createTween( value, prop, animation ) { + var tween, + collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), + index = 0, + length = collection.length; + for ( ; index < length; index++ ) { + if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { + + // We're done with this property + return tween; + } + } +} + +function defaultPrefilter( elem, props, opts ) { + var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, + isBox = "width" in props || "height" in props, + anim = this, + orig = {}, + style = elem.style, + hidden = elem.nodeType && isHiddenWithinTree( elem ), + dataShow = dataPriv.get( elem, "fxshow" ); + + // Queue-skipping animations hijack the fx hooks + if ( !opts.queue ) { + hooks = jQuery._queueHooks( elem, "fx" ); + if ( hooks.unqueued == null ) { + hooks.unqueued = 0; + oldfire = hooks.empty.fire; + hooks.empty.fire = function() { + if ( !hooks.unqueued ) { + oldfire(); + } + }; + } + hooks.unqueued++; + + anim.always( function() { + + // Ensure the complete handler is called before this completes + anim.always( function() { + hooks.unqueued--; + if ( !jQuery.queue( elem, "fx" ).length ) { + hooks.empty.fire(); + } + } ); + } ); + } + + // Detect show/hide animations + for ( prop in props ) { + value = props[ prop ]; + if ( rfxtypes.test( value ) ) { + delete props[ prop ]; + toggle = toggle || value === "toggle"; + if ( value === ( hidden ? 
"hide" : "show" ) ) { + + // Pretend to be hidden if this is a "show" and + // there is still data from a stopped show/hide + if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { + hidden = true; + + // Ignore all other no-op show/hide data + } else { + continue; + } + } + orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); + } + } + + // Bail out if this is a no-op like .hide().hide() + propTween = !jQuery.isEmptyObject( props ); + if ( !propTween && jQuery.isEmptyObject( orig ) ) { + return; + } + + // Restrict "overflow" and "display" styles during box animations + if ( isBox && elem.nodeType === 1 ) { + + // Support: IE <=9 - 11, Edge 12 - 13 + // Record all 3 overflow attributes because IE does not infer the shorthand + // from identically-valued overflowX and overflowY + opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; + + // Identify a display type, preferring old show/hide data over the CSS cascade + restoreDisplay = dataShow && dataShow.display; + if ( restoreDisplay == null ) { + restoreDisplay = dataPriv.get( elem, "display" ); + } + display = jQuery.css( elem, "display" ); + if ( display === "none" ) { + if ( restoreDisplay ) { + display = restoreDisplay; + } else { + + // Get nonempty value(s) by temporarily forcing visibility + showHide( [ elem ], true ); + restoreDisplay = elem.style.display || restoreDisplay; + display = jQuery.css( elem, "display" ); + showHide( [ elem ] ); + } + } + + // Animate inline elements as inline-block + if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { + if ( jQuery.css( elem, "float" ) === "none" ) { + + // Restore the original display value at the end of pure show/hide animations + if ( !propTween ) { + anim.done( function() { + style.display = restoreDisplay; + } ); + if ( restoreDisplay == null ) { + display = style.display; + restoreDisplay = display === "none" ? 
"" : display; + } + } + style.display = "inline-block"; + } + } + } + + if ( opts.overflow ) { + style.overflow = "hidden"; + anim.always( function() { + style.overflow = opts.overflow[ 0 ]; + style.overflowX = opts.overflow[ 1 ]; + style.overflowY = opts.overflow[ 2 ]; + } ); + } + + // Implement show/hide animations + propTween = false; + for ( prop in orig ) { + + // General show/hide setup for this element animation + if ( !propTween ) { + if ( dataShow ) { + if ( "hidden" in dataShow ) { + hidden = dataShow.hidden; + } + } else { + dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); + } + + // Store hidden/visible for toggle so `.stop().toggle()` "reverses" + if ( toggle ) { + dataShow.hidden = !hidden; + } + + // Show elements before animating them + if ( hidden ) { + showHide( [ elem ], true ); + } + + /* eslint-disable no-loop-func */ + + anim.done( function() { + + /* eslint-enable no-loop-func */ + + // The final step of a "hide" animation is actually hiding the element + if ( !hidden ) { + showHide( [ elem ] ); + } + dataPriv.remove( elem, "fxshow" ); + for ( prop in orig ) { + jQuery.style( elem, prop, orig[ prop ] ); + } + } ); + } + + // Per-property setup + propTween = createTween( hidden ? 
dataShow[ prop ] : 0, prop, anim ); + if ( !( prop in dataShow ) ) { + dataShow[ prop ] = propTween.start; + if ( hidden ) { + propTween.end = propTween.start; + propTween.start = 0; + } + } + } +} + +function propFilter( props, specialEasing ) { + var index, name, easing, value, hooks; + + // camelCase, specialEasing and expand cssHook pass + for ( index in props ) { + name = jQuery.camelCase( index ); + easing = specialEasing[ name ]; + value = props[ index ]; + if ( Array.isArray( value ) ) { + easing = value[ 1 ]; + value = props[ index ] = value[ 0 ]; + } + + if ( index !== name ) { + props[ name ] = value; + delete props[ index ]; + } + + hooks = jQuery.cssHooks[ name ]; + if ( hooks && "expand" in hooks ) { + value = hooks.expand( value ); + delete props[ name ]; + + // Not quite $.extend, this won't overwrite existing keys. + // Reusing 'index' because we have the correct "name" + for ( index in value ) { + if ( !( index in props ) ) { + props[ index ] = value[ index ]; + specialEasing[ index ] = easing; + } + } + } else { + specialEasing[ name ] = easing; + } + } +} + +function Animation( elem, properties, options ) { + var result, + stopped, + index = 0, + length = Animation.prefilters.length, + deferred = jQuery.Deferred().always( function() { + + // Don't match elem in the :animated selector + delete tick.elem; + } ), + tick = function() { + if ( stopped ) { + return false; + } + var currentTime = fxNow || createFxNow(), + remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), + + // Support: Android 2.3 only + // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) + temp = remaining / animation.duration || 0, + percent = 1 - temp, + index = 0, + length = animation.tweens.length; + + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( percent ); + } + + deferred.notifyWith( elem, [ animation, percent, remaining ] ); + + // If there's more to do, yield + if ( percent < 1 && length ) { + return 
remaining; + } + + // If this was an empty animation, synthesize a final progress notification + if ( !length ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + } + + // Resolve the animation and report its conclusion + deferred.resolveWith( elem, [ animation ] ); + return false; + }, + animation = deferred.promise( { + elem: elem, + props: jQuery.extend( {}, properties ), + opts: jQuery.extend( true, { + specialEasing: {}, + easing: jQuery.easing._default + }, options ), + originalProperties: properties, + originalOptions: options, + startTime: fxNow || createFxNow(), + duration: options.duration, + tweens: [], + createTween: function( prop, end ) { + var tween = jQuery.Tween( elem, animation.opts, prop, end, + animation.opts.specialEasing[ prop ] || animation.opts.easing ); + animation.tweens.push( tween ); + return tween; + }, + stop: function( gotoEnd ) { + var index = 0, + + // If we are going to the end, we want to run all the tweens + // otherwise we skip this part + length = gotoEnd ? 
animation.tweens.length : 0; + if ( stopped ) { + return this; + } + stopped = true; + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( 1 ); + } + + // Resolve when we played the last frame; otherwise, reject + if ( gotoEnd ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + deferred.resolveWith( elem, [ animation, gotoEnd ] ); + } else { + deferred.rejectWith( elem, [ animation, gotoEnd ] ); + } + return this; + } + } ), + props = animation.props; + + propFilter( props, animation.opts.specialEasing ); + + for ( ; index < length; index++ ) { + result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); + if ( result ) { + if ( jQuery.isFunction( result.stop ) ) { + jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = + jQuery.proxy( result.stop, result ); + } + return result; + } + } + + jQuery.map( props, createTween, animation ); + + if ( jQuery.isFunction( animation.opts.start ) ) { + animation.opts.start.call( elem, animation ); + } + + // Attach callbacks from options + animation + .progress( animation.opts.progress ) + .done( animation.opts.done, animation.opts.complete ) + .fail( animation.opts.fail ) + .always( animation.opts.always ); + + jQuery.fx.timer( + jQuery.extend( tick, { + elem: elem, + anim: animation, + queue: animation.opts.queue + } ) + ); + + return animation; +} + +jQuery.Animation = jQuery.extend( Animation, { + + tweeners: { + "*": [ function( prop, value ) { + var tween = this.createTween( prop, value ); + adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); + return tween; + } ] + }, + + tweener: function( props, callback ) { + if ( jQuery.isFunction( props ) ) { + callback = props; + props = [ "*" ]; + } else { + props = props.match( rnothtmlwhite ); + } + + var prop, + index = 0, + length = props.length; + + for ( ; index < length; index++ ) { + prop = props[ index ]; + Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; + Animation.tweeners[ prop 
].unshift( callback ); + } + }, + + prefilters: [ defaultPrefilter ], + + prefilter: function( callback, prepend ) { + if ( prepend ) { + Animation.prefilters.unshift( callback ); + } else { + Animation.prefilters.push( callback ); + } + } +} ); + +jQuery.speed = function( speed, easing, fn ) { + var opt = speed && typeof speed === "object" ? jQuery.extend( {}, speed ) : { + complete: fn || !fn && easing || + jQuery.isFunction( speed ) && speed, + duration: speed, + easing: fn && easing || easing && !jQuery.isFunction( easing ) && easing + }; + + // Go to the end state if fx are off + if ( jQuery.fx.off ) { + opt.duration = 0; + + } else { + if ( typeof opt.duration !== "number" ) { + if ( opt.duration in jQuery.fx.speeds ) { + opt.duration = jQuery.fx.speeds[ opt.duration ]; + + } else { + opt.duration = jQuery.fx.speeds._default; + } + } + } + + // Normalize opt.queue - true/undefined/null -> "fx" + if ( opt.queue == null || opt.queue === true ) { + opt.queue = "fx"; + } + + // Queueing + opt.old = opt.complete; + + opt.complete = function() { + if ( jQuery.isFunction( opt.old ) ) { + opt.old.call( this ); + } + + if ( opt.queue ) { + jQuery.dequeue( this, opt.queue ); + } + }; + + return opt; +}; + +jQuery.fn.extend( { + fadeTo: function( speed, to, easing, callback ) { + + // Show any hidden elements after setting opacity to 0 + return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() + + // Animate to the value specified + .end().animate( { opacity: to }, speed, easing, callback ); + }, + animate: function( prop, speed, easing, callback ) { + var empty = jQuery.isEmptyObject( prop ), + optall = jQuery.speed( speed, easing, callback ), + doAnimation = function() { + + // Operate on a copy of prop so per-property easing won't be lost + var anim = Animation( this, jQuery.extend( {}, prop ), optall ); + + // Empty animations, or finishing resolves immediately + if ( empty || dataPriv.get( this, "finish" ) ) { + anim.stop( true ); + } + }; + 
doAnimation.finish = doAnimation; + + return empty || optall.queue === false ? + this.each( doAnimation ) : + this.queue( optall.queue, doAnimation ); + }, + stop: function( type, clearQueue, gotoEnd ) { + var stopQueue = function( hooks ) { + var stop = hooks.stop; + delete hooks.stop; + stop( gotoEnd ); + }; + + if ( typeof type !== "string" ) { + gotoEnd = clearQueue; + clearQueue = type; + type = undefined; + } + if ( clearQueue && type !== false ) { + this.queue( type || "fx", [] ); + } + + return this.each( function() { + var dequeue = true, + index = type != null && type + "queueHooks", + timers = jQuery.timers, + data = dataPriv.get( this ); + + if ( index ) { + if ( data[ index ] && data[ index ].stop ) { + stopQueue( data[ index ] ); + } + } else { + for ( index in data ) { + if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { + stopQueue( data[ index ] ); + } + } + } + + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && + ( type == null || timers[ index ].queue === type ) ) { + + timers[ index ].anim.stop( gotoEnd ); + dequeue = false; + timers.splice( index, 1 ); + } + } + + // Start the next in the queue if the last step wasn't forced. + // Timers currently will call their complete callbacks, which + // will dequeue but only if they were gotoEnd. + if ( dequeue || !gotoEnd ) { + jQuery.dequeue( this, type ); + } + } ); + }, + finish: function( type ) { + if ( type !== false ) { + type = type || "fx"; + } + return this.each( function() { + var index, + data = dataPriv.get( this ), + queue = data[ type + "queue" ], + hooks = data[ type + "queueHooks" ], + timers = jQuery.timers, + length = queue ? 
queue.length : 0; + + // Enable finishing flag on private data + data.finish = true; + + // Empty the queue first + jQuery.queue( this, type, [] ); + + if ( hooks && hooks.stop ) { + hooks.stop.call( this, true ); + } + + // Look for any active animations, and finish them + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && timers[ index ].queue === type ) { + timers[ index ].anim.stop( true ); + timers.splice( index, 1 ); + } + } + + // Look for any animations in the old queue and finish them + for ( index = 0; index < length; index++ ) { + if ( queue[ index ] && queue[ index ].finish ) { + queue[ index ].finish.call( this ); + } + } + + // Turn off finishing flag + delete data.finish; + } ); + } +} ); + +jQuery.each( [ "toggle", "show", "hide" ], function( i, name ) { + var cssFn = jQuery.fn[ name ]; + jQuery.fn[ name ] = function( speed, easing, callback ) { + return speed == null || typeof speed === "boolean" ? + cssFn.apply( this, arguments ) : + this.animate( genFx( name, true ), speed, easing, callback ); + }; +} ); + +// Generate shortcuts for custom animations +jQuery.each( { + slideDown: genFx( "show" ), + slideUp: genFx( "hide" ), + slideToggle: genFx( "toggle" ), + fadeIn: { opacity: "show" }, + fadeOut: { opacity: "hide" }, + fadeToggle: { opacity: "toggle" } +}, function( name, props ) { + jQuery.fn[ name ] = function( speed, easing, callback ) { + return this.animate( props, speed, easing, callback ); + }; +} ); + +jQuery.timers = []; +jQuery.fx.tick = function() { + var timer, + i = 0, + timers = jQuery.timers; + + fxNow = jQuery.now(); + + for ( ; i < timers.length; i++ ) { + timer = timers[ i ]; + + // Run the timer and safely remove it when done (allowing for external removal) + if ( !timer() && timers[ i ] === timer ) { + timers.splice( i--, 1 ); + } + } + + if ( !timers.length ) { + jQuery.fx.stop(); + } + fxNow = undefined; +}; + +jQuery.fx.timer = function( timer ) { + jQuery.timers.push( timer ); + 
jQuery.fx.start(); +}; + +jQuery.fx.interval = 13; +jQuery.fx.start = function() { + if ( inProgress ) { + return; + } + + inProgress = true; + schedule(); +}; + +jQuery.fx.stop = function() { + inProgress = null; +}; + +jQuery.fx.speeds = { + slow: 600, + fast: 200, + + // Default speed + _default: 400 +}; + + +// Based off of the plugin by Clint Helfers, with permission. +// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ +jQuery.fn.delay = function( time, type ) { + time = jQuery.fx ? jQuery.fx.speeds[ time ] || time : time; + type = type || "fx"; + + return this.queue( type, function( next, hooks ) { + var timeout = window.setTimeout( next, time ); + hooks.stop = function() { + window.clearTimeout( timeout ); + }; + } ); +}; + + +( function() { + var input = document.createElement( "input" ), + select = document.createElement( "select" ), + opt = select.appendChild( document.createElement( "option" ) ); + + input.type = "checkbox"; + + // Support: Android <=4.3 only + // Default value for a checkbox should be "on" + support.checkOn = input.value !== ""; + + // Support: IE <=11 only + // Must access selectedIndex to make default options select + support.optSelected = opt.selected; + + // Support: IE <=11 only + // An input loses its value after becoming a radio + input = document.createElement( "input" ); + input.value = "t"; + input.type = "radio"; + support.radioValue = input.value === "t"; +} )(); + + +var boolHook, + attrHandle = jQuery.expr.attrHandle; + +jQuery.fn.extend( { + attr: function( name, value ) { + return access( this, jQuery.attr, name, value, arguments.length > 1 ); + }, + + removeAttr: function( name ) { + return this.each( function() { + jQuery.removeAttr( this, name ); + } ); + } +} ); + +jQuery.extend( { + attr: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set attributes on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || 
nType === 2 ) { + return; + } + + // Fallback to prop when attributes are not supported + if ( typeof elem.getAttribute === "undefined" ) { + return jQuery.prop( elem, name, value ); + } + + // Attribute hooks are determined by the lowercase version + // Grab necessary hook if one is defined + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + hooks = jQuery.attrHooks[ name.toLowerCase() ] || + ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); + } + + if ( value !== undefined ) { + if ( value === null ) { + jQuery.removeAttr( elem, name ); + return; + } + + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + elem.setAttribute( name, value + "" ); + return value; + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + ret = jQuery.find.attr( elem, name ); + + // Non-existent attributes return null, we normalize to undefined + return ret == null ? undefined : ret; + }, + + attrHooks: { + type: { + set: function( elem, value ) { + if ( !support.radioValue && value === "radio" && + nodeName( elem, "input" ) ) { + var val = elem.value; + elem.setAttribute( "type", value ); + if ( val ) { + elem.value = val; + } + return value; + } + } + } + }, + + removeAttr: function( elem, value ) { + var name, + i = 0, + + // Attribute names can contain non-HTML whitespace characters + // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 + attrNames = value && value.match( rnothtmlwhite ); + + if ( attrNames && elem.nodeType === 1 ) { + while ( ( name = attrNames[ i++ ] ) ) { + elem.removeAttribute( name ); + } + } + } +} ); + +// Hooks for boolean attributes +boolHook = { + set: function( elem, value, name ) { + if ( value === false ) { + + // Remove boolean attributes when set to false + jQuery.removeAttr( elem, name ); + } else { + elem.setAttribute( name, name ); + } + return name; + } +}; + +jQuery.each( 
jQuery.expr.match.bool.source.match( /\w+/g ), function( i, name ) { + var getter = attrHandle[ name ] || jQuery.find.attr; + + attrHandle[ name ] = function( elem, name, isXML ) { + var ret, handle, + lowercaseName = name.toLowerCase(); + + if ( !isXML ) { + + // Avoid an infinite loop by temporarily removing this function from the getter + handle = attrHandle[ lowercaseName ]; + attrHandle[ lowercaseName ] = ret; + ret = getter( elem, name, isXML ) != null ? + lowercaseName : + null; + attrHandle[ lowercaseName ] = handle; + } + return ret; + }; +} ); + + + + +var rfocusable = /^(?:input|select|textarea|button)$/i, + rclickable = /^(?:a|area)$/i; + +jQuery.fn.extend( { + prop: function( name, value ) { + return access( this, jQuery.prop, name, value, arguments.length > 1 ); + }, + + removeProp: function( name ) { + return this.each( function() { + delete this[ jQuery.propFix[ name ] || name ]; + } ); + } +} ); + +jQuery.extend( { + prop: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set properties on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + + // Fix name and attach hooks + name = jQuery.propFix[ name ] || name; + hooks = jQuery.propHooks[ name ]; + } + + if ( value !== undefined ) { + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + return ( elem[ name ] = value ); + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + return elem[ name ]; + }, + + propHooks: { + tabIndex: { + get: function( elem ) { + + // Support: IE <=9 - 11 only + // elem.tabIndex doesn't always return the + // correct value when it hasn't been explicitly set + // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ + // Use 
proper attribute retrieval(#12072) + var tabindex = jQuery.find.attr( elem, "tabindex" ); + + if ( tabindex ) { + return parseInt( tabindex, 10 ); + } + + if ( + rfocusable.test( elem.nodeName ) || + rclickable.test( elem.nodeName ) && + elem.href + ) { + return 0; + } + + return -1; + } + } + }, + + propFix: { + "for": "htmlFor", + "class": "className" + } +} ); + +// Support: IE <=11 only +// Accessing the selectedIndex property +// forces the browser to respect setting selected +// on the option +// The getter ensures a default option is selected +// when in an optgroup +// eslint rule "no-unused-expressions" is disabled for this code +// since it considers such accessions noop +if ( !support.optSelected ) { + jQuery.propHooks.selected = { + get: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent && parent.parentNode ) { + parent.parentNode.selectedIndex; + } + return null; + }, + set: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent ) { + parent.selectedIndex; + + if ( parent.parentNode ) { + parent.parentNode.selectedIndex; + } + } + } + }; +} + +jQuery.each( [ + "tabIndex", + "readOnly", + "maxLength", + "cellSpacing", + "cellPadding", + "rowSpan", + "colSpan", + "useMap", + "frameBorder", + "contentEditable" +], function() { + jQuery.propFix[ this.toLowerCase() ] = this; +} ); + + + + + // Strip and collapse whitespace according to HTML spec + // https://html.spec.whatwg.org/multipage/infrastructure.html#strip-and-collapse-whitespace + function stripAndCollapse( value ) { + var tokens = value.match( rnothtmlwhite ) || []; + return tokens.join( " " ); + } + + +function getClass( elem ) { + return elem.getAttribute && elem.getAttribute( "class" ) || ""; +} + +jQuery.fn.extend( { + addClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( jQuery.isFunction( value ) ) { + return this.each( 
function( j ) { + jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( typeof value === "string" && value ) { + classes = value.match( rnothtmlwhite ) || []; + + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + if ( cur.indexOf( " " + clazz + " " ) < 0 ) { + cur += clazz + " "; + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + removeClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( jQuery.isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( !arguments.length ) { + return this.attr( "class", "" ); + } + + if ( typeof value === "string" && value ) { + classes = value.match( rnothtmlwhite ) || []; + + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + + // This expression is here for better compressibility (see addClass) + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + + // Remove *all* instances + while ( cur.indexOf( " " + clazz + " " ) > -1 ) { + cur = cur.replace( " " + clazz + " ", " " ); + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + toggleClass: function( value, stateVal ) { + var type = typeof value; + + if ( typeof stateVal === "boolean" && type === "string" ) { + return stateVal ? 
this.addClass( value ) : this.removeClass( value ); + } + + if ( jQuery.isFunction( value ) ) { + return this.each( function( i ) { + jQuery( this ).toggleClass( + value.call( this, i, getClass( this ), stateVal ), + stateVal + ); + } ); + } + + return this.each( function() { + var className, i, self, classNames; + + if ( type === "string" ) { + + // Toggle individual class names + i = 0; + self = jQuery( this ); + classNames = value.match( rnothtmlwhite ) || []; + + while ( ( className = classNames[ i++ ] ) ) { + + // Check each className given, space separated list + if ( self.hasClass( className ) ) { + self.removeClass( className ); + } else { + self.addClass( className ); + } + } + + // Toggle whole class name + } else if ( value === undefined || type === "boolean" ) { + className = getClass( this ); + if ( className ) { + + // Store className if set + dataPriv.set( this, "__className__", className ); + } + + // If the element has a class name or if we're passed `false`, + // then remove the whole classname (if there was one, the above saved it). + // Otherwise bring back whatever was previously saved (if anything), + // falling back to the empty string if nothing was stored. + if ( this.setAttribute ) { + this.setAttribute( "class", + className || value === false ? 
+ "" : + dataPriv.get( this, "__className__" ) || "" + ); + } + } + } ); + }, + + hasClass: function( selector ) { + var className, elem, + i = 0; + + className = " " + selector + " "; + while ( ( elem = this[ i++ ] ) ) { + if ( elem.nodeType === 1 && + ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { + return true; + } + } + + return false; + } +} ); + + + + +var rreturn = /\r/g; + +jQuery.fn.extend( { + val: function( value ) { + var hooks, ret, isFunction, + elem = this[ 0 ]; + + if ( !arguments.length ) { + if ( elem ) { + hooks = jQuery.valHooks[ elem.type ] || + jQuery.valHooks[ elem.nodeName.toLowerCase() ]; + + if ( hooks && + "get" in hooks && + ( ret = hooks.get( elem, "value" ) ) !== undefined + ) { + return ret; + } + + ret = elem.value; + + // Handle most common string cases + if ( typeof ret === "string" ) { + return ret.replace( rreturn, "" ); + } + + // Handle cases where value is null/undef or number + return ret == null ? "" : ret; + } + + return; + } + + isFunction = jQuery.isFunction( value ); + + return this.each( function( i ) { + var val; + + if ( this.nodeType !== 1 ) { + return; + } + + if ( isFunction ) { + val = value.call( this, i, jQuery( this ).val() ); + } else { + val = value; + } + + // Treat null/undefined as ""; convert numbers to string + if ( val == null ) { + val = ""; + + } else if ( typeof val === "number" ) { + val += ""; + + } else if ( Array.isArray( val ) ) { + val = jQuery.map( val, function( value ) { + return value == null ? "" : value + ""; + } ); + } + + hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; + + // If set returns undefined, fall back to normal setting + if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { + this.value = val; + } + } ); + } +} ); + +jQuery.extend( { + valHooks: { + option: { + get: function( elem ) { + + var val = jQuery.find.attr( elem, "value" ); + return val != null ? 
+ val : + + // Support: IE <=10 - 11 only + // option.text throws exceptions (#14686, #14858) + // Strip and collapse whitespace + // https://html.spec.whatwg.org/#strip-and-collapse-whitespace + stripAndCollapse( jQuery.text( elem ) ); + } + }, + select: { + get: function( elem ) { + var value, option, i, + options = elem.options, + index = elem.selectedIndex, + one = elem.type === "select-one", + values = one ? null : [], + max = one ? index + 1 : options.length; + + if ( index < 0 ) { + i = max; + + } else { + i = one ? index : 0; + } + + // Loop through all the selected options + for ( ; i < max; i++ ) { + option = options[ i ]; + + // Support: IE <=9 only + // IE8-9 doesn't update selected after form reset (#2551) + if ( ( option.selected || i === index ) && + + // Don't return options that are disabled or in a disabled optgroup + !option.disabled && + ( !option.parentNode.disabled || + !nodeName( option.parentNode, "optgroup" ) ) ) { + + // Get the specific value for the option + value = jQuery( option ).val(); + + // We don't need an array for one selects + if ( one ) { + return value; + } + + // Multi-Selects return an array + values.push( value ); + } + } + + return values; + }, + + set: function( elem, value ) { + var optionSet, option, + options = elem.options, + values = jQuery.makeArray( value ), + i = options.length; + + while ( i-- ) { + option = options[ i ]; + + /* eslint-disable no-cond-assign */ + + if ( option.selected = + jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 + ) { + optionSet = true; + } + + /* eslint-enable no-cond-assign */ + } + + // Force browsers to behave consistently when non-matching value is set + if ( !optionSet ) { + elem.selectedIndex = -1; + } + return values; + } + } + } +} ); + +// Radios and checkboxes getter/setter +jQuery.each( [ "radio", "checkbox" ], function() { + jQuery.valHooks[ this ] = { + set: function( elem, value ) { + if ( Array.isArray( value ) ) { + return ( elem.checked = 
jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); + } + } + }; + if ( !support.checkOn ) { + jQuery.valHooks[ this ].get = function( elem ) { + return elem.getAttribute( "value" ) === null ? "on" : elem.value; + }; + } +} ); + + + + +// Return jQuery for attributes-only inclusion + + +var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/; + +jQuery.extend( jQuery.event, { + + trigger: function( event, data, elem, onlyHandlers ) { + + var i, cur, tmp, bubbleType, ontype, handle, special, + eventPath = [ elem || document ], + type = hasOwn.call( event, "type" ) ? event.type : event, + namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; + + cur = tmp = elem = elem || document; + + // Don't do events on text and comment nodes + if ( elem.nodeType === 3 || elem.nodeType === 8 ) { + return; + } + + // focus/blur morphs to focusin/out; ensure we're not firing them right now + if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { + return; + } + + if ( type.indexOf( "." ) > -1 ) { + + // Namespaced trigger; create a regexp to match event type in handle() + namespaces = type.split( "." ); + type = namespaces.shift(); + namespaces.sort(); + } + ontype = type.indexOf( ":" ) < 0 && "on" + type; + + // Caller can pass in a jQuery.Event object, Object, or just an event type string + event = event[ jQuery.expando ] ? + event : + new jQuery.Event( type, typeof event === "object" && event ); + + // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) + event.isTrigger = onlyHandlers ? 2 : 3; + event.namespace = namespaces.join( "." ); + event.rnamespace = event.namespace ? + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : + null; + + // Clean up the event in case it is being reused + event.result = undefined; + if ( !event.target ) { + event.target = elem; + } + + // Clone any incoming data and prepend the event, creating the handler arg list + data = data == null ? 
+ [ event ] : + jQuery.makeArray( data, [ event ] ); + + // Allow special events to draw outside the lines + special = jQuery.event.special[ type ] || {}; + if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { + return; + } + + // Determine event propagation path in advance, per W3C events spec (#9951) + // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) + if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { + + bubbleType = special.delegateType || type; + if ( !rfocusMorph.test( bubbleType + type ) ) { + cur = cur.parentNode; + } + for ( ; cur; cur = cur.parentNode ) { + eventPath.push( cur ); + tmp = cur; + } + + // Only add window if we got to document (e.g., not plain obj or detached DOM) + if ( tmp === ( elem.ownerDocument || document ) ) { + eventPath.push( tmp.defaultView || tmp.parentWindow || window ); + } + } + + // Fire handlers on the event path + i = 0; + while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { + + event.type = i > 1 ? + bubbleType : + special.bindType || type; + + // jQuery handler + handle = ( dataPriv.get( cur, "events" ) || {} )[ event.type ] && + dataPriv.get( cur, "handle" ); + if ( handle ) { + handle.apply( cur, data ); + } + + // Native handler + handle = ontype && cur[ ontype ]; + if ( handle && handle.apply && acceptData( cur ) ) { + event.result = handle.apply( cur, data ); + if ( event.result === false ) { + event.preventDefault(); + } + } + } + event.type = type; + + // If nobody prevented the default action, do it now + if ( !onlyHandlers && !event.isDefaultPrevented() ) { + + if ( ( !special._default || + special._default.apply( eventPath.pop(), data ) === false ) && + acceptData( elem ) ) { + + // Call a native DOM method on the target with the same name as the event. 
+ // Don't do default actions on window, that's where global variables be (#6170) + if ( ontype && jQuery.isFunction( elem[ type ] ) && !jQuery.isWindow( elem ) ) { + + // Don't re-trigger an onFOO event when we call its FOO() method + tmp = elem[ ontype ]; + + if ( tmp ) { + elem[ ontype ] = null; + } + + // Prevent re-triggering of the same event, since we already bubbled it above + jQuery.event.triggered = type; + elem[ type ](); + jQuery.event.triggered = undefined; + + if ( tmp ) { + elem[ ontype ] = tmp; + } + } + } + } + + return event.result; + }, + + // Piggyback on a donor event to simulate a different one + // Used only for `focus(in | out)` events + simulate: function( type, elem, event ) { + var e = jQuery.extend( + new jQuery.Event(), + event, + { + type: type, + isSimulated: true + } + ); + + jQuery.event.trigger( e, null, elem ); + } + +} ); + +jQuery.fn.extend( { + + trigger: function( type, data ) { + return this.each( function() { + jQuery.event.trigger( type, data, this ); + } ); + }, + triggerHandler: function( type, data ) { + var elem = this[ 0 ]; + if ( elem ) { + return jQuery.event.trigger( type, data, elem, true ); + } + } +} ); + + +jQuery.each( ( "blur focus focusin focusout resize scroll click dblclick " + + "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " + + "change select submit keydown keypress keyup contextmenu" ).split( " " ), + function( i, name ) { + + // Handle event binding + jQuery.fn[ name ] = function( data, fn ) { + return arguments.length > 0 ? 
+ this.on( name, null, data, fn ) : + this.trigger( name ); + }; +} ); + +jQuery.fn.extend( { + hover: function( fnOver, fnOut ) { + return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver ); + } +} ); + + + + +support.focusin = "onfocusin" in window; + + +// Support: Firefox <=44 +// Firefox doesn't have focus(in | out) events +// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 +// +// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 +// focus(in | out) events fire after focus & blur events, +// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order +// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 +if ( !support.focusin ) { + jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { + + // Attach a single capturing handler on the document while someone wants focusin/focusout + var handler = function( event ) { + jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); + }; + + jQuery.event.special[ fix ] = { + setup: function() { + var doc = this.ownerDocument || this, + attaches = dataPriv.access( doc, fix ); + + if ( !attaches ) { + doc.addEventListener( orig, handler, true ); + } + dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); + }, + teardown: function() { + var doc = this.ownerDocument || this, + attaches = dataPriv.access( doc, fix ) - 1; + + if ( !attaches ) { + doc.removeEventListener( orig, handler, true ); + dataPriv.remove( doc, fix ); + + } else { + dataPriv.access( doc, fix, attaches ); + } + } + }; + } ); +} +var location = window.location; + +var nonce = jQuery.now(); + +var rquery = ( /\?/ ); + + + +// Cross-browser xml parsing +jQuery.parseXML = function( data ) { + var xml; + if ( !data || typeof data !== "string" ) { + return null; + } + + // Support: IE 9 - 11 only + // IE throws on parseFromString with invalid input. 
+ try { + xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); + } catch ( e ) { + xml = undefined; + } + + if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { + jQuery.error( "Invalid XML: " + data ); + } + return xml; +}; + + +var + rbracket = /\[\]$/, + rCRLF = /\r?\n/g, + rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, + rsubmittable = /^(?:input|select|textarea|keygen)/i; + +function buildParams( prefix, obj, traditional, add ) { + var name; + + if ( Array.isArray( obj ) ) { + + // Serialize array item. + jQuery.each( obj, function( i, v ) { + if ( traditional || rbracket.test( prefix ) ) { + + // Treat each array item as a scalar. + add( prefix, v ); + + } else { + + // Item is non-scalar (array or object), encode its numeric index. + buildParams( + prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", + v, + traditional, + add + ); + } + } ); + + } else if ( !traditional && jQuery.type( obj ) === "object" ) { + + // Serialize object item. + for ( name in obj ) { + buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); + } + + } else { + + // Serialize scalar item. + add( prefix, obj ); + } +} + +// Serialize an array of form elements or a set of +// key/values into a query string +jQuery.param = function( a, traditional ) { + var prefix, + s = [], + add = function( key, valueOrFunction ) { + + // If value is a function, invoke it and use its return value + var value = jQuery.isFunction( valueOrFunction ) ? + valueOrFunction() : + valueOrFunction; + + s[ s.length ] = encodeURIComponent( key ) + "=" + + encodeURIComponent( value == null ? "" : value ); + }; + + // If an array was passed in, assume that it is an array of form elements. 
+ if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { + + // Serialize the form elements + jQuery.each( a, function() { + add( this.name, this.value ); + } ); + + } else { + + // If traditional, encode the "old" way (the way 1.3.2 or older + // did it), otherwise encode params recursively. + for ( prefix in a ) { + buildParams( prefix, a[ prefix ], traditional, add ); + } + } + + // Return the resulting serialization + return s.join( "&" ); +}; + +jQuery.fn.extend( { + serialize: function() { + return jQuery.param( this.serializeArray() ); + }, + serializeArray: function() { + return this.map( function() { + + // Can add propHook for "elements" to filter or add form elements + var elements = jQuery.prop( this, "elements" ); + return elements ? jQuery.makeArray( elements ) : this; + } ) + .filter( function() { + var type = this.type; + + // Use .is( ":disabled" ) so that fieldset[disabled] works + return this.name && !jQuery( this ).is( ":disabled" ) && + rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && + ( this.checked || !rcheckableType.test( type ) ); + } ) + .map( function( i, elem ) { + var val = jQuery( this ).val(); + + if ( val == null ) { + return null; + } + + if ( Array.isArray( val ) ) { + return jQuery.map( val, function( val ) { + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ); + } + + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ).get(); + } +} ); + + +var + r20 = /%20/g, + rhash = /#.*$/, + rantiCache = /([?&])_=[^&]*/, + rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, + + // #7653, #8125, #8152: local protocol detection + rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, + rnoContent = /^(?:GET|HEAD)$/, + rprotocol = /^\/\//, + + /* Prefilters + * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) + * 2) These are called: + * - BEFORE asking for a transport + * - AFTER param serialization (s.data is a 
string if s.processData is true) + * 3) key is the dataType + * 4) the catchall symbol "*" can be used + * 5) execution will start with transport dataType and THEN continue down to "*" if needed + */ + prefilters = {}, + + /* Transports bindings + * 1) key is the dataType + * 2) the catchall symbol "*" can be used + * 3) selection will start with transport dataType and THEN go to "*" if needed + */ + transports = {}, + + // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression + allTypes = "*/".concat( "*" ), + + // Anchor tag for parsing the document origin + originAnchor = document.createElement( "a" ); + originAnchor.href = location.href; + +// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport +function addToPrefiltersOrTransports( structure ) { + + // dataTypeExpression is optional and defaults to "*" + return function( dataTypeExpression, func ) { + + if ( typeof dataTypeExpression !== "string" ) { + func = dataTypeExpression; + dataTypeExpression = "*"; + } + + var dataType, + i = 0, + dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; + + if ( jQuery.isFunction( func ) ) { + + // For each dataType in the dataTypeExpression + while ( ( dataType = dataTypes[ i++ ] ) ) { + + // Prepend if requested + if ( dataType[ 0 ] === "+" ) { + dataType = dataType.slice( 1 ) || "*"; + ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); + + // Otherwise append + } else { + ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); + } + } + } + }; +} + +// Base inspection function for prefilters and transports +function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { + + var inspected = {}, + seekingTransport = ( structure === transports ); + + function inspect( dataType ) { + var selected; + inspected[ dataType ] = true; + jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { + var dataTypeOrTransport = 
prefilterOrFactory( options, originalOptions, jqXHR ); + if ( typeof dataTypeOrTransport === "string" && + !seekingTransport && !inspected[ dataTypeOrTransport ] ) { + + options.dataTypes.unshift( dataTypeOrTransport ); + inspect( dataTypeOrTransport ); + return false; + } else if ( seekingTransport ) { + return !( selected = dataTypeOrTransport ); + } + } ); + return selected; + } + + return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); +} + +// A special extend for ajax options +// that takes "flat" options (not to be deep extended) +// Fixes #9887 +function ajaxExtend( target, src ) { + var key, deep, + flatOptions = jQuery.ajaxSettings.flatOptions || {}; + + for ( key in src ) { + if ( src[ key ] !== undefined ) { + ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; + } + } + if ( deep ) { + jQuery.extend( true, target, deep ); + } + + return target; +} + +/* Handles responses to an ajax request: + * - finds the right dataType (mediates between content-type and expected dataType) + * - returns the corresponding response + */ +function ajaxHandleResponses( s, jqXHR, responses ) { + + var ct, type, finalDataType, firstDataType, + contents = s.contents, + dataTypes = s.dataTypes; + + // Remove auto dataType and get content-type in the process + while ( dataTypes[ 0 ] === "*" ) { + dataTypes.shift(); + if ( ct === undefined ) { + ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); + } + } + + // Check if we're dealing with a known content-type + if ( ct ) { + for ( type in contents ) { + if ( contents[ type ] && contents[ type ].test( ct ) ) { + dataTypes.unshift( type ); + break; + } + } + } + + // Check to see if we have a response for the expected dataType + if ( dataTypes[ 0 ] in responses ) { + finalDataType = dataTypes[ 0 ]; + } else { + + // Try convertible dataTypes + for ( type in responses ) { + if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { + finalDataType = 
type; + break; + } + if ( !firstDataType ) { + firstDataType = type; + } + } + + // Or just use first one + finalDataType = finalDataType || firstDataType; + } + + // If we found a dataType + // We add the dataType to the list if needed + // and return the corresponding response + if ( finalDataType ) { + if ( finalDataType !== dataTypes[ 0 ] ) { + dataTypes.unshift( finalDataType ); + } + return responses[ finalDataType ]; + } +} + +/* Chain conversions given the request and the original response + * Also sets the responseXXX fields on the jqXHR instance + */ +function ajaxConvert( s, response, jqXHR, isSuccess ) { + var conv2, current, conv, tmp, prev, + converters = {}, + + // Work with a copy of dataTypes in case we need to modify it for conversion + dataTypes = s.dataTypes.slice(); + + // Create converters map with lowercased keys + if ( dataTypes[ 1 ] ) { + for ( conv in s.converters ) { + converters[ conv.toLowerCase() ] = s.converters[ conv ]; + } + } + + current = dataTypes.shift(); + + // Convert to each sequential dataType + while ( current ) { + + if ( s.responseFields[ current ] ) { + jqXHR[ s.responseFields[ current ] ] = response; + } + + // Apply the dataFilter if provided + if ( !prev && isSuccess && s.dataFilter ) { + response = s.dataFilter( response, s.dataType ); + } + + prev = current; + current = dataTypes.shift(); + + if ( current ) { + + // There's only work to do if current dataType is non-auto + if ( current === "*" ) { + + current = prev; + + // Convert response if prev dataType is non-auto and differs from current + } else if ( prev !== "*" && prev !== current ) { + + // Seek a direct converter + conv = converters[ prev + " " + current ] || converters[ "* " + current ]; + + // If none found, seek a pair + if ( !conv ) { + for ( conv2 in converters ) { + + // If conv2 outputs current + tmp = conv2.split( " " ); + if ( tmp[ 1 ] === current ) { + + // If prev can be converted to accepted input + conv = converters[ prev + " " + tmp[ 0 ] ] 
|| + converters[ "* " + tmp[ 0 ] ]; + if ( conv ) { + + // Condense equivalence converters + if ( conv === true ) { + conv = converters[ conv2 ]; + + // Otherwise, insert the intermediate dataType + } else if ( converters[ conv2 ] !== true ) { + current = tmp[ 0 ]; + dataTypes.unshift( tmp[ 1 ] ); + } + break; + } + } + } + } + + // Apply converter (if not an equivalence) + if ( conv !== true ) { + + // Unless errors are allowed to bubble, catch and return them + if ( conv && s.throws ) { + response = conv( response ); + } else { + try { + response = conv( response ); + } catch ( e ) { + return { + state: "parsererror", + error: conv ? e : "No conversion from " + prev + " to " + current + }; + } + } + } + } + } + } + + return { state: "success", data: response }; +} + +jQuery.extend( { + + // Counter for holding the number of active queries + active: 0, + + // Last-Modified header cache for next request + lastModified: {}, + etag: {}, + + ajaxSettings: { + url: location.href, + type: "GET", + isLocal: rlocalProtocol.test( location.protocol ), + global: true, + processData: true, + async: true, + contentType: "application/x-www-form-urlencoded; charset=UTF-8", + + /* + timeout: 0, + data: null, + dataType: null, + username: null, + password: null, + cache: null, + throws: false, + traditional: false, + headers: {}, + */ + + accepts: { + "*": allTypes, + text: "text/plain", + html: "text/html", + xml: "application/xml, text/xml", + json: "application/json, text/javascript" + }, + + contents: { + xml: /\bxml\b/, + html: /\bhtml/, + json: /\bjson\b/ + }, + + responseFields: { + xml: "responseXML", + text: "responseText", + json: "responseJSON" + }, + + // Data converters + // Keys separate source (or catchall "*") and destination types with a single space + converters: { + + // Convert anything to text + "* text": String, + + // Text to html (true = no transformation) + "text html": true, + + // Evaluate text as a json expression + "text json": JSON.parse, + + // Parse 
text as xml + "text xml": jQuery.parseXML + }, + + // For options that shouldn't be deep extended: + // you can add your own custom options here if + // and when you create one that shouldn't be + // deep extended (see ajaxExtend) + flatOptions: { + url: true, + context: true + } + }, + + // Creates a full fledged settings object into target + // with both ajaxSettings and settings fields. + // If target is omitted, writes into ajaxSettings. + ajaxSetup: function( target, settings ) { + return settings ? + + // Building a settings object + ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : + + // Extending ajaxSettings + ajaxExtend( jQuery.ajaxSettings, target ); + }, + + ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), + ajaxTransport: addToPrefiltersOrTransports( transports ), + + // Main method + ajax: function( url, options ) { + + // If url is an object, simulate pre-1.5 signature + if ( typeof url === "object" ) { + options = url; + url = undefined; + } + + // Force options to be an object + options = options || {}; + + var transport, + + // URL without anti-cache param + cacheURL, + + // Response headers + responseHeadersString, + responseHeaders, + + // timeout handle + timeoutTimer, + + // Url cleanup var + urlAnchor, + + // Request state (becomes false upon send and true upon completion) + completed, + + // To know if global events are to be dispatched + fireGlobals, + + // Loop variable + i, + + // uncached part of the url + uncached, + + // Create the final options object + s = jQuery.ajaxSetup( {}, options ), + + // Callbacks context + callbackContext = s.context || s, + + // Context for global events is callbackContext if it is a DOM node or jQuery collection + globalEventContext = s.context && + ( callbackContext.nodeType || callbackContext.jquery ) ? 
+ jQuery( callbackContext ) : + jQuery.event, + + // Deferreds + deferred = jQuery.Deferred(), + completeDeferred = jQuery.Callbacks( "once memory" ), + + // Status-dependent callbacks + statusCode = s.statusCode || {}, + + // Headers (they are sent all at once) + requestHeaders = {}, + requestHeadersNames = {}, + + // Default abort message + strAbort = "canceled", + + // Fake xhr + jqXHR = { + readyState: 0, + + // Builds headers hashtable if needed + getResponseHeader: function( key ) { + var match; + if ( completed ) { + if ( !responseHeaders ) { + responseHeaders = {}; + while ( ( match = rheaders.exec( responseHeadersString ) ) ) { + responseHeaders[ match[ 1 ].toLowerCase() ] = match[ 2 ]; + } + } + match = responseHeaders[ key.toLowerCase() ]; + } + return match == null ? null : match; + }, + + // Raw string + getAllResponseHeaders: function() { + return completed ? responseHeadersString : null; + }, + + // Caches the header + setRequestHeader: function( name, value ) { + if ( completed == null ) { + name = requestHeadersNames[ name.toLowerCase() ] = + requestHeadersNames[ name.toLowerCase() ] || name; + requestHeaders[ name ] = value; + } + return this; + }, + + // Overrides response content-type header + overrideMimeType: function( type ) { + if ( completed == null ) { + s.mimeType = type; + } + return this; + }, + + // Status-dependent callbacks + statusCode: function( map ) { + var code; + if ( map ) { + if ( completed ) { + + // Execute the appropriate callbacks + jqXHR.always( map[ jqXHR.status ] ); + } else { + + // Lazy-add the new callbacks in a way that preserves old ones + for ( code in map ) { + statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; + } + } + } + return this; + }, + + // Cancel the request + abort: function( statusText ) { + var finalText = statusText || strAbort; + if ( transport ) { + transport.abort( finalText ); + } + done( 0, finalText ); + return this; + } + }; + + // Attach deferreds + deferred.promise( jqXHR ); + + // 
Add protocol if not provided (prefilters might expect it) + // Handle falsy url in the settings object (#10093: consistency with old signature) + // We also use the url parameter if available + s.url = ( ( url || s.url || location.href ) + "" ) + .replace( rprotocol, location.protocol + "//" ); + + // Alias method option to type as per ticket #12004 + s.type = options.method || options.type || s.method || s.type; + + // Extract dataTypes list + s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; + + // A cross-domain request is in order when the origin doesn't match the current origin. + if ( s.crossDomain == null ) { + urlAnchor = document.createElement( "a" ); + + // Support: IE <=8 - 11, Edge 12 - 13 + // IE throws exception on accessing the href property if url is malformed, + // e.g. http://example.com:80x/ + try { + urlAnchor.href = s.url; + + // Support: IE <=8 - 11 only + // Anchor's host property isn't correctly set when s.url is relative + urlAnchor.href = urlAnchor.href; + s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== + urlAnchor.protocol + "//" + urlAnchor.host; + } catch ( e ) { + + // If there is an error parsing the URL, assume it is crossDomain, + // it can be rejected by the transport if it is invalid + s.crossDomain = true; + } + } + + // Convert data if not already a string + if ( s.data && s.processData && typeof s.data !== "string" ) { + s.data = jQuery.param( s.data, s.traditional ); + } + + // Apply prefilters + inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); + + // If request was aborted inside a prefilter, stop there + if ( completed ) { + return jqXHR; + } + + // We can fire global events as of now if asked to + // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) + fireGlobals = jQuery.event && s.global; + + // Watch for a new set of requests + if ( fireGlobals && jQuery.active++ === 0 ) { + jQuery.event.trigger( "ajaxStart" ); + } + + // 
Uppercase the type + s.type = s.type.toUpperCase(); + + // Determine if request has content + s.hasContent = !rnoContent.test( s.type ); + + // Save the URL in case we're toying with the If-Modified-Since + // and/or If-None-Match header later on + // Remove hash to simplify url manipulation + cacheURL = s.url.replace( rhash, "" ); + + // More options handling for requests with no content + if ( !s.hasContent ) { + + // Remember the hash so we can put it back + uncached = s.url.slice( cacheURL.length ); + + // If data is available, append data to url + if ( s.data ) { + cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; + + // #9682: remove data so that it's not used in an eventual retry + delete s.data; + } + + // Add or update anti-cache param if needed + if ( s.cache === false ) { + cacheURL = cacheURL.replace( rantiCache, "$1" ); + uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce++ ) + uncached; + } + + // Put hash and anti-cache on the URL that will be requested (gh-1732) + s.url = cacheURL + uncached; + + // Change '%20' to '+' if this is encoded form body content (gh-2658) + } else if ( s.data && s.processData && + ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { + s.data = s.data.replace( r20, "+" ); + } + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + if ( jQuery.lastModified[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); + } + if ( jQuery.etag[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); + } + } + + // Set the correct header, if data is being sent + if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { + jqXHR.setRequestHeader( "Content-Type", s.contentType ); + } + + // Set the Accepts header for the server, depending on the dataType + jqXHR.setRequestHeader( + "Accept", + s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? + s.accepts[ s.dataTypes[ 0 ] ] + + ( s.dataTypes[ 0 ] !== "*" ? ", " + allTypes + "; q=0.01" : "" ) : + s.accepts[ "*" ] + ); + + // Check for headers option + for ( i in s.headers ) { + jqXHR.setRequestHeader( i, s.headers[ i ] ); + } + + // Allow custom headers/mimetypes and early abort + if ( s.beforeSend && + ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { + + // Abort if not done already and return + return jqXHR.abort(); + } + + // Aborting is no longer a cancellation + strAbort = "abort"; + + // Install callbacks on deferreds + completeDeferred.add( s.complete ); + jqXHR.done( s.success ); + jqXHR.fail( s.error ); + + // Get transport + transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); + + // If no transport, we auto-abort + if ( !transport ) { + done( -1, "No Transport" ); + } else { + jqXHR.readyState = 1; + + // Send global event + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); + } + + // If request was aborted inside ajaxSend, stop there + if ( completed ) { + return jqXHR; + } + + // Timeout + if ( s.async && s.timeout > 0 ) { + timeoutTimer = window.setTimeout( function() { + jqXHR.abort( "timeout" ); + }, s.timeout ); + } + + try { + completed = false; + transport.send( requestHeaders, done ); + } catch ( e ) { + + // Rethrow post-completion 
exceptions + if ( completed ) { + throw e; + } + + // Propagate others as results + done( -1, e ); + } + } + + // Callback for when everything is done + function done( status, nativeStatusText, responses, headers ) { + var isSuccess, success, error, response, modified, + statusText = nativeStatusText; + + // Ignore repeat invocations + if ( completed ) { + return; + } + + completed = true; + + // Clear timeout if it exists + if ( timeoutTimer ) { + window.clearTimeout( timeoutTimer ); + } + + // Dereference transport for early garbage collection + // (no matter how long the jqXHR object will be used) + transport = undefined; + + // Cache response headers + responseHeadersString = headers || ""; + + // Set readyState + jqXHR.readyState = status > 0 ? 4 : 0; + + // Determine if successful + isSuccess = status >= 200 && status < 300 || status === 304; + + // Get response data + if ( responses ) { + response = ajaxHandleResponses( s, jqXHR, responses ); + } + + // Convert no matter what (that way responseXXX fields are always set) + response = ajaxConvert( s, response, jqXHR, isSuccess ); + + // If successful, handle type chaining + if ( isSuccess ) { + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + modified = jqXHR.getResponseHeader( "Last-Modified" ); + if ( modified ) { + jQuery.lastModified[ cacheURL ] = modified; + } + modified = jqXHR.getResponseHeader( "etag" ); + if ( modified ) { + jQuery.etag[ cacheURL ] = modified; + } + } + + // if no content + if ( status === 204 || s.type === "HEAD" ) { + statusText = "nocontent"; + + // if not modified + } else if ( status === 304 ) { + statusText = "notmodified"; + + // If we have data, let's convert it + } else { + statusText = response.state; + success = response.data; + error = response.error; + isSuccess = !error; + } + } else { + + // Extract error from statusText and normalize for non-aborts + error = statusText; + if ( status || !statusText ) { + statusText = "error"; + if ( status < 0 ) { + status = 0; + } + } + } + + // Set data for the fake xhr object + jqXHR.status = status; + jqXHR.statusText = ( nativeStatusText || statusText ) + ""; + + // Success/Error + if ( isSuccess ) { + deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); + } else { + deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); + } + + // Status-dependent callbacks + jqXHR.statusCode( statusCode ); + statusCode = undefined; + + if ( fireGlobals ) { + globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", + [ jqXHR, s, isSuccess ? 
success : error ] ); + } + + // Complete + completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); + + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); + + // Handle the global AJAX counter + if ( !( --jQuery.active ) ) { + jQuery.event.trigger( "ajaxStop" ); + } + } + } + + return jqXHR; + }, + + getJSON: function( url, data, callback ) { + return jQuery.get( url, data, callback, "json" ); + }, + + getScript: function( url, callback ) { + return jQuery.get( url, undefined, callback, "script" ); + } +} ); + +jQuery.each( [ "get", "post" ], function( i, method ) { + jQuery[ method ] = function( url, data, callback, type ) { + + // Shift arguments if data argument was omitted + if ( jQuery.isFunction( data ) ) { + type = type || callback; + callback = data; + data = undefined; + } + + // The url can be an options object (which then must have .url) + return jQuery.ajax( jQuery.extend( { + url: url, + type: method, + dataType: type, + data: data, + success: callback + }, jQuery.isPlainObject( url ) && url ) ); + }; +} ); + + +jQuery._evalUrl = function( url ) { + return jQuery.ajax( { + url: url, + + // Make this explicit, since user can override this through ajaxSetup (#11264) + type: "GET", + dataType: "script", + cache: true, + async: false, + global: false, + "throws": true + } ); +}; + + +jQuery.fn.extend( { + wrapAll: function( html ) { + var wrap; + + if ( this[ 0 ] ) { + if ( jQuery.isFunction( html ) ) { + html = html.call( this[ 0 ] ); + } + + // The elements to wrap the target around + wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); + + if ( this[ 0 ].parentNode ) { + wrap.insertBefore( this[ 0 ] ); + } + + wrap.map( function() { + var elem = this; + + while ( elem.firstElementChild ) { + elem = elem.firstElementChild; + } + + return elem; + } ).append( this ); + } + + return this; + }, + + wrapInner: function( html ) { + if ( jQuery.isFunction( html ) ) { + return this.each( function( i ) 
{ + jQuery( this ).wrapInner( html.call( this, i ) ); + } ); + } + + return this.each( function() { + var self = jQuery( this ), + contents = self.contents(); + + if ( contents.length ) { + contents.wrapAll( html ); + + } else { + self.append( html ); + } + } ); + }, + + wrap: function( html ) { + var isFunction = jQuery.isFunction( html ); + + return this.each( function( i ) { + jQuery( this ).wrapAll( isFunction ? html.call( this, i ) : html ); + } ); + }, + + unwrap: function( selector ) { + this.parent( selector ).not( "body" ).each( function() { + jQuery( this ).replaceWith( this.childNodes ); + } ); + return this; + } +} ); + + +jQuery.expr.pseudos.hidden = function( elem ) { + return !jQuery.expr.pseudos.visible( elem ); +}; +jQuery.expr.pseudos.visible = function( elem ) { + return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); +}; + + + + +jQuery.ajaxSettings.xhr = function() { + try { + return new window.XMLHttpRequest(); + } catch ( e ) {} +}; + +var xhrSuccessStatus = { + + // File protocol always yields status code 0, assume 200 + 0: 200, + + // Support: IE <=9 only + // #1450: sometimes IE returns 1223 when it should be 204 + 1223: 204 + }, + xhrSupported = jQuery.ajaxSettings.xhr(); + +support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); +support.ajax = xhrSupported = !!xhrSupported; + +jQuery.ajaxTransport( function( options ) { + var callback, errorCallback; + + // Cross domain only allowed if supported through XMLHttpRequest + if ( support.cors || xhrSupported && !options.crossDomain ) { + return { + send: function( headers, complete ) { + var i, + xhr = options.xhr(); + + xhr.open( + options.type, + options.url, + options.async, + options.username, + options.password + ); + + // Apply custom fields if provided + if ( options.xhrFields ) { + for ( i in options.xhrFields ) { + xhr[ i ] = options.xhrFields[ i ]; + } + } + + // Override mime type if needed + if ( options.mimeType && 
xhr.overrideMimeType ) { + xhr.overrideMimeType( options.mimeType ); + } + + // X-Requested-With header + // For cross-domain requests, seeing as conditions for a preflight are + // akin to a jigsaw puzzle, we simply never set it to be sure. + // (it can always be set on a per-request basis or even using ajaxSetup) + // For same-domain requests, won't change header if already provided. + if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { + headers[ "X-Requested-With" ] = "XMLHttpRequest"; + } + + // Set headers + for ( i in headers ) { + xhr.setRequestHeader( i, headers[ i ] ); + } + + // Callback + callback = function( type ) { + return function() { + if ( callback ) { + callback = errorCallback = xhr.onload = + xhr.onerror = xhr.onabort = xhr.onreadystatechange = null; + + if ( type === "abort" ) { + xhr.abort(); + } else if ( type === "error" ) { + + // Support: IE <=9 only + // On a manual native abort, IE9 throws + // errors on any property access that is not readyState + if ( typeof xhr.status !== "number" ) { + complete( 0, "error" ); + } else { + complete( + + // File: protocol always yields status 0; see #8605, #14207 + xhr.status, + xhr.statusText + ); + } + } else { + complete( + xhrSuccessStatus[ xhr.status ] || xhr.status, + xhr.statusText, + + // Support: IE <=9 only + // IE9 has no XHR2 but throws on binary (trac-11426) + // For XHR2 non-text, let the caller handle it (gh-2498) + ( xhr.responseType || "text" ) !== "text" || + typeof xhr.responseText !== "string" ? 
+ { binary: xhr.response } : + { text: xhr.responseText }, + xhr.getAllResponseHeaders() + ); + } + } + }; + }; + + // Listen to events + xhr.onload = callback(); + errorCallback = xhr.onerror = callback( "error" ); + + // Support: IE 9 only + // Use onreadystatechange to replace onabort + // to handle uncaught aborts + if ( xhr.onabort !== undefined ) { + xhr.onabort = errorCallback; + } else { + xhr.onreadystatechange = function() { + + // Check readyState before timeout as it changes + if ( xhr.readyState === 4 ) { + + // Allow onerror to be called first, + // but that will not handle a native abort + // Also, save errorCallback to a variable + // as xhr.onerror cannot be accessed + window.setTimeout( function() { + if ( callback ) { + errorCallback(); + } + } ); + } + }; + } + + // Create the abort callback + callback = callback( "abort" ); + + try { + + // Do send the request (this may raise an exception) + xhr.send( options.hasContent && options.data || null ); + } catch ( e ) { + + // #14683: Only rethrow if this hasn't been notified as an error yet + if ( callback ) { + throw e; + } + } + }, + + abort: function() { + if ( callback ) { + callback(); + } + } + }; + } +} ); + + + + +// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) +jQuery.ajaxPrefilter( function( s ) { + if ( s.crossDomain ) { + s.contents.script = false; + } +} ); + +// Install script dataType +jQuery.ajaxSetup( { + accepts: { + script: "text/javascript, application/javascript, " + + "application/ecmascript, application/x-ecmascript" + }, + contents: { + script: /\b(?:java|ecma)script\b/ + }, + converters: { + "text script": function( text ) { + jQuery.globalEval( text ); + return text; + } + } +} ); + +// Handle cache's special case and crossDomain +jQuery.ajaxPrefilter( "script", function( s ) { + if ( s.cache === undefined ) { + s.cache = false; + } + if ( s.crossDomain ) { + s.type = "GET"; + } +} ); + +// Bind script tag hack transport 
+jQuery.ajaxTransport( "script", function( s ) { + + // This transport only deals with cross domain requests + if ( s.crossDomain ) { + var script, callback; + return { + send: function( _, complete ) { + script = jQuery( " + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ +
    + +
  • Docs »
  • + +
  • Index
  • + + +
  • + + + +
  • + +
+ + +
+
+
+
+ + +

Index

+ +
+ A + | B + | C + | D + | E + | F + | G + | H + | I + | L + | M + | N + | P + | Q + | R + | S + | T + | U + | W + +
+

A

+ + + +
+ +

B

+ + +
+ +

C

+ + + +
+ +

D

+ + + +
+ +

E

+ + + +
+ +

F

+ + +
+ +

G

+ + + +
+ +

H

+ + +
+ +

I

+ + + +
+ +

L

+ + +
+ +

M

+ + +
+ +

N

+ + + +
+ +

P

+ + + +
+ +

Q

+ + +
+ +

R

+ + + +
+ +

S

+ + + +
+ +

T

+ + +
+ +

U

+ + + +
+ +

W

+ + + +
+ + + +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/spitalhygiene/Docs/build/html/index.html b/spitalhygiene/Docs/build/html/index.html new file mode 100644 index 0000000..65b6dd3 --- /dev/null +++ b/spitalhygiene/Docs/build/html/index.html @@ -0,0 +1,239 @@ + + + + + + + + + + + Welcome to the VRE Model documentation! — VRE Model 0.0.1 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+ + + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/spitalhygiene/Docs/build/html/model.html b/spitalhygiene/Docs/build/html/model.html new file mode 100644 index 0000000..82a1041 --- /dev/null +++ b/spitalhygiene/Docs/build/html/model.html @@ -0,0 +1,1052 @@ + + + + + + + + + + + vre/src/main/python/vre/model folder — VRE Model 0.0.1 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

vre/src/main/python/vre/model folder

+

This folder contains the definitions for all classes used in the VRE model.

+
+
+

File: Appointment.py

+

This script contains the Appointment class used in the VRE model.

+
+
+
+class Appointment.Appointment(termin_id, is_deleted, termin_bezeichnung, termin_art, termin_typ, termin_datum, dauer_in_min)
+

Models an appointment from RAP.

+
+
+static add_appointment_to_case(lines, cases, appointments)
+

Adds Appointment() objects to the SAP cases based on lines read from a csv file.

+

This function will be called by the HDFS_data_loader.patient_data() function (lines is an iterator object). +The underlying table in the Atelier_DataScience is called V_DH_FACT_TERMINPATIENT and structured as follows:

+ +++++ + + + + + + + + + + + + + + + + +

TERMINID

PATIENTID

FALLID

35672314

00008210020

0005660334

17255155

00002042800

0004017880

+
+
Parameters
+
    +
  • lines (iterator() object) – csv iterator from which data will be read

  • +
  • cases (dict) –

    Dictionary mapping case ids to Case() objects

    +

    –> { "0003536421" : Case(), "0003473241" : Case(), ...}

    +

  • +
  • appointments (dict) –

    Dictionary mapping appointment ids to Appointment() objects

    +

    –> { '36830543' : Appointment(), ... }

    +

  • +
+
+
+
+ +
+
+add_device(device)
+

Adds a device to the self.devices() list of this appointment.

+
+
Parameters
+

device (Device() Object) – Device() object to append to this appointment.

+
+
+
+ +
+
+add_employee(employee)
+

Adds an employee to the self.employees() list of this appointment.

+
+
Parameters
+

employee (Employee() Object) – Employee() object to append to this appointment.

+
+
+
+ +
+
+add_room(room)
+

Adds a room to the self.rooms() list of this appointment.

+
+
Parameters
+

room (Room() Object) – Room() object to append to this appointment.

+
+
+
+ +
+
+static create_termin_map(lines)
+

Loads the appointments from a csv reader instance.

+

This function will be called by the HDFS_data_loader.patient_data() function (lines is an iterator object). +The underlying table in the Atelier_DataScience is named V_DH_DIM_TERMIN_CUR and structured as follows:

+ +++++++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

TERMINID

IS_DELETED

TERMINBEZEICHNUNG

TERMINART

TERMINTYP

TERMINDATUM

DAUERINMIN

957219

0

K90 HINF

K90 HINF

Patiententermin

2005-02-04 00:00:00.0000

90.00000

957224

0

Konsultation 15’

Konsultation 15’

Patiententermin

2005-02-03 00:00:00.0000

15.00000

+
+
Parameters
+

lines (iterator() object) – csv iterator from which data will be read

+
+
Returns
+

Dictionary mapping appointment ids to Appointment() objects

+

–> { '36830543' : Appointment(), ... }

+

+
+
Return type
+

dict

+
+
+
+ +
+ +
+
+

File: Bed.py

+

This script contains the Bed class used in the VRE model.

+
+
+
+class Bed.Bed(name)
+

Models a Bed.

+
+
+add_move(move)
+

Adds a move to the self.moves() list of this bed.

+
+
Parameters
+

move (Move() Object) – Move() object to append.

+
+
+
+ +
+ +
+
+

File: Care.py

+

This script contains the Care class used in the VRE model.

+
+
+
+class Care.Care(patient_patientid, patient_typ, patient_status, fall_nummer, fall_typ, fall_status, datum_betreuung, dauer_betreuung_in_min, mitarbeiter_personalnummer, mitarbeiter_anstellungsnummer, mitarbeiter_login, batch_run_id)
+

Models an entry in TACS.

+
+
+static add_care_to_case(lines, cases, employees)
+

Adds the entries from TACS as instances of Care() objects to the respective Case().

+

This function will be called by the HDFS_data_loader.patient_data() function (lines is an iterator object). +The underlying table in the Atelier_DataScience is called TACS_DATEN and structured as follows:

+ ++++++++++++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

patient_patientid

patient_typ

patient_status

fall_nummer

fall_typ

fall_status

datum_betreuung

dauer_betreuung_in_min

mitarbeiter_personalnummer

mitarbeiter_anstellungsnummer

mitarbeiter_login

BATCH_RUN_ID

00013768220

Standard Patient

aktiv

0006422111

Standard Fall

aktiv

2018-03-11 00:00:00.0

3

0301119

00026556

I0301119

870

00000552828

Standard Patient

aktiv

0006454306

Standard Fall

aktiv

2018-04-10 00:00:00.0

20

0025908

00014648

I0025908

870

+
+
Parameters
+
    +
  • lines (iterator() object) – csv iterator from which data will be read

  • +
  • cases (dict) –

    Dictionary mapping case ids to Case() objects

    +

    –> {"0003536421" : Case(), "0003473241" : Case(), ...}

    +

  • +
  • employees (dict) –

    Dictionary mapping employee_ids to Employee() objects

    +

    –> {'0032719' : Employee(), ... }

    +

  • +
+
+
+
+ +
+
+add_employee(employee)
+

Assigns an employee to the self.employee attribute.

+
+

Note

+

Only one employee can be assigned to Care() objects!

+
+
+
Parameters
+

employee (Employee() Object) – Employee() object to assign.

+
+
+
+ +
+ +
+
+

File: Chop.py

+

This script contains the CHOP class used in the VRE model.

+
+
+
+class Chop.Chop(chop_code, chop_verwendungsjahr, chop, chop_code_level1, chop_level1, chop_code_level2, chop_level2, chop_code_level3, chop_level3, chop_code_level4, chop_level4, chop_code_level5, chop_level5, chop_code_level6, chop_level6, chop_status, chop_sap_katalog_id)
+

Models a CHOP code.

+
+
+add_case(case)
+

Adds a case to the self.cases list.

+
+
Parameters
+

case (Case() Object) – Case() object to append.

+
+
+
+ +
+
+chop_code_stats(chops)
+

Print frequency of different chop codes to console.

+
+
Parameters
+

chops (dict) –

Dictionary mapping the chopcode_katalogid entries to Chop() objects

+

\(\longrightarrow\) { 'Z39.61.10_11': Chop(), ... }

+

+
+
+
+ +
+
+static create_chop_dict(lines)
+

Creates and returns a dict of all chop codes.

+

The key of a chop code is <code>_<catalog> - different catalogs exist for different years. This function +will be called by the HDFS_data_loader.patient_data() function (lines is an iterator object). The underlying +table in Atelier_DataScience is called V_DH_REF_CHOP and structured as follows:

+ +++++++++++++++++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

CHOPCODE

CHOPVERWENDUNGSJAHR

CHOP

CHOPCODELEVEL1

CHOPLEVEL1

CHOPCODELEVEL2

CHOPLEVEL2

CHOPCODELEVEL3

CHOPLEVEL3

CHOPCODELEVEL4

CHOPLEVEL4

CHOPCODELEVEL5

CHOPLEVEL5

CHOPCODELEVEL6

CHOPLEVEL6

CHOPSTATUS

CHOPSAPKATALOGID

Z62.99.30

2016

Entnahme von Hoden- oder Neben…

C11

Operationen an den männlichen Geschlechtsorganen (60–64)

Z62

Operationen an den Hoden

Z62.9

Sonstige Operationen an den Hoden

Z62.99

Sonstige Operationen an den Hoden

Z62.99.0

Detail der Subkategorie 62.99

Z62.99.30

Entnahme von Hoden- oder Nebenh…

0

16

Z62.99.99

2011

Sonst. Operationen an den Ho…

C9

Operationen am Verdauungstrakt (42–54)

Z62

Operationen an den Hoden

Z62.9

Sonstige Operationen an den Hoden

Z62.99

Sonstige Operationen an den Hoden

Z62.99.99

Sonstige Operationen an den Hoden…

1

10

+
+
Parameters
+

lines (iterator() object) – csv iterator from which data will be read

+
+
Returns
+

Dictionary mapping the chopcode_katalogid entries to Chop() objects

+

\(\longrightarrow\) { 'Z39.61.10_11': Chop(), ... }

+

+
+
Return type
+

dict

+
+
+
+ +
+
+get_detailed_chop()
+

Returns description text from the highest available level for this CHOP.

+
+
Returns
+

Highest available level for this CHOP code.

+
+
Return type
+

str

+
+
+
+ +
+
+get_lowest_level_code()
+

Returns the lowest level of the CHOP code.

+

The lowest level is level 2 and is represented as the first number of the Code:

+

Z89.07.24 \(\longrightarrow\) Z89

+
+
Returns
+

Lowest available level for this CHOP code.

+
+
Return type
+

str

+
+
+
+ +
+ +
+
+

File: Devices.py

+

This script contains the Device class used in the VRE model.

+
+
+
+class Device.Device(geraet_id, geraet_name)
+

Models a device from RAP.

+
+
+static add_device_to_appointment(lines, appointments, devices)
+

Adds the device in devices to the respective appointment in appointments.

+

This function will be called by the HDFS_data_loader.patient_data() function (lines is an iterator object). +The underlying table in the Atelier_DataScience is called V_DH_FACT_TERMINGERAET and structured as follows:

+ +++++++ + + + + + + + + + + + + + + + + + + + + + + +

TERMINID

GERAETID

TERMINSTART_TS

TERMINENDE_TS

DAUERINMIN

26266554

123223

2015-04-03 13:45:00.0000

2015-04-03 15:45:00.0000

120.000000

23678836

38006

2014-07-31 10:00:00.0000

2014-07-31 10:30:00.0000

30.000000

+
+
Parameters
+
    +
  • lines (iterator() object) – csv iterator from which data will be read

  • +
  • appointments (dict) –

    Dictionary mapping appointment ids to Appointment() objects

    +

    \(\longrightarrow\) { '36830543' : Appointment(), ... }

    +

  • +
  • devices (dict) –

    Dictionary mapping device_ids to Device() objects

    +

    \(\longrightarrow\) {'64174' : Device(), ... }

    +

  • +
+
+
+
+ +
+
+static create_device_map(lines)
+

Loads all devices into a dictionary based on lines in the csv file.

+

This function will be called by the HDFS_data_loader.patient_data() function (lines is an iterator object). +The underlying table in the Atelier_DataScience is called V_DH_DIM_GERAET_CUR and structured as follows:

+ ++++ + + + + + + + + + + + + + +

GERAETID

GERAETNAME

82250

ANS-Fix

162101

Waage

+
+
Parameters
+

lines (iterator() object) – csv iterator from which data will be read

+
+
Returns
+

Dictionary mapping device ids to Device() objects

+

\(\longrightarrow\) {'64174' : Device(), ... }

+

+
+
Return type
+

dict

+
+
+
+ +
+ +
+
+

File: Employee.py

+

This script contains the Employee class used in the VRE model.

+
+
+
+class Employee.Employee(mitarbeiter_id)
+

Models an employee (doctor, nurse, etc) from RAP.

+
+
+static add_employee_to_appointment(lines, appointments, employees)
+

Adds Employee() in employees to an Appointment().

+

This function will be called by the HDFS_data_loader.patient_data() function (lines is an iterator object). +The underlying table is V_DH_FACT_TERMINMITARBEITER, and is identical to the one defined in the +create_employee_map() method above.

+
+
Parameters
+
    +
  • lines (iterator() object) – csv iterator from which data will be read

  • +
  • appointments (dict) –

    Dictionary mapping appointment ids to Appointment() objects

    +

    \(\longrightarrow\) { '36830543' : Appointment(), ... }

    +

  • +
  • employees (dict) –

    Dictionary mapping employee_ids to Employee() objects

    +

    \(\longrightarrow\) {'0032719' : Employee(), ... }

    +

  • +
+
+
+
+ +
+
+static create_employee_map(lines)
+

Reads the appointment to employee file and creates an Employee().

+

This function will be called by the HDFS_data_loader.patient_data() function (lines is an iterator object). +The underlying table in the Atelier_DataScience is called V_DH_FACT_TERMINMITARBEITER and structured as follows:

+ +++++++ + + + + + + + + + + + + + + + + + + + + + + +

TERMINID

MITARBEITERID

TERMINSTART_TS

TERMINENDE_TS

DAUERINMIN

521664

0063239

2003-11-11 07:30:00.000

2003-11-11 08:00:00.0000

30.000000

521754

X33671

2003-11-10 09:15:00.000

2003-11-10 09:45:00.0000

30.000000

+
+
Parameters
+

lines (iterator() object) – csv iterator from which data will be read

+
+
Returns
+

Dictionary mapping employee_ids to Employee() objects

+

\(\longrightarrow\) {'0032719' : Employee(), ... }

+

+
+
Return type
+

dict

+
+
+
+ +
+ +
+
+

File: ICD.py

+

This script contains the ICD class used in the VRE model.

+
+
+
+class ICD.ICD(falnr, dkey1, dkat, diadt, drg_cat)
+

Models an ICD object.

+
+
+add_case(case)
+

Adds a case to this ICD’s self.cases attribute.

+
+
Parameters
+

case (Case() Object) – Case() object to add.

+
+
+
+ +
+
+static add_icd_to_case(lines, cases)
+

Adds ICD codes to cases based on the ICD.fall_nummer attribute.

+

For details on how each line in the lines iterator object is formatted, please refer to the function +create_icd_dict() above.

+
+
Parameters
+
    +
  • lines (iterator() object) – csv iterator from which data will be read

  • +
  • cases (dict) – Dictionary mapping case ids to Case() objects +\(\longrightarrow\) {"0003536421" : Case(), "0003473241" : Case(), ...}

  • +
+
+
+
+ +
+
+static create_icd_dict(lines)
+

Creates and returns a dictionary of all icd codes.

+

This function will be called by the HDFS_data_loader.patient_data() function (lines is an iterator object). +The underlying table in the Atelier_DataScience is LA_ISH_NDIA and structured as follows:

+ +++++++ + + + + + + + + + + + + + + + + + + + + + + +

‘FALNR’

‘DKEY1’

‘DKAT1’

‘DIADT’

‘DRG_CATEGORY’

‘0001832682’

‘A41.9’

‘17’

‘2018-02-27’

‘P’

‘0001832682’

‘R65.1’

‘17’

‘2018-02-27’

‘S’

+
+
Parameters
+

lines (iterator() object) – csv iterator from which data will be read

+
+
Returns
+

Dictionary mapping the icd_code entries to ICD() objects

+

\(\longrightarrow\) { 'Z12.8': ICD(), ... }

+

+
+
Return type
+

dict

+
+
+
+ +
+ +
+
+

File: Medication.py

+

This script contains the Medication class used in the VRE model.

+
+
+
+class Medication.Medication(patient_id, case_id, drug_text, drug_atc, drug_quantity, drug_unit, drug_dispform, drug_submission)
+

Models a Medication object.

+
+
+static add_medication_to_case(lines, cases)
+

Adds Medication() objects to Case() objects.

+

This function will be called by the HDFS_data_loader.patient_data() function (lines is an iterator object), and +will add all Medication() objects to their corresponding Case() object in cases. The underlying table is +identical to the one used in the create_drug_map function.

+
+
Parameters
+
    +
  • lines (iterator() object) – csv iterator from which data will be read

  • +
  • cases (dict) –

    Dictionary mapping case ids to Case()

    +

    \(\longrightarrow\) {'0005976205' : Case(), ... }

    +

  • +
+
+
+
+ +
+
+static create_drug_map(lines)
+

Creates a dictionary of ATC codes to human readable drug names.

+

This function will be called by the HDFS_data_loader.patient_data() function (lines is an iterator object). +The underlying table in the Atelier_DataScience is called V_LA_IPD_DRUG_NORM and structured as follows:

+ ++++++++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

PATIENTID

CASEID

DRUG_TEXT

DRUG_ATC

DRUG_QUANTITY

DRUG_UNIT

DRUG_DISPFORM

DRUG_SUBMISSION

00001711342

0006437617

Torem Tabl 10 mg (Torasemid)

C03CA04

2.0000000000000

Stk

p.o.

2018-03-24 09:52:28.0000000

00001711342

0006437617

Ecofenac Sandoz Lipogel 1 % (Diclofenac)

M02AA15

1.0000000000000

Dos

lokal / topisch

2018-03-24 09:52:28.0000000

+
+
Parameters
+

lines (iterator() object) – csv iterator from which data will be read

+
+
Returns
+

Dictionary mapping drug codes to their respective text description

+

\(\longrightarrow\) {'B02BA01' : 'NaCl Braun Inf Lös 0.9 % 500 ml (Natriumchlorid)', ... }

+

+
+
Return type
+

dict

+
+
+
+ +
+
+is_antibiotic()
+

Returns the antibiotic status of a Medication.

+

Antibiotics are identified via the prefix J01.

+
+
Returns
+

Whether or not the medication self.drug_atc attribute starts with J01.

+
+
Return type
+

bool

+
+
+
+ +
+ +
+
+ + +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/spitalhygiene/Docs/build/html/objects.inv b/spitalhygiene/Docs/build/html/objects.inv new file mode 100644 index 0000000..b830bad Binary files /dev/null and b/spitalhygiene/Docs/build/html/objects.inv differ diff --git a/spitalhygiene/Docs/build/html/py-modindex.html b/spitalhygiene/Docs/build/html/py-modindex.html new file mode 100644 index 0000000..706c77f --- /dev/null +++ b/spitalhygiene/Docs/build/html/py-modindex.html @@ -0,0 +1,325 @@ + + + + + + + + + + + Python Module Index — VRE Model 0.0.1 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ +
    + +
  • Docs »
  • + +
  • Python Module Index
  • + + +
  • + +
  • + +
+ + +
+
+
+
+ + +

Python Module Index

+ +
+ a | + b | + c | + d | + e | + f | + h | + i | + m | + n | + p | + q +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
 
+ a
+ Appointment +
 
+ b
+ Bed +
 
+ c
+ Care +
+ Chop +
 
+ d
+ data_compiler +
+ Device +
 
+ e
+ Employee +
 
+ f
+ feature_extractor +
 
+ h
+ HDFS_data_loader +
 
+ i
+ ICD +
 
+ m
+ Medication +
 
+ n
+ networkx_graph +
 
+ p
+ preprocessor +
 
+ q
+ Query_Atelier_Data +
+ + +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/spitalhygiene/Docs/build/html/resources.html b/spitalhygiene/Docs/build/html/resources.html new file mode 100644 index 0000000..53a24ff --- /dev/null +++ b/spitalhygiene/Docs/build/html/resources.html @@ -0,0 +1,375 @@ + + + + + + + + + + + resources folder — VRE Model 0.0.1 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

resources folder

+

This folder contains important functions for loading data from SQL into CSV, thereby preparing the “raw” data used for +building the actual network models.

+

Most importantly, this folder also contains the file Update_Model.sh, which is a bash script controlling all steps +in the VRE calculation. Since all VRE data are recalculated once per day, this includes (in order):

+
    +
  1. Backing up data from the previous calculation cycle in the HDFS file system (from step 4 in the previous run)

  2. +
  3. Reloading data from SQL into CSV

  4. +
  5. Running the VRE analysis (all steps of the analysis are controlled with feature_extractor.py)

  6. +
  7. Adding new data (SQL files, CSV files, other data) to the HDFS file system

  8. +
+
+

File: Query_Atelier_Data.py

+

This script controls the data update for all “raw” data in the VRE Model. It will execute all SQL queries +(i.e. all files with a ‘.sql’ extension’) found in SQL_DIR, and save extracted data to CSV files in CSV_DIR. +CSV and SQL files will be named identically, e.g.:

+

LA_ISH_NBEW.sql \(\longrightarrow\) LA_ISH_NBEW.csv

+

The Atelier_DataScience is queried directly via the pyodbc module, and requires an additional connection file +containing details on the ODBC connection to the Atelier (see VRE Model Overview for more information).

+
+
+Query_Atelier_Data.WriteSQLToCSV(path_to_sql, path_to_csv, csv_sep, connection_file, trusted_connection=True)
+

Executes an SQL query and writes the results to path_to_csv.

+
+
Parameters
+
    +
  • path_to_sql (str) – Path to .sql file containing the query to be executed

  • +
  • path_to_csv (str) – Path to .csv file destination, to which data will be written to in “csv_sep” +delimited fashion

  • +
  • csv_sep (str) – Delimiter used in the csv file

  • +
  • connection_file (str) – path to file containing information used for server connection and authentication, +as well as database selection (read and passed to pyodbc.connect() ) +This information is read from an external file so as to avoid hard-coding usernames +and passwords

  • +
  • trusted_connection (bool) – additional argument passed to pyodbc.connect(), converted to “yes” if True and +“no” otherwise (defaults to True)

  • +
+
+
+
+ +
+
+

File: preprocessor.py

+

This script contains various functions for pre-processing data required in the VRE project. +This includes:

+
    +
  • Regenerating the ward screening overview data stored in the Atelier_DataScience.dbo.WARD_SCREENINGS table

  • +
  • +
+

Please refer to the script code for details on the various functions.

+
+
+
+preprocessor.ExecuteSQL(sql_command, connection_file, trusted_connection=True)
+

Executes an arbitrary SQL command, but does not return any results.

+
+
Parameters
+
    +
  • sql_command (str) – SQL command to be executed

  • +
  • connection_file (str) – path to file containing information used for server connection and authentication, +as well as database selection (read and passed to pyodbc.connect() )

  • +
  • trusted_connection (bool) – additional argument passed to pyodbc.connect(), converted to “yes” if True and +“no” otherwise (defaults to True)

  • +
+
+
+
+ +
+
+preprocessor.RecreateHospitalMap(path_to_config_file, csv_sep=';')
+

Recreates the Hospital Map in the Atelier_DataScience.

+

This map links the following important entities in the model:

+
    +
  • fachliche OE

  • +
  • pflegerische OE

  • +
  • official abbreviation of pflegerische OE

  • +
  • building in which pflegerische OE is located

  • +
  • floor of building in which pflegerische OE is located

  • +
+

All information required is found in the [...]/vre_input/maps/insel_map.csv file. Its contents +are used to create a query for updating the Atelier_DataScience.dbo.INSEL_MAP table. This query is written +to the [...]/vre_output/manual_sql_queries folder and named update_insel_map.sql, since the +Atelier_Datascience_Reader does not have permission to execute TRUNCATE, DELETE, INSERT or UPDATE statements.

+
+

Note

+

Floors are very important, since rooms are exported “floor-wise” from Waveware.

+
+
+
Parameters
+
    +
  • path_to_config_file (str) – path to the BasicConfig.ini file

  • +
  • csv_sep (str) – separator used in the read file (defaults to ;)

  • +
+
+
+
+ +
+
+preprocessor.RecreatePflegerischeOEMap(path_to_config_file, csv_sep=';')
+

Recreates the map for pflegerische OEs in the Atelier_DataScience.

+
+
This map links “free-text” pflegerische OE names to the official names in the OE_pflege_abk column of the

Atelier_DataScience.dbo.INSEL_MAP table.

+
+
+

All information required is found in the [...]/vre_input/maps/oe_pflege_map.csv file. Its contents +are used to create a query for updating the Atelier_DataScience.dbo.OE_PFLEGE_MAP table. This query is written +to the [...]/vre_output/manual_sql_queries folder and named update_oe_pflege_map.sql, since the +Atelier_Datascience_Reader does not have permission to execute TRUNCATE, DELETE, INSERT or UPDATE statements.

+
+
Parameters
+
    +
  • path_to_config_file (str) – path to the BasicConfig.ini file

  • +
  • csv_sep (str) – separator used in the read file (defaults to ;)

  • +
+
+
+
+ +
+
+preprocessor.RecreateScreeningData(path_to_config_file, csv_sep=';')
+

Recreates all screening data in the Atelier_DataScience.

+

All information required is found in the [...]/vre_input/screening_data/vre_screenings.csv file. Its contents +are used to create a query for updating the Atelier_DataScience.dbo.VRE_SCREENING_DATA table. This query is +written to the [...]/vre_output/manual_sql_queries folder and named update_VRE_SCREENING_DATA.sql, since the +Atelier_Datascience_Reader does not have permission to execute TRUNCATE, DELETE, INSERT or UPDATE statements.

+
+
Parameters
+
    +
  • path_to_config_file (str) – path to the BasicConfig.ini file

  • +
  • csv_sep (str) – separator used in the read file (defaults to ;)

  • +
+
+
+
+ +
+
+preprocessor.RecreateWardOverviewData(path_to_config_file, csv_sep=';')
+

Recreates dates at which specific screening types were active in various clinics.

+

This information is found in the [...]/vre_input/screening_data/screening_overview.csv file. Its contents +are used to create a query for updating the Atelier_DataScience.dbo.WARD_SCREENINGS table. This query is written +to the [...]/vre_output/manual_sql_queries folder and named update_ward_screenings.sql, +since the Atelier_Datascience_Reader does not have permission to execute TRUNCATE, DELETE, INSERT or UPDATE +statements.

+
+
To Do:

Find a solution to automate this part.

+
+
+
+
Parameters
+
    +
  • path_to_config_file (str) – path to the BasicConfig.ini file

  • +
  • csv_sep (str) – separator used in the read file (defaults to ;)

  • +
+
+
+
+ +
+
+ + +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/spitalhygiene/Docs/build/html/search.html b/spitalhygiene/Docs/build/html/search.html new file mode 100644 index 0000000..2ae886d --- /dev/null +++ b/spitalhygiene/Docs/build/html/search.html @@ -0,0 +1,214 @@ + + + + + + + + + + + Search — VRE Model 0.0.1 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ +
    + +
  • Docs »
  • + +
  • Search
  • + + +
  • + + + +
  • + +
+ + +
+
+
+
+ + + + +
+ +
+ +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/spitalhygiene/Docs/build/html/searchindex.js b/spitalhygiene/Docs/build/html/searchindex.js new file mode 100644 index 0000000..5b1b430 --- /dev/null +++ b/spitalhygiene/Docs/build/html/searchindex.js @@ -0,0 +1 @@ +Search.setIndex({docnames:["Patient_Test_Data","Unused","VRE_Model","index","model","resources","sql","vre"],envversion:{"sphinx.domains.c":1,"sphinx.domains.changeset":1,"sphinx.domains.cpp":1,"sphinx.domains.javascript":1,"sphinx.domains.math":2,"sphinx.domains.python":1,"sphinx.domains.rst":1,"sphinx.domains.std":1,"sphinx.ext.todo":1,sphinx:56},filenames:["Patient_Test_Data.rst","Unused.rst","VRE_Model.rst","index.rst","model.rst","resources.rst","sql.rst","vre.rst"],objects:{"":{Appointment:[4,0,0,"-"],Bed:[4,0,0,"-"],Care:[4,0,0,"-"],Chop:[4,0,0,"-"],Device:[4,0,0,"-"],Employee:[4,0,0,"-"],HDFS_data_loader:[7,0,0,"-"],ICD:[4,0,0,"-"],Medication:[4,0,0,"-"],Query_Atelier_Data:[5,0,0,"-"],data_compiler:[7,0,0,"-"],feature_extractor:[7,0,0,"-"],networkx_graph:[7,0,0,"-"],preprocessor:[5,0,0,"-"]},"Appointment.Appointment":{add_appointment_to_case:[4,2,1,""],add_device:[4,3,1,""],add_employee:[4,3,1,""],add_room:[4,3,1,""],create_termin_map:[4,2,1,""]},"Bed.Bed":{add_move:[4,3,1,""]},"Care.Care":{add_care_to_case:[4,2,1,""],add_employee:[4,3,1,""]},"Chop.Chop":{add_case:[4,3,1,""],chop_code_stats:[4,3,1,""],create_chop_dict:[4,2,1,""],get_detailed_chop:[4,3,1,""],get_lowest_level_code:[4,3,1,""]},"Device.Device":{add_device_to_appointment:[4,2,1,""],create_device_map:[4,2,1,""]},"Employee.Employee":{add_employee_to_appointment:[4,2,1,""],create_employee_map:[4,2,1,""]},"HDFS_data_loader.HDFS_data_loader":{get_csv_file:[7,3,1,""],get_hdfs_pipe:[7,3,1,""],patient_data:[7,3,1,""]},"ICD.ICD":{add_case:[4,3,1,""],add_icd_to_case:[4,2,1,""],create_icd_dict:[4,2,1,""]},"Medication.Medication":{add_medication_to_case:[4,2,1,""],create_drug_map:[4,2,1,""],is_antibiotic:[4,3,1,""]},"featu
re_extractor.feature_extractor":{export_csv:[7,2,1,""],export_gephi:[7,2,1,""],get_contact_patients:[7,3,1,""],get_contact_patients_for_case:[7,2,1,""],prepare_features_and_labels:[7,2,1,""]},"networkx_graph.surface_model":{NodeBetweennessException:[7,5,1,""],add_edge_infection:[7,3,1,""],add_network_data:[7,3,1,""],export_node_betweenness:[7,3,1,""],export_patient_degree_ratio:[7,3,1,""],export_shortest_path_length_overview:[7,3,1,""],export_total_degree_ratio:[7,3,1,""],identify_id:[7,3,1,""],identify_node:[7,3,1,""],inspect_network:[7,3,1,""],load_from_json:[7,2,1,""],new_device_node:[7,3,1,""],new_edge:[7,3,1,""],new_employee_node:[7,3,1,""],new_generic_node:[7,3,1,""],new_patient_node:[7,3,1,""],new_room_node:[7,3,1,""],parse_filename:[7,2,1,""],remove_isolated_nodes:[7,3,1,""],save_to_json:[7,2,1,""],trim_model:[7,3,1,""],update_edge_attributes:[7,3,1,""],update_node_attributes:[7,3,1,""],update_shortest_path_statistics:[7,3,1,""],write_node_files:[7,3,1,""]},Appointment:{Appointment:[4,1,1,""]},Bed:{Bed:[4,1,1,""]},Care:{Care:[4,1,1,""]},Chop:{Chop:[4,1,1,""]},Device:{Device:[4,1,1,""]},Employee:{Employee:[4,1,1,""]},HDFS_data_loader:{HDFS_data_loader:[7,1,1,""]},ICD:{ICD:[4,1,1,""]},Medication:{Medication:[4,1,1,""]},Query_Atelier_Data:{WriteSQLToCSV:[5,4,1,""]},feature_extractor:{feature_extractor:[7,1,1,""]},networkx_graph:{create_model_snapshots:[7,4,1,""],surface_model:[7,1,1,""]},preprocessor:{ExecuteSQL:[5,4,1,""],RecreateHospitalMap:[5,4,1,""],RecreatePflegerischeOEMap:[5,4,1,""],RecreateScreeningData:[5,4,1,""],RecreateWardOverviewData:[5,4,1,""]}},objnames:{"0":["py","module","Python module"],"1":["py","class","Python class"],"2":["py","staticmethod","Python static method"],"3":["py","method","Python method"],"4":["py","function","Python function"],"5":["py","exception","Python 
exception"]},objtypes:{"0":"py:module","1":"py:class","2":"py:staticmethod","3":"py:method","4":"py:function","5":"py:exception"},terms:{"10_11":4,"1st":7,"case":[0,4,7],"class":[4,7],"default":[5,7],"export":[5,7],"final":0,"function":[4,5,6,7],"import":[0,5,6,7],"int":7,"m\u00e4nnlichen":4,"new":[5,7],"null":7,"return":[4,5,7],"static":[4,7],"true":[0,5,7],ANS:4,Adding:5,And:[0,7],Dos:4,For:[0,4,7],IDs:[0,7],Its:5,NOT:7,OEs:5,The:[0,2,4,5,7],These:7,Will:7,_pdr:7,_tdr:7,a41:4,abbrevi:5,abov:[4,7],absolut:7,accord:7,activ:5,actual:[5,7],add:[4,7],add_appointment_to_cas:4,add_care_to_cas:4,add_cas:4,add_devic:4,add_device_to_appoint:4,add_edge_infect:7,add_employe:4,add_employee_to_appoint:4,add_icd_to_cas:4,add_medication_to_cas:4,add_mov:4,add_network_data:7,add_node_clust:7,add_room:4,added:7,addit:[5,7],air:[],aktiv:4,alchemi:7,all:[4,5,7],allow:7,along:7,alphabet:7,alreadi:7,also:[5,7],alwai:7,analysi:5,ani:[5,7],antibiot:4,append:[4,7],appoint:[0,3,7],approach:[],approxim:7,arbitrari:5,archiv:1,argument:[5,7],arrai:7,aspect:7,assign:4,associ:[0,7],assum:7,atc:4,ateli:5,atelier_datasci:[4,5],atelier_datascience_read:5,att_dict:7,attribut:[4,7],attribute_dict:7,attribute_subset:7,authent:5,autom:5,automat:7,avail:4,avoid:[5,7],b02ba01:4,back:5,base:[4,7],bash:5,basi:[2,7],basicconfig:[5,7],batch_run_id:4,becaus:7,bed:3,been:7,being:7,below:7,between:7,big:[],bitbucket:[],bool:[4,5,7],both:7,braun:4,build:[5,7],built:7,c03ca04:4,c11:4,calcul:[5,7],call:[4,7],can:[4,7],care:3,case_id:4,caseid:4,catalog:4,central:7,charact:7,check:7,chop:[0,3],chop_cod:4,chop_code_level1:4,chop_code_level2:4,chop_code_level3:4,chop_code_level4:4,chop_code_level5:4,chop_code_level6:4,chop_code_stat:4,chop_level1:4,chop_level2:4,chop_level3:4,chop_level4:4,chop_level5:4,chop_level6:4,chop_sap_katalog_id:4,chop_statu:4,chop_verwendungsjahr:4,chopcod:4,chopcode_katalogid:4,chopcodelevel1:4,chopcodelevel2:4,chopcodelevel3:4,chopcodelevel4:4,chopcodelevel5:4,chopcodelevel6:4,choplevel1:4
,choplevel2:4,choplevel3:4,choplevel4:4,choplevel5:4,choplevel6:4,chopsapkatalogid:4,chopstatu:4,chopverwendungsjahr:4,chosen:7,clinic:5,close:7,cluster:7,code:[0,4,5,7],column:[5,7],combin:7,command:5,common:7,compat:7,compil:7,complet:0,connect:[5,7],connection_fil:5,consid:7,consist:0,consol:4,contact:7,contact_pat:7,contain:[1,4,5,6,7],content:[3,5,7],contrast:7,control:[5,7],convert:5,copi:7,correspond:[4,7],creat:[4,5,7],create_chop_dict:4,create_device_map:4,create_drug_map:4,create_employee_map:4,create_icd_dict:4,create_model_snapshot:7,create_termin_map:4,creation:7,cronjob:7,csv:[4,5,7],csv_dir:5,csv_path:7,csv_sep:[5,7],current:[1,7],cycl:5,dai:5,data:[2,3,4,5,6,7],data_compil:3,data_dir:7,databas:5,datafil:7,datafram:7,dataset:0,date:[5,7],datetim:7,datum_betreuung:4,dauer_betreuung_in_min:4,dauer_in_min:4,dauerinmin:4,dbo:5,defin:[4,7],definit:4,degre:7,delet:5,delimit:5,den:4,depend:7,der:4,descript:[4,7],dest_pat_id:7,dest_path:7,destin:5,detail:[4,5,7],devic:[0,3,7],device_id:4,diadt:4,diagnost:7,diclofenac:4,dict:[4,7],dictionari:[4,7],dictvector:7,differ:4,directli:[5,7],directori:7,distribut:7,dkat1:4,dkat:4,dkey1:4,doc:7,docstr:7,doctor:4,document:[2,7],doe:[5,7],done:7,dov:7,drg_cat:4,drg_categori:4,drug:4,drug_atc:4,drug_dispform:4,drug_quant:4,drug_submiss:4,drug_text:4,drug_unit:4,dure:7,each:[4,7],earlier:7,ecofenac:4,edg:7,edge_infect:7,edge_list:7,edge_tupl:7,edge_typ:7,edges_infect:[],either:7,employe:[3,7],employee_id:4,empti:7,encount:7,end:7,end_overlap_dt:7,entir:7,entiti:5,entnahm:4,entri:[0,4,7],etc:[4,7],everi:7,exampl:7,except:[0,7],execut:[5,7],executesql:5,exist:[4,7],export_csv:7,export_gephi:7,export_node_between:7,export_path:7,export_patient_degree_ratio:7,export_shortest_path_length_overview:7,export_total_degree_ratio:7,extens:5,extern:5,extract:[0,5,6,7],extrem:7,fachlich:5,fact:[],factor:7,fall:4,fall_numm:4,fall_statu:4,fall_typ:4,fallid:4,falnr:4,fals:7,fashion:[5,7],faulti:7,featur:7,feature_extract:7,feature_extract
or:[3,5],few:7,file:[1,3],file_path:7,filenam:7,filepath:7,filter:0,find:5,first:[4,7],fit:7,fix:4,flag:7,floor:5,focus_nod:7,folder:[2,3],follow:[0,4,5,7],form:[0,7],format:[4,7],found:[5,7],frac:7,fraction:7,free:5,frequenc:4,from:[4,5,7],full:7,gener:7,gephi:7,geraet_id:4,geraet_nam:4,geraetid:[0,4],geraetnam:4,german:0,geschlechtsorganen:4,get_contact_pati:7,get_contact_patients_for_cas:7,get_csv_fil:7,get_detailed_chop:4,get_hdfs_pip:7,get_lowest_level_cod:4,github:2,given:7,graph:7,hadoop:7,handshak:7,hard:[5,7],has:7,have:[5,7],hdf:[5,7],hdfs_data_load:[3,4],hdfs_pipe:7,header:7,highest:4,highli:7,hinf:4,histori:7,hoden:4,hospit:5,how:[4,7],howev:7,huge:[],human:4,i0025908:4,i0301119:4,icd:3,icd_cod:4,ident:[0,4,5],identifi:[4,7],identify_id:7,identify_nod:7,ids:[4,7],ignor:7,importantli:5,improperli:7,includ:[0,5,7],increas:7,independ:7,index:[],indic:7,indirectli:7,ineffici:7,inf:4,infect:7,inform:[5,7],ini:[5,7],insel:2,insel_map:5,insert:5,inspect:7,inspect_network:7,instanc:[4,7],instead:7,integ:7,integr:7,intens:7,interact:7,intern:7,involv:7,is_antibiot:4,is_delet:4,isol:7,iter:[4,7],its:7,itself:7,j01:4,json:7,k90:4,kei:[4,7],known:7,konsult:4,kontakt_org:7,kontakt_raum:7,la_ish_nbew:5,la_ish_ndia:4,label:7,larger:7,last:7,lead:7,least:7,left:7,len:7,length:7,level:[4,7],line:4,link:[5,7],lipogel:4,list:[4,7],load:[4,5,7],load_from_json:7,load_test_data:[],locat:5,log:7,log_warn:7,lokal:4,longer:1,longrightarrow:[4,5,7],lot:7,lowest:4,m02aa15:4,mai:7,main:3,mainli:7,make:7,mani:[],manual_sql_queri:5,map:[4,5,7],math:[],max_path_length:7,maximum:7,mbox:[],mean:7,measur:7,medic:3,memori:7,messag:7,method:[4,7],mitarbeiter_anstellungsnumm:4,mitarbeiter_id:4,mitarbeiter_login:4,mitarbeiter_personalnumm:4,mitarbeiterid:4,model:[0,1,5,7],modul:[5,7],mono:[],more:[5,7],most:[5,7],move:[4,7],multigraph:7,multipl:7,must:7,nacl:4,name:[4,5,7],natriumchlorid:4,ndarrai:7,neben:4,nebenh:4,neg:7,neo4j:7,network:[5,7],networkx:7,networkx_graph:3,new_device_nod:7,new
_edg:7,new_employee_nod:7,new_generic_nod:7,new_patient_nod:7,new_room_nod:7,newli:7,node1:7,node2:7,node3:7,node4:7,node:7,node_attribut:7,node_data_dict:7,node_files_written:7,node_id:7,node_list:7,node_typ:7,node_x:7,nodebetweennessexcept:7,non:7,none:7,note:7,number:[4,7],numpi:7,nurs:4,object:[4,7],obtain:7,obviou:7,occur:7,odbc:5,oder:4,oe_pflege_abk:5,oe_pflege_map:5,offici:5,oldest:7,onc:[5,7],one:[0,4,7],onli:[0,4,7],open:7,operationen:4,order:[5,7],organ:7,orig_model:7,origin:7,other:[5,7],otherwis:[5,7],over:7,overflow:7,overview:[3,5,7],overwritten:7,page:[],pair:7,panda:7,parallel:7,param:7,paramet:[4,5,7],pars:7,parse_filenam:7,part:5,particular:7,partner:0,pass:5,password:5,path:[5,7],path_to_config_fil:5,path_to_csv:5,path_to_fil:7,path_to_sql:5,patient:[3,4,7],patient_data:[4,7],patient_data_overview:7,patient_dict:7,patient_id:4,patient_patientid:4,patient_statu:4,patient_typ:4,patiententermin:4,patientid:4,per:[2,5,7],perform:7,permiss:5,pflegerisch:5,pleas:[4,5,7],point:7,pos:7,possibl:7,potenti:7,pre:5,preced:7,prefix:[4,7],prepar:[5,7],prepare_features_and_label:7,preprocesor:3,preprocessor:5,prerequisit:7,present:7,previou:5,print:[4,7],proach:7,problemat:7,process:[5,7],progress:7,project:5,properli:7,properti:7,propos:7,provid:7,pyodbc:5,python:3,qualiti:7,queri:[0,5,7],query_atelier_data:3,r65:4,rap:4,ratio:7,raw:5,read:[4,5,7],readabl:4,reader:[4,7],readm:7,reason:7,recalcul:5,record:7,recreat:5,recreatehospitalmap:5,recreatepflegerischeoemap:5,recreatescreeningdata:5,recreatewardoverviewdata:5,refer:[4,5,7],regard:[],regardless:7,regener:5,relat:7,relev:7,relevant_cas:7,reload:5,remov:7,remove_isolated_nod:7,replac:7,replace_char:7,repo:[],repositori:2,repres:[4,7],represent:7,requir:[5,7],resourc:3,respect:[4,7],ressourc:7,restrict:0,result:[5,7],risk:7,risk_dict:7,risk_onli:7,room:[4,5,7],room_id:7,room_nam:7,root:2,row:7,run:[5,7],same:7,sandoz:4,sap:4,save:[5,7],save_to_json:7,saved_object:7,scienc:2,score:7,screen:[5,7],screening_dat
a:5,screening_overview:5,script:[1,4,5,7],search:[],second:7,see:[5,7],select:[0,5],self:[4,7],separ:[5,7],server:5,set:7,shortest:7,shortest_paths_through_this_nod:7,should:7,silent:7,simpl:0,sinc:[5,7],singl:[0,7],sklearn:7,small:0,smaller:7,snapshot:7,snapshot_dt:7,snapshot_dt_list:7,solut:5,sonst:4,sonstig:4,sort:7,sourc:7,source_id:7,source_pat_id:7,source_typ:7,specif:[5,7],specifi:7,sphinx:2,spitalhygien:2,sql:[0,3,5,7],sql_command:5,sql_dir:5,src:3,stai:7,standard:4,start:[4,7],start_overlap_dt:7,statement:[0,5],statist:7,statu:4,step:[5,7],stk:4,store:[5,7],str:[4,5,7],string:7,string_id:7,strip:7,structur:[4,7],sub:0,subkategori:4,subsequ:7,subset:[0,7],suffix:7,sum:7,support:7,surfac:7,surface_model:7,system:[5,7],tabl:[4,5],tac:4,tacs_daten:4,target:7,target_id:7,target_typ:7,tdr:7,termin:0,termin_art:4,termin_bezeichnung:4,termin_datum:4,termin_id:4,termin_typ:4,terminart:4,terminbezeichnung:4,termindatum:4,terminende_t:4,terminid:[0,4],terminstart_t:4,termintyp:4,test:3,text:[4,5],than:7,therebi:5,theu:7,thi:[0,1,2,4,5,6,7],those:0,three:0,time:7,timepoint:7,topisch:4,torasemid:4,torem:4,total:7,total_shortest_path:7,transmiss:7,transmit:7,trigger:7,trim:7,trim_model:7,truncat:5,trusted_connect:5,tupl:7,two:7,txt:7,type:[4,5,7],unchang:7,underli:4,undirect:7,uniqu:7,unknown:7,unproblemat:7,unus:3,updat:[5,7],update_edge_attribut:7,update_insel_map:5,update_model:5,update_node_attribut:7,update_oe_pflege_map:5,update_shortest_path_statist:7,update_vre_screening_data:5,update_ward_screen:5,use:7,used:[0,1,4,5,7],usernam:5,using:7,usual:7,v_dh_dim_geraet_cur:4,v_dh_dim_termin_cur:4,v_dh_fact_termingeraet:4,v_dh_fact_terminmitarbeit:4,v_dh_fact_terminpati:4,v_dh_ref_chop:4,v_la_ipd_drug_norm:4,valu:7,variat:7,variou:[5,7],vector:7,verdauungstrakt:4,veri:5,version:1,via:[4,5,7],visual:7,visualis:7,von:4,vre:[0,1,5],vre_input:5,vre_output:5,vre_screen:5,vre_screening_data:5,vre_statu:7,waag:4,ward:[5,7],ward_nam:7,ward_screen:5,warn:7,warn_log:7,wavewar:5,we
ight:7,welcom:2,well:5,were:[5,7],what:7,when:7,where:[0,7],whether:[4,7],which:[4,5,7],wise:5,without:7,work:7,would:7,write:[5,7],write_node_fil:7,writesqltocsv:5,written:[5,7],x33671:4,year:[4,7],yes:5,z00:0,z01:0,z12:4,z34:0,z36:0,z39:[0,4],z50:0,z51:0,z54:0,z62:4,z88:0,z89:[0,4],z94:0,z99:0},titles:["Patient Test Data","Unused folder","VRE Model Overview","Welcome to the VRE Model documentation!","vre/src/main/python/vre/model folder","resources folder","sql folder","vre/src/main/python/vre folder"],titleterms:{appoint:4,bed:4,care:4,chop:4,code:[],data:0,data_compil:7,detail:[],devic:4,document:3,employe:4,feature_extractor:7,file:[4,5,7],folder:[1,4,5,6,7],hdfs_data_load:7,icd:4,indic:[],main:[4,7],medic:4,model:[2,3,4],networkx_graph:7,overview:2,patient:0,pleas:[],preprocesor:5,python:[4,7],query_atelier_data:5,refer:[],resourc:5,script:[],sql:6,src:[4,7],tabl:[],test:0,unus:1,vre:[2,3,4,7],welcom:3}}) \ No newline at end of file diff --git a/spitalhygiene/Docs/build/html/sql.html b/spitalhygiene/Docs/build/html/sql.html new file mode 100644 index 0000000..960bb8a --- /dev/null +++ b/spitalhygiene/Docs/build/html/sql.html @@ -0,0 +1,209 @@ + + + + + + + + + + + sql folder — VRE Model 0.0.1 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

sql folder

+

This folder contains important functions for extracting SQL data.

+
+ + +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/spitalhygiene/Docs/build/html/vre.html b/spitalhygiene/Docs/build/html/vre.html new file mode 100644 index 0000000..3dc6fc6 --- /dev/null +++ b/spitalhygiene/Docs/build/html/vre.html @@ -0,0 +1,1067 @@ + + + + + + + + + + + vre/src/main/python/vre folder — VRE Model 0.0.1 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

vre/src/main/python/vre folder

+

This folder contains all files relevant for building the actual VRE model.

+
+
+

File: data_compiler.py

+

This script contains all functions for compiling data from CSV or HDFS, and controls the creation of all +objects required for the VRE model. This process has multiple steps and is structured as follows:

+
    +
  • Reading the BasicConfig.ini file in this directory

  • +
  • Loading all VRE-relevant data using the HDFS_data_loader class

  • +
  • Creation of the feature vector using the feature_extractor class

  • +
  • Export of this feature vector to CSV

  • +
  • Creation of the surface model using the surface_model class

  • +
  • Export of various results from the surface model using its built-in functions

  • +
+

This script is called in the cronjob and triggers the build of the VRE model!

+

Please refer to the script code for details.

+
+
+
+

File: feature_extractor.py

+

This script contains the feature_extractor class, which controls the following aspects of VRE:

+
    +
  • Preparation of the feature vector

  • +
  • Extraction of patient-patient contacts

  • +
  • Export to various sources (Gephi, CSV, Neo4J, etc.)

  • +
+
+
+
+class feature_extractor.feature_extractor
+

Creates pandas dataframes with features, labels and relevant dates, and provides export functions to various +target systems.

+
+
+static export_csv(features, labels, dates, v, file_path)
+

Function for exporting features, labels and dates to CSV.

+

Combines features, labels and dates in a pandas.DataFrame(), and exports all data to the csv file given +in file_path.

+
+
Parameters
+
    +
  • features (numpy.ndarray()) – numpy.ndarray() with one row per patient containing “fitted” risk factors +for each patient in the one-of-K fashion

  • +
  • labels (numpy.ndarray()) – 1-D numpy.ndarray() containing the labels for each patient +(integers between -1 and 3)

  • +
  • dates (numpy.ndarray()) – 1-D numpy.ndarray() containing risk dates for each patient

  • +
  • vsklearn.feature_extraction.DictVectorizer() object with which the features +parameter was created

  • +
  • file_path (str) – Absolute path to exported csv file

  • +
+
+
+
+ +
+
+static export_gephi(features, labels, dates, v, dest_path='.', csv_sep=', ')
+

Exports the node and edge list in Gephi-compatible format for visualisation.

+

Edges and nodes will be exported to files dest_path/edge_list.csv and dest_path/node_list.csv, +respectively.

+
+
Parameters
+
    +
  • features (numpy.ndarray()) – numpy.ndarray() with one row per patient containing the “fitted” risk +factors for each patient in the one-of-K fashion.

  • +
  • labels (numpy.ndarray()) – 1-D numpy.ndarray() containing the labels for each patient +(integers between -1 and 3)

  • +
  • dates (numpy.ndarray()) – 1-D numpy.ndarray() containing risk dates for each patient

  • +
  • vsklearn.feature_extraction.DictVectorizer() object with which the features +parameter was created

  • +
  • dest_path (str) – path into which the edge_list.csv and node_list.csv files will be exported +(defaults to the current working directory)

  • +
  • csv_sep (str) – separator for exported csv files (defaults to ,)

  • +
+
+
+
+ +
+
+get_contact_patients(patients)
+

Extracts all patient contacts.

+

Extract all contacts between patients in the same room and same ward which occurred during the last year.

+
+
Parameters
+

patients (dict) – Dictionary mapping patient ids to Patient() objects –> {“00001383264” : Patient(), +“00001383310” : Patient(), …}

+
+
Returns
+

List containing tuples of length 6 of either the format

+

(source_pat_id, dest_pat_id, start_overlap_dt, end_overlap_dt, room_name, “kontakt_raum”)

+

or the format

+

(source_pat_id, dest_pat_id, start_overlap_dt, end_overlap_dt, ward_name, “kontakt_org”)

+

+
+
Return type
+

list

+
+
+
+ +
+
+static get_contact_patients_for_case(cases, contact_pats)
+

Extracts contact patients for specific cases.

+

Appends tuples of length 6 (see param contact_pats) directly to contact_pats, which is a list recording all +patient contacts.

+
+
Parameters
+
    +
  • cases (dict) – Dictionary mapping case ids to Case() objects –> {“0003536421” : Case(), +“0003473241” : Case(), …}

  • +
  • contact_pats (list) – List containing tuples of length 6 of either the format: (source_pat_id, +dest_pat_id, start_overlap_dt, end_overlap_dt, room_name, “kontakt_raum”) in the +case of a contact room, or the format (source_pat_id, dest_pat_id, +start_overlap_dt, end_overlap_dt, ward_name, “kontakt_org”) in the case of a +contact organization.

  • +
+
+
+
+ +
+
+static prepare_features_and_labels(patients)
+

Internal function used in various data exports.

+

Creates the feature np.array() and label np.array(), along with relevant dates.

+
+
Parameters
+

patients (dict) – Dictionary mapping patient ids to Patient() objects of the form +{"00001383264":  Patient(), "00001383310":  Patient(), ...}

+
+
Returns
+

tuple of length 4 of the form \(\longrightarrow\) (features, labels, dates, v)

+

Please refer to function code for more details.

+

+
+
Return type
+

tuple

+
+
+
+ +
+ +
+
+

File: HDFS_data_loader.py

+

This script contains all functions for loading data from CSV or HDFS, and controls the creation of all +objects required for the VRE model.

+
+
+
+class HDFS_data_loader.HDFS_data_loader(hdfs_pipe=True)
+

Loads all the csv files from HDFS and creates the data model.

+
+
+get_csv_file(csv_path)
+

Loads a datafile from CSV.

+

Loads the datafile specified in csv_path, and returns the file without header as a csv.reader() instance. +csv_path must be an absolute filepath. This function is used in the method patient_data() if hdfs_pipe +is False.

+
+
Parameters
+

csv_path (str) – full path to csv file.

+
+
+
+

Important

+

Since the csv.reader() instance is returned by this function via open(csv_path, ...), these files may +not be properly closed!

+
+
+
Returns
+

csv.reader() instance not containing the header of the file.

+
+
+
+ +
+
+get_hdfs_pipe(path)
+

Loads a datafile from HDFS.

+

Loads the datafile specified in path from the Hadoop file system, and returns the file without header as a +csv.reader() instance. This function is used in the method patient_data() if hdfs_pipe is True +(the default).

+
+
Parameters
+

path (str) – full path to file in HDFS to be loaded.

+
+
Returns
+

csv.reader() instance not containing the header of the file.

+
+
+
+ +
+
+patient_data(risk_only=False)
+

Prepares patient data based on all results obtained from the SQL queries.

+

If self.hdfs_pipe is True, this will use the get_hdfs_pipe() method. Otherwise, the +get_csv_file() method is used.

+
+
Parameters
+

risk_only (bool) – Whether or not to use only risk data (defaults to False).

+
+
Returns
+

Dictionary containing all VRE-relevant objects of the form

+
+

{ “rooms” \(\longrightarrow\) Rooms,

+

”wards” \(\longrightarrow\) Wards, etc. }

+
+

Please refer to the vre/src/main/python/vre/model folder documentation for more details on the +various objects.

+

+
+
Return type
+

dict

+
+
+
+ +
+ +
+
+

File: networkx_graph.py

+

This script contains the code to run statistics on various types of network models that will be used as proposed by +Theus (see README.md for more details).

+

Surface Model: This model assumes that VRE is transmitted on “surfaces”, and contains all potential transmitting +surfaces as nodes. These currently include:

+
    +
  • Patients

  • +
  • Employees

  • +
  • Rooms

  • +
  • Devices

  • +
+

Edges are based on contact between two nodes if the contact (e.g. a patient being in a particular room) has occurred +during a patient’s relevant stay.

+

Handshake Model: This model assumes that VRE is transmitted via “handshakes”, and contains only Patients as nodes. +In contrast to the Surface model, edges in this model correspond to the transmission vectors, and represent common rooms +or employees via which two patients may have been (indirectly) in contact with each other.

+
+
+
+networkx_graph.create_model_snapshots(orig_model, snapshot_dt_list)
+

Creates model snapshots based on the datetime.datetime() values provided in snapshot_dt_list.

+
+

Note

+

For obvious reasons, all of the values provided must be smaller (earlier than) orig_model.snapshot_dt (i.e. +the snapshot date of the model used as a basis for the other models).

+
+
+
Parameters
+
    +
  • orig_model (surface_model) – Original surface_model() object that will be used as starting point for all +subsequent snapshots

  • +
  • snapshot_dt_list (list) – List of dt.dt() objects, all smaller than self.snapshot_dt

  • +
+
+
Returns
+

List of independent surface_model() objects corresponding to the various model snapshots in increasing +order (i.e. the oldest snapshot is first in the list). The last entry in the returned list contains orig_model, +meaning the list has length len(snapshot_dt_list) + 1. If no snapshot creation is possible, None is +returned instead.

+
+
Return type
+

list

+
+
+
+ +
+
+class networkx_graph.surface_model(data_dir='.', edge_types=None)
+

Represents the Surface Model graph in networkx.

+

Nodes can be one of:

+
    +
  • Patients \(\longrightarrow\) added with attribute dict {'type' : "Patient" }

  • +
  • Employees \(\longrightarrow\) added with attribute dict {'type' : "Employee" }

  • +
  • Rooms \(\longrightarrow\) added with attribute dict {'type' : "Room" }

  • +
  • Devices \(\longrightarrow\) added with attribute dict {'type' : "Device" }

  • +
+

A few details on how the model graph is set up:

+
    +
  • All node objects will be represented in the form of string objects, where unique identifiers are as follows:

    +
    +
      +
    • Patients \(\longrightarrow\) patient ID

    • +
    • Rooms: \(\longrightarrow\) room name

    • +
    • Employees: \(\longrightarrow\) employee ID

    • +
    • Devices: \(\longrightarrow\) Device ID

    • +
    +
    +
  • +
  • All node objects will have at least two attributes:

    +
    +
      +
    • type (see docstring above)

    • +
    • cluster (for visual representation with Alchemy) –> added via the function add_node_clustering()

    • +
    +
    +
  • +
  • All edge objects will have at least the following attributes:

    +
    +
      +
    • from \(\longrightarrow\) indicates start of the interaction (a dt.dt() object)

    • +
    • to \(\longrightarrow\) indicates end of the interaction (a dt.dt() object)

    • +
    • type \(\longrightarrow\) indicates what node types are linked by this edge, e.g. “Patient-Room”, +“Patient-Employee”, etc.

    • +
  • origin \(\longrightarrow\) indicates the source that was used to add this edge (e.g. “Move”, +“Appointment”, etc.)

    • +
    +
    +
  • +
  • Edge descriptions always contain the nodes in alphabetical order, i.e. Device-Room (NOT Room-Device), +Employee-Patient (NOT Patient-Employee), etc.

  • +
  • Each node is only present once

  • +
  • Edges from a node to itself are not supported

  • +
  • Multiple parallel edges between two nodes are supported (\(\longrightarrow\) MultiGraph)

  • +
  • Edges do not have associated weights, but have at least one attribute “type” (e.g. Patient-Room, Employee-Patient)

  • +
  • The edge attribute “type” is not sorted alphabetically, and instead features the following 6 variations:

    +
    +
      +
    • Patient-Room

    • +
    • Device-Patient

    • +
    • Employee-Patient

    • +
    • Device-Room

    • +
    • Employee-Room

    • +
    • Device-Employee

    • +
    +
    +
  • +
  • The graph is undirected

  • +
  • The python built-in None-type object should not be used for attributes according to the networkx docs. +Instead, unknown values will be set to “NULL” or “”

  • +
+
+
+exception NodeBetweennessException
+

Class-specific exception used for betweenness calculation functions.

+
+ +
+
+add_edge_infection()
+

Sets “infected” attribute to all edges.

+

This function will iterate over all edges in the network and set an additional attribute infected, which +will be set to True if it connects to a patient node for which the vre_status attribute is set to +pos. For all other edges, this attribute will be set to False.

+
+ +
+
+add_network_data(patient_dict, subset='relevant_case', snapshot=datetime.datetime(2019, 7, 31, 18, 21, 24, 991461))
+

Adds nodes and edges data to the network.

+

Nodes and edges are added based on the data in patient_dict according to the subset specified (see description +of parameters below).

+
+
Parameters
+
    +
  • patient_dict (dict) – Dictionary containing all data required to build the graph. Please refer to +“Patient_Data_Overview.dov” for details on this argument.

  • +
  • subset (str) –

    Subset of data to be used, can be one of:

    +
      +
    • relevant_case \(\longrightarrow\) includes patients with a relevant case +(regardless of involvement in VRE screenings) and the data of relevant cases

    • +
    • risk \(\longrightarrow\) includes patients with an associated risk (i.e. +at least one VRE screening) and data of relevant cases

    • +
    +

  • +
  • snapshot (dt.dt()) – datetime.datetime() object specifying to which point in time data are to be +imported. Defaulting to the time of execution, this parameter can be used to create +a “snapshot” of the model, and will ignore (i.e. not add) edges in patient_dict +for which the ‘to’ attribute is larger than this parameter. Note that all nodes from +patient_dict will be added, but most “new” nodes will be created in isolation. And +since a call to this function is usually followed by a call to +remove_isolated_nodes(), these isolated nodes will then be stripped from the +network.

  • +
+
+
+
+ +
+
+export_node_betweenness(csv_sep=';', export_path=None)
+

Exports node betweenness.

+

This function will export node betweenness for all nodes from the network. This is done by exporting the sum +of all fractions of shortest paths a particular node is in, which is found in the attribute keys starting with +the “SP-” prefix (e.g. “SP-Node1-Node4”). These tuples contain 2 entries, the first being the fraction of +shortest path this node is found in (between the given pair of nodes), and the second one being the total +number of shortest paths. This function can only be executed once the function update_shortest_path_statistics() +has been called. Nodes without an attribute key starting with the “SP-” prefix will have node betweenness of 0.

+

The exported file is written into self.data_dir and contains 3 columns:

+
    +
  • Node ID

  • +
  • Node Type

  • +
  • Betweenness Score

  • +
+
+
Parameters
+
    +
  • csv_sep (str) – separator to be used in export file.

  • +
  • export_path (str) – Path to which node files will be written. If set to None (the default), the +exported file will be written to self.data_dir.

  • +
+
+
+
+ +
+
+export_patient_degree_ratio(csv_sep=';', export_path=None)
+

Calculates and exports patient degree ratio for all nodes in the network.

+

The patient degree ratio is defined for a single node_x as:

+

number of infected edges between node_x and patients / number of total edges between node_x and patients

+
+
Parameters
+
    +
  • csv_sep (str) – Separator for created csv file.

  • +
  • export_path (str) – Path to which export file will be written. If set to None (the default), the +exported file will be written to self.data_dir.

  • +
+
+
+

The result file will be written to the path [self.data_dir]/[self.snapshot_dt]_pdr.txt, and contains the +following columns: +- Node ID +- Node type +- Degree ratio +- Number of infected edges (always patient-related) +- Total number of patient-related edges +- Total number of edges (i.e. degree of node_x)

+
+ +
+
+export_shortest_path_length_overview(focus_nodes=None, csv_sep=';', export_path=None)
+

Exports an overview of shortest path lengths in the network to self.data_dir.

+

Exports an overview of shortest path lengths of all nodes in the network (if focus_nodes is None, +default). If a list of node identifiers is provided in focus_nodes instead, only these nodes will be +considered for the data export. Data are exported to the self.data_dir directory.

+
+

Note

+

This overview only considers one possible shortest path between any given pair of nodes, not all paths.

+
+
+
Parameters
+
    +
  • focus_nodes (list or None) – List of nodes considered for distribution.

  • +
  • csv_sep (str) – Separator used in export file.

  • +
  • export_path (str) – Path to which node files will be written. If set to None (the default), the +exported file will be written to self.data_dir.

  • +
+
+
+
+ +
+
+export_total_degree_ratio(csv_sep=';', export_path=None)
+

Exports total degree ratio (TDR) for all nodes in the network.

+

Will calculate and export the total degree ratio for all nodes in the network, which is defined for a +single node_x as:

+

\(TDR = \frac{Number~of~infected~edges~between~node +\_x~and~patients}{Total~number~of~edges~leading~to~node\_x}\)

+

The result file will be written to a file [self.data_dir]/[self.snapshot_dt]_tdr.txt, and contains the +following columns:

+
    +
  • Node ID

  • +
  • Node type

  • +
  • Degree ratio

  • +
  • Number of infected edges (always patient-related)

  • +
  • Total number of edges for node_x (also includes non-patient-related edges)

  • +
+
+
Parameters
+
    +
  • csv_sep (str) – Separator for created csv file.

  • +
  • export_path (str) – Path to which node files will be written. If set to None (the default), the +exported file will be written to self.data_dir.

  • +
+
+
+
+ +
+
+identify_id(string_id)
+

Checks whether a node with string_id exists in the network.

+

Returns the type (e.g. ‘Patient’, ‘Employee’, etc.) of string_id in the network. If string_id does not exist +in the network, None is returned instead.

+
+
Parameters
+

string_id (str) – string identifier of the node to be identified.

+
+
Returns
+

The type of node of string_id, or None if string_id is not found in the network.

+
+
Return type
+

str or None

+
+
+
+ +
+
+identify_node(node_id, node_type)
+

Checks whether node_id is found in self.Nodes[node_type].

+

This function is more performant than identify_id(), since it already assumes that the node type of the string +to be identified is known.

+
+
Parameters
+
    +
  • node_id (str) – String identifier of the node to be identified.

  • +
  • node_type (str) – Type of node to be identified (e.g. ‘Patient’)

  • +
+
+
Returns
+

True if node_id is found in self.Nodes[node_type], False otherwise.

+
+
Return type
+

bool

+
+
+
+ +
+
+inspect_network()
+

Important inspect function for the graph.

+

An important function that will inspect all properties of the network and return diagnostic measures on the +“quality”. This includes:

+
    +
  • Total number of nodes in the network

  • +
  • Number of isolated nodes in the network

  • +
  • Number of nodes in the network of type:

    +
    +
      +
    • Patient

    • +
    • Device

    • +
    • Employee

    • +
    • Room

    • +
    +
    +
  • +
  • Total number of edges in the network

  • +
  • Number of edges in the network of type:

    +
    +
      +
    • Patient-Device

    • +
    • Patient-Room

    • +
    • Patient-Employee

    • +
    • Employee-Device

    • +
    • Employee-Room

    • +
    • Device-Room

    • +
    +
    +
  • +
  • Number of improperly formatted edges. These include:

    +
    +
      +
    • Edges for which at least one node is empty, i.e. “”

    • +
    • Edges for which any one of the from, to, type, origin, and infected +(if self.edge_infected == True) attributes are not present

    • +
    +
    +
  • +
+

All result statistics are printed to log.

+
+ +
+
+static load_from_json(path_to_file)
+

Loads the .json file specified in path_to_file.

+
+
Parameters
+

path_to_file (str) – Path to .json file to be loaded.

+
+
Returns
+

The object loaded from path_to_file.

+
+
+
+ +
+
+new_device_node(string_id, name, warn_log=False)
+

Add a device node to the network.

+

Automatically sets the ‘type’ attribute to “Device”. Note that if string_id is empty (‘’), no node will be +added to the network and a warning will be logged if warn_log is True.

+
+
Parameters
+
    +
  • string_id (str) – string identifier of device to be added.

  • +
  • name (str) – name of device.

  • +
  • warn_log (bool) – flag indicating whether or not to log warning messages.

  • +
+
+
+
+ +
+
+new_edge(source_id, source_type, target_id, target_type, att_dict, log_warning=False)
+

Adds a new edge to the network.

+

The added edge will link source_id of source_type to target_id of target_type. Note that the edge will ONLY be +added if both source_id and target_id are found in the self.Nodes attribute dictionary. In addition, all +key-value pairs in att_dict will be added to the newly created edge.

+
+
Parameters
+
    +
  • source_id (str) – String identifying the source node

  • +
  • source_type (str) – source_id type, which must be one of [‘Patient’, ‘Room’, ‘Device’, ‘Employee’]

  • +
  • target_id (str) – String identifying the target node

  • +
  • target_type (str) – target_id type, which must be one of [‘Patient’, ‘Room’, ‘Device’, ‘Employee’]

  • +
  • att_dict (dict) – dictionary containing attribute key-value pairs for the new edge.

  • +
  • log_warning (bool) – flag indicating whether or not to log a warning each time a faulty edge is encountered

  • +
+
+
+
+ +
+
+new_employee_node(string_id, warn_log=False)
+

Add an employee node to the network.

+

Automatically sets the ‘type’ attribute to “employee”. Note that if string_id is empty (‘’), no node will be +added to the network and a warning will be logged if warn_log is True.

+
+
Parameters
+
    +
  • string_id (str) – string identifier of employee to be added.

  • +
  • warn_log (bool) – flag indicating whether or not to log warning messages.

  • +
+
+
+
+ +
+
+new_generic_node(string_id, attribute_dict)
+

Adds a new generic node to the graph.

+

String_id will be used as the unique identifier, and all key-value pairs in attribute_dict as additional +information to add to the node. If a node already exists, only new entries in attribute_dict will be added to +it, but it will otherwise be left unchanged.

+
+
Parameters
+
    +
  • string_id (str) – string identifier for node

  • +
  • attribute_dict (dict) – dictionary of key-value pairs containing additional information

  • +
+
+
+
+ +
+
+new_patient_node(string_id, risk_dict, warn_log=False)
+

Add a patient node to the network.

+

Automatically sets the ‘type’ attribute to “Patient”. Also adds risk_dict to the ‘risk’ attribute as defined +for patient nodes. It will also add an attribute ‘vre_status’ (‘pos’ or ‘neg’) depending on whether or not code +32 is found in risk_dict. Note that if string_id is empty (‘’), no node will be added to the network and a +warning will be logged if warn_log is True.

+
+
Parameters
+
    +
  • string_id (str) – string identifier of patient to be added.

  • +
  • risk_dict (dict) – dictionary mapping dt.dt() to Risk() objects corresponding to a patient’s VRE screening +history.

  • +
  • warn_log (bool) – flag indicating whether or not to log warning messages.

  • +
+
+
+
+ +
+
+new_room_node(string_id, ward=None, room_id=None, warn_log=False)
+

Add a room node to the network.

+

Automatically sets the ‘type’ attribute to “Room” and ward to the “ward” attribute, and sets room_id to either +the specified value or “NULL”. Note that if string_id is empty (‘’), no node will be added to the network and a +warning will be logged if warn_log is True.

+
+
Parameters
+
    +
  • string_id (str) – string identifier of room to be added.

  • +
  • ward (str) – name of ward of this room

  • +
  • room_id (str) – room id (in string form) of this room

  • +
  • warn_log (bool) – flag indicating whether or not to log warning messages.

  • +
+
+
+
+ +
+
+static parse_filename(filename, replace_char='@')
+

Parses filename and replaces problematic characters with replace_char.

+
+
Parameters
+
    +
  • filename (str) – Name of file to parse.

  • +
  • replace_char (str) – Replacement for problematic characters.

  • +
+
+
Returns
+

parsed (unproblematic) filename

+
+
Return type
+

str

+
+
+
+ +
+
+remove_isolated_nodes(silent=False)
+

Removes all isolated nodes from the network.

+

Isolated nodes are identified as having degree 0.

+
+
Parameters
+

silent (bool) – Flag indicating whether or not to log progress (defaults to False)

+
+
+
+ +
+
+static save_to_json(path_to_file, saved_object)
+

Saves object to path_to_file in JSON format.

+
+
Parameters
+
    +
  • path_to_file (str) – Path to file to be saved (must include .json suffix)

  • +
  • saved_object – Object to be saved to file in JSON format.

  • +
+
+
+
+ +
+
+trim_model(snapshot_dt)
+

Trims the current model.

+

Removes all edges for which the to attribute is larger than snapshot_dt, and updates the self.snapshot_dt +attribute. However, this function does NOT remove isolated nodes.

+
+
Parameters
+

snapshot_dt (dt.dt()) – dt.dt() object specifying to which timepoint the model should be trimmed

+
+
+
+ +
+
+update_edge_attributes(edge_tuple, attribute_dict)
+

Updates the edge identified in edge_tuple.

+

Add all key-value pairs in attribute_dict. Existing attributes will be overwritten.

+
+
Parameters
+
    +
  • edge_tuple (tuple) – Tuple of length 3 identifying the edge \(\longrightarrow\) (source_id, +target_id, key) (key is required to uniquely identify MultiGraph() edges)

  • +
  • attribute_dict (dict) – dictionary of key-value pairs with which to update the edge

  • +
+
+
+
+ +
+
+update_node_attributes(node_id, attribute_dict)
+

Updates the node identified in node_id.

+

The node will be updated with all key-value pairs in attribute_dict. Note that existing attributes will be +overwritten with the values in attribute_dict.

+
+
Parameters
+
    +
  • node_id (str) – string identifier for the node

  • +
  • attribute_dict (dict) – dictionary of key-value pairs with which the node will be updated

  • +
+
+
+
+ +
+
+update_shortest_path_statistics(focus_nodes=None, approximate=False, max_path_length=None)
+

Prerequisite function for calculating betweenness centrality.

+

Adds new attributes to all nodes in focus_nodes, where each attribute is a pair of nodes (sorted alphabetically) +with a SP- prefix to tuples of length 2 containing (shortest_paths_through_this_node, total_shortest_paths) +For example:

+

{ 'SP-Node1-Node2': (2, 5), 'SP-Node1-Node3': (1, 8), ... }

+
+

Note

+

This may result in a lot of additional attributes for nodes which are integral to the network. This approach is chosen because the networkx module does not allow updates to a dict-of-a-dict type +of attributes - i.e. if these attributes were to be combined in a 1st-level key ‘shortest-paths’, the entire +content would have to be copied every time a new node-pair attribute is added, which would make the function +extremely inefficient.

+
+

This is an important prerequisite function for the calculation of betweenness centrality.

+
+
Parameters
+
    +
  • focus_nodes (list) – list of node IDs. If set to None (the default), all nodes in the network will be +considered. WARNING: this may be extremely resource-intensive!

  • +
  • approximate (bool) – Flag indicating whether to consider all shortest paths in the network +(False, default) or approximate the betweenness statistic using the +max_path_length argument. Note that if this is set to False, attributes of all +nodes will be written to file so as to avoid memory overflows. This requires a +preceding call to self.write_node_files().

  • +
  • max_path_length (int) – Maximum path length to consider for pairs of nodes when approximate == True. +If set to None (default), all possible shortest paths will be considered.

  • +
+
+
+
+ +
+
+write_node_files(attribute_subset=None, export_path=None)
+

Writes a JSON representation of all nodes in the network to file.

+

For each node in the network, this function will write a JSON representation containing a dictionary of +node_data_dict[keys], where keys are all entries in node_attributes if that key is actually found in the node +data dictionary. The file is named [node_id].json and written to the self.data_dir directory.

+

This function is mainly used to “hard-store” values for highly resource-intensive calculations such as betweenness centrality and to avoid memory overflows.

+
+

Note

+

This function will also set the self.node_files_written flag to True.

+
+
+
Parameters
+
    +
  • attribute_subset (list) – List of keys in a node’s attribute_dict to be included in the written JSON +representation of the node. If set to None (the default), all attributes will +be included.

  • +
  • export_path (str) – Path to which node files will be written. If set to None (the default), all +files will be written to self.data_dir.

  • +
+
+
+
+ +
+ +
+
+ + +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/spitalhygiene/Docs/make.bat b/spitalhygiene/Docs/make.bat new file mode 100644 index 0000000..4d9eb83 --- /dev/null +++ b/spitalhygiene/Docs/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=source +set BUILDDIR=build + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% + +:end +popd diff --git a/spitalhygiene/Docs/source/Patient_Test_Data.rst b/spitalhygiene/Docs/source/Patient_Test_Data.rst new file mode 100644 index 0000000..1bb2794 --- /dev/null +++ b/spitalhygiene/Docs/source/Patient_Test_Data.rst @@ -0,0 +1,258 @@ +******************** +Patient Test Data +******************** + +The test patient dataset for the VRE model consists of a small subset of the complete dataset. The SQL queries used to +extract the test patient data are `identical` to those used for the true dataset, with the exception that data are +restricted to a small subset in the form of a simple ``WHERE`` statement. 
This filter includes the following 3 patient +IDs: + +- 00003067149 +- 00008301433 +- 00004348346 + +Associated with these patients are the following 40 case IDs: + +=========== =========== +Patient ID Case ID +=========== =========== +00003067149 0006280483 +00003067149 0006314210 +00003067149 0006336438 +00003067149 0005889802 +00003067149 0005873082 +00003067149 0006065973 +00003067149 0006091736 +00003067149 0006148746 +00003067149 0006334066 +00003067149 0006059391 +00003067149 0005976205 +00003067149 0006057834 +00003067149 0005983693 +00003067149 0006520444 +00003067149 0006931777 +00003067149 0006812114 +00003067149 0005965462 +00003067149 0006452545 +00003067149 0006433446 +00003067149 0006466165 +00004348346 0001927876 +00004348346 0004555507 +00004348346 0004728517 +00004348346 0001928507 +00004348346 0002802610 +00004348346 0004204668 +00004348346 0004181978 +00003067149 0006951942 +00003067149 0005880782 +00008301433 0002289902 +00008301433 0004411153 +00008301433 0004411005 +00008301433 0006565152 +00008301433 0003962974 +00008301433 0006594482 +00008301433 0006596375 +00008301433 0006551728 +00003067149 0005864325 +00003067149 0005877026 +00003067149 0006069476 +=========== =========== + +For partners, only a single entry associated to one of the three test patients is used: + +- 0010000990 + +For CHOP codes, the following sub-selection of codes will be imported: + +- Z99.B8.11 +- Z50.27.32 +- Z00.99.60 +- Z88.38.60 +- Z99.85 +- Z89.07.24 +- Z50.27.32 +- Z00.99.60 +- Z88.38.60 +- Z99.85 +- Z50.23.13 +- Z50.12.09 +- Z88.79.50 +- Z00.9A.13 +- Z39.32.41 +- Z50.93 +- Z34.84 +- Z34.89.99 +- Z39.29.89 +- Z50.52 +- Z00.93.99 +- Z00.90.99 +- Z00.99.10 +- Z50.27.32 +- Z00.99.60 +- Z88.38.60 +- Z99.85 +- Z99.04.10 +- Z94.8X.40 +- Z99.B7.12 +- Z99.07.3C +- Z99.04.15 +- Z99.05.47 +- Z99.B7.13 +- Z99.0A +- Z99.28.11 +- Z50.52 +- Z54.52 +- Z00.93.99 +- Z00.90.99 +- Z51.22.11 +- Z39.29.89 +- Z99.00 +- Z54.12.11 +- Z50.12.12 +- Z88.79.50 +- Z54.25 +- Z36.11.22 +- 
Z36.11.26 +- Z36.1C.12 +- Z39.61.10 +- Z39.63 +- Z39.64 +- Z88.79.50 +- Z01.16.12 +- Z99.00 + +For appointments (german: "Termine"), only the following 98 TerminIDs are used: + +- 38515699 +- 38321122 +- 35416924 +- 1164130 +- 38470639 +- 41827160 +- 39893063 +- 38411180 +- 35571391 +- 35130813 +- 36160483 +- 40766840 +- 42155710 +- 39491988 +- 36067632 +- 37374631 +- 36129549 +- 39001478 +- 39425469 +- 34338471 +- 35630084 +- 35139096 +- 38431954 +- 38452040 +- 40344805 +- 13831398 +- 38063644 +- 38539785 +- 34220024 +- 39819467 +- 39423020 +- 38386995 +- 42394432 +- 38446243 +- 42213628 +- 38565198 +- 39893320 +- 37244357 +- 37554138 +- 41124954 +- 39051017 +- 36129560 +- 35621237 +- 38772701 +- 21130116 +- 38063650 +- 39608858 +- 39427731 +- 21131159 +- 38331618 +- 38062724 +- 24171386 +- 14908956 +- 41909560 +- 39114133 +- 14091256 +- 38939623 +- 35626775 +- 35139491 +- 36006751 +- 38329080 +- 41909690 +- 35130747 +- 36129541 +- 1278803 +- 38507433 +- 1192059 +- 39456191 +- 14091249 +- 39933520 +- 24291359 +- 36071093 +- 36160474 +- 19096210 +- 40218521 +- 1162144 +- 38660148 +- 42211133 +- 39613790 +- 24230235 +- 38262758 +- 35417252 +- 19252406 +- 39215737 +- 38446041 +- 36830543 +- 35200182 +- 40766156 +- 36070942 +- 34310589 +- 37232112 +- 34337667 +- 38446523 +- 34482529 +- 17297480 +- 39298995 +- 36830574 +- 1405150 + +And finally for devices, the subset is restricted to the following GeraetIDs: + +- 134074 +- 125922 +- 137160 +- 125916 +- 125981 +- 125981 +- 64174 +- 125921 +- 125981 +- 125981 +- 125981 +- 125981 +- 125981 +- 28609 +- 86293 +- 125981 +- 125981 +- 125981 +- 86293 +- 125981 +- 125981 +- 64174 +- 125981 +- 125981 +- 125981 +- 125981 +- 125981 +- 125974 +- 28609 +- 125981 + +------ diff --git a/spitalhygiene/Docs/source/Unused.rst b/spitalhygiene/Docs/source/Unused.rst new file mode 100644 index 0000000..bf77846 --- /dev/null +++ b/spitalhygiene/Docs/source/Unused.rst @@ -0,0 +1,6 @@ +************************ +``Unused`` folder 
+************************ + +This folder contains archived files and scripts no longer used in the current version of the VRE model. + diff --git a/spitalhygiene/Docs/source/VRE_Model.rst b/spitalhygiene/Docs/source/VRE_Model.rst new file mode 100644 index 0000000..84f8a6b --- /dev/null +++ b/spitalhygiene/Docs/source/VRE_Model.rst @@ -0,0 +1,11 @@ +******************** +VRE Model Overview +******************** + +Welcome to the VRE Model Documentation! + +The VRE model is documented with `Sphinx` on a `per-folder` basis. The root folder for this documentation is the +``spitalhygiene`` folder of the ``vre-data-science`` repository on the Insel GitHub. + + + diff --git a/spitalhygiene/Docs/source/conf.py b/spitalhygiene/Docs/source/conf.py new file mode 100644 index 0000000..d0d1dbf --- /dev/null +++ b/spitalhygiene/Docs/source/conf.py @@ -0,0 +1,206 @@ +# -*- coding: utf-8 -*- +# +# Configuration file for the Sphinx documentation builder. +# +# This file does only contain a selection of the most common options. For a +# full list see the documentation: +# http://www.sphinx-doc.org/en/master/config + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. 
+# +import os +import sys +sys.path.insert(0, os.path.abspath('../../resources')) +sys.path.insert(0, os.path.abspath('../../vre')) +sys.path.insert(0, os.path.abspath('../../vre/src/main/python/vre')) +sys.path.insert(0, os.path.abspath('../../vre/src/main/python/vre/model')) + + +# -- Project information ----------------------------------------------------- + +project = 'VRE Model' +copyright = '2019, SZ' +author = 'SZ' + +# The short X.Y version +version = '' +# The full version, including alpha/beta/rc tags +release = '0.0.1' + + +# -- General configuration --------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.doctest', + 'sphinx.ext.todo', + 'sphinx.ext.coverage', + 'sphinx.ext.mathjax', + 'sphinx.ext.githubpages', + 'sphinxcontrib.napoleon', +] + +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_include_init_with_doc = False +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = False +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = True +napoleon_use_keyword = True +napoleon_custom_sections = None + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# The language for content autogenerated by Sphinx. 
Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = [] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = None + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_rtd_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +# html_theme_options = {} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Custom sidebar templates, must be a dictionary that maps document names +# to template names. +# +# The default sidebars (for documents that don't match any pattern) are +# defined by theme itself. Builtin themes are using these templates by +# default: ``['localtoc.html', 'relations.html', 'sourcelink.html', +# 'searchbox.html']``. +# +# html_sidebars = {} + + +# -- Options for HTMLHelp output --------------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = 'VREModeldoc' + + +# -- Options for LaTeX output ------------------------------------------------ + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). 
+ # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'VREModel.tex', 'VRE Model Documentation', + 'SZ', 'manual'), +] + + +# -- Options for manual page output ------------------------------------------ + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'vremodel', 'VRE Model Documentation', + [author], 1) +] + + +# -- Options for Texinfo output ---------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'VREModel', 'VRE Model Documentation', + author, 'VREModel', 'One line description of project.', + 'Miscellaneous'), +] + + +# -- Options for Epub output ------------------------------------------------- + +# Bibliographic Dublin Core info. +epub_title = project + +# The unique identifier of the text. This can be a ISBN number +# or the project homepage. +# +# epub_identifier = '' + +# A unique identification for the text. +# +# epub_uid = '' + +# A list of files that should not be packed into the epub file. +epub_exclude_files = ['search.html'] + + +# -- Extension configuration ------------------------------------------------- + +# -- Options for todo extension ---------------------------------------------- + +# If true, `todo` and `todoList` produce output, else they produce nothing. 
+todo_include_todos = True diff --git a/spitalhygiene/Docs/source/index.rst b/spitalhygiene/Docs/source/index.rst new file mode 100644 index 0000000..fa32ed8 --- /dev/null +++ b/spitalhygiene/Docs/source/index.rst @@ -0,0 +1,21 @@ +.. VRE Model documentation master file, created by + sphinx-quickstart on Tue Apr 23 15:53:29 2019. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to the VRE Model documentation! +======================================= + +.. toctree:: + :caption: Contents: + + VRE_Model.rst + resources.rst + sql.rst + Unused.rst + vre.rst + model.rst + Patient_Test_Data.rst + + + diff --git a/spitalhygiene/Docs/source/model.rst b/spitalhygiene/Docs/source/model.rst new file mode 100644 index 0000000..0d52e1f --- /dev/null +++ b/spitalhygiene/Docs/source/model.rst @@ -0,0 +1,65 @@ +***************************************** +``vre/src/main/python/vre/model`` folder +***************************************** + +This folder contains the definitions for all classes used in the VRE model. + + +----- + +File: Appointment.py +=============================== + +.. automodule:: Appointment + :members: + + +File: Bed.py +=============================== + +.. automodule:: Bed + :members: + + +File: Care.py +=============================== + +.. automodule:: Care + :members: + + +File: Chop.py +=============================== + +.. automodule:: Chop + :members: + + +File: Devices.py +=============================== + +.. automodule:: Device + :members: + + +File: Employee.py +=============================== + +.. automodule:: Employee + :members: + + +File: ICD.py +=============================== + +.. automodule:: ICD + :members: + + +File: Medication.py +=============================== + +.. 
automodule:: Medication + :members: + + diff --git a/spitalhygiene/Docs/source/resources.rst b/spitalhygiene/Docs/source/resources.rst new file mode 100644 index 0000000..a0a0813 --- /dev/null +++ b/spitalhygiene/Docs/source/resources.rst @@ -0,0 +1,28 @@ +************************ +``resources`` folder +************************ + +This folder contains important functions for loading data from SQL into CSV, thereby preparing the "raw" data used for +building the actual network models. + +Most importantly, this folder also contains the file ``Update_Model.sh``, which is a bash script controlling `all steps` +in the VRE calculation. Since all VRE data are recalculated once per day, this includes (in order): + +1) Backing up data from the previous calculation cycle in the HDFS file system (from step 4 in the previous run) +2) Reloading data from SQL into CSV +3) Running the VRE analysis (all steps of the analysis are controlled with ``feature_extractor.py``) +4) Adding new data (SQL files, CSV files, other data) to the HDFS file system + + +File: Query_Atelier_Data.py +=============================== + +.. automodule:: Query_Atelier_Data + :members: + + +File: preprocesor.py +=============================== + +.. automodule:: preprocessor + :members: diff --git a/spitalhygiene/Docs/source/sql.rst b/spitalhygiene/Docs/source/sql.rst new file mode 100644 index 0000000..4ccc191 --- /dev/null +++ b/spitalhygiene/Docs/source/sql.rst @@ -0,0 +1,6 @@ +************************ +``sql`` folder +************************ + +This folder contains important functions for extracting SQL data. 
+ diff --git a/spitalhygiene/Docs/source/vre.rst b/spitalhygiene/Docs/source/vre.rst new file mode 100644 index 0000000..15213bf --- /dev/null +++ b/spitalhygiene/Docs/source/vre.rst @@ -0,0 +1,36 @@ +***************************************** +``vre/src/main/python/vre`` folder +***************************************** + +This folder contains all files relevant for building the actual VRE model. + + +----- + +File: data_compiler.py +=============================== + +.. automodule:: data_compiler + :members: + + +File: feature_extractor.py +=============================== + +.. automodule:: feature_extractor + :members: + + +File: HDFS_data_loader.py +=============================== + +.. automodule:: HDFS_data_loader + :members: + + +File: networkx_graph.py +=============================== + +.. automodule:: networkx_graph + :members: + diff --git a/spitalhygiene/README.md b/spitalhygiene/README.md new file mode 100644 index 0000000..0fe7988 --- /dev/null +++ b/spitalhygiene/README.md @@ -0,0 +1,27 @@ +# Directory Overview + +Folder `vre` + +Contains all functions relevant for data processing, model creation, feature vector export, and testing. + +----- + +Folder `sql`: + +Contains 3 folders: + - `tests` → Contains SQL queries yielding the test dataset + - `vre` → Contains SQL queries yielding the complete model dataset + - `quality` → Contains SQL queries yielding data used for quality assessment + +----- + +Folder `resources`: + +Contains various output files from the model and the `update_db.*` scripts, which control the querying and export of data from Atelier_DataScience to CSV. + +----- + +Folder `Docs`: + +Contains the Sphinx documentation for the entire VRE model code. 
+ diff --git a/spitalhygiene/Unused/AM aus PDMS_VRE_2018-08-18_fg.xlsx b/spitalhygiene/Unused/AM aus PDMS_VRE_2018-08-18_fg.xlsx new file mode 100644 index 0000000..65e9aa8 Binary files /dev/null and b/spitalhygiene/Unused/AM aus PDMS_VRE_2018-08-18_fg.xlsx differ diff --git a/spitalhygiene/Unused/BWTYP-BWART.csv b/spitalhygiene/Unused/BWTYP-BWART.csv new file mode 100644 index 0000000..a7c5efe --- /dev/null +++ b/spitalhygiene/Unused/BWTYP-BWART.csv @@ -0,0 +1,72 @@ +BWART,Text +4-BE,Besuch Krankh. +4-BG,Besuch Gutacht. +4-BI,B. ex. Institut +4-BS,Besuch Schwang. +4-BU,Besuch Unfall +4-BV,Besuch Vorsorge +4-BW,Besuch Wissen. +4-GB,Besuch GG +4-GK,B.ex.KH GG +4-KG,B.ex.KH Gutach. +4-KK,B.ex.KH Krankh. +4-KS,B.ex.KH Schw. +4-KU,B.ex.KH Unfall +4-KV,Kostenvoransch. +4-KW,B.ex.KH Wissen. +4-SB,stat. Besuch +4-VM,Vers.-Mutation +1-AE,Aufn. Entbind. +1-AG,Aufn. Gutachten +1-AK,Aufn. Krankheit +1-AN,Aufn. Neugeb. +1-AO,Aufn.Organentn. +1-AS,Aufn.Muttersch +1-AT,Totgeburt +1-AU,Aufn. Unfall +1-AW,Aufn. Wissensch +1-AX,Aufn. unklar +1-EE,Ex.KH Enbind. +1-EG,Ex.KH Gutachten +1-EI,Ex. Institut +1-EK,Ex.KH Krankheit +1-EN,Ex.KH Neugeb. +1-ES,Ex.KH Muttersch +1-EU,Ex.KH Unfall +1-EW,Ex.KH Wissen. +1-EX,Ex.KH unklar +1-GA,Aufnahme GG +1-GE,Ex.KH GG +1-GN,Neugeborenes GG +1-NA,Neugeb. <28T +1-WA,Wartepat. Aufn. +3-KA,Klassen+Abt.-W. +3-KE,Klassen+Einh.-W +3-KF,Klassen+Fachr.W +3-KZ,Klassen+ZimmerW +3-WA,Abteil.-Wechsel +3-WB,Bettenwechsel +3-WE,Einh.-Wechsel +3-WF,Fachr.-Wechsel +3-WK,Klass.-Wechsel +3-WS,Stufenwechsel +3-WZ,Zimmerwechsel +6-BA,Behandlung Akut +6-BK,Behandlung ExKh +6-RA,Rehosp aus +6-UB,Urlaub-Beginn +6-UO,Ohne Erlaubnis +7-EA,Ende Akutbehand +7-EK,Ende ExBehandl. +7-RE,Rehosp ein +7-UE,Urlaub-Ende +2-EA,Ambulanz ex.KH +2-EB,Entl. Tarifbere +2-EI,Entl.ext.Instit +2-EK,Entl. in ex.KH +2-EN,Entlassung +2-EO,Entl.Organentn. +2-ET,Totgeburt +2-EV,Verstorben +2-EW,EntLeigner Wun +2-WE,Wartepat. Entl. 
\ No newline at end of file diff --git a/spitalhygiene/Unused/PDMS_ATC.csv b/spitalhygiene/Unused/PDMS_ATC.csv new file mode 100644 index 0000000..d5bfc4d --- /dev/null +++ b/spitalhygiene/Unused/PDMS_ATC.csv @@ -0,0 +1 @@ +pharmaID,PharmaName,Mat.Nr.,Pharmacode,ATC 1001253, NaCl 0.9% Perf.,,#N/A,#N/A 1000358,Acidum Folicum Tbl,10043243,2107469,R05X 1001112,Actemra Inf Lsg 200mg/10ml,10045552,2107469,R05X 1001113,Actemra Inf Lsg 400mg/m20ml,10045553,2107469,R05X 1001114,Actemra Inf Lsg 80mg/4ml,10045554,2107469,R05X 1001115,Actemra Inj Lsg 162mg/0.9 ml,,#N/A,#N/A 1000840,Actilyse Amp 20mg Lsg.,10058684,1014062,N05CD06 1000823,Actilyse Amp.50mg Lsg,10028877,1057433,A01AD02 1000263,Actilyse Lsg,,#N/A,#N/A 1000457,Adalat CR Retardtbl 30 mg,10026080,9000173,L01XD04 1000876,Adalat retard Tbl 20 mg,10027662,1057433,A01AD02 1001069,Addaven,10079435,,J01XX08 1000750,Adrenalin 100 g/ml Bolus,10056070,2107469,R05X 71,Adrenaline 1mg/ml,10055448,2107469,R05X 56,Adrenaline Perfusor,,#N/A,#N/A 1001179,Adriblastin Inf Lsg,10027448,1057433,A01AD02 1001165,Advate TS 1000 E Inj Lsg,10085047,,J01XX08 1001213,Advate TS 1500 E Inj Lsg,10085043,,J01XX08 1001166,Advate TS 2000 U,10085044,,J01XX08 1001190,Advate TS 250 E Inj Lsg,10085045,,J01XX08 1001191,Advate TS 500 E Inj Lsg,10085048,,J01XX08 1001102,Aggrastat Inf Lsg,10028703,1057433,A01AD02 1000285,Akineton Inj Lsg 5 mg/ml,10028417,1057433,A01AD02 1000999,Albumin 20 %,10067721,9000089,J04AB02 1000054,Albumin 20%,10067721,9000089,J04AB02 1001000,Albumin 5 %,10067720,9000089,J04AB02 1001007,Albumin 5 % 500ml,10067720,9000089,J04AB02 1000541,Albumin 5%,10067720,9000089,J04AB02 1000505,Aldactone Filmtbl 100 mg,10030858,2107469,R05X 1000506,Aldactone Filmtbl 25 mg,10030857,2107469,R05X 1000877,Aldactone Filmtbl 50,10030822,2107469,R05X 1001103,Alimta Inf Lsg 100 mg,10047984,2107469,R05X 1001107,Alimta Inf Lsg 500 mg,10000494,9000173,L01XD04 1000548,Alucol Gel,10030439,1057433,A01AD02 1000669,AmBisome Stechamp 50mg,10028953,1057433,A01AD02 
1000267,Aminophyllin 240 mg,10026177,9000173,L01XD04 1000719,Amlodipin 5mg Tbl.,10060451,9000089,J04AB02 1000873,Amlodipin Tbl 10 mg,10058597,1014062,N05CD06 1000872,Amlodipin Tbl 5 mg,10060451,9000089,J04AB02 291,Amoxicillin tabl 1 mg,10047014,2107469,R05X 1000878,Ampho-Moronal Lutschtabletten,10072239,9000089,J04AB02 1000622,Ampho-Moronal Susp 10%,10000510,9000173,L01XD04 1000767,Andere nicht aufgel. Medis Tropfenform,,#N/A,#N/A 1000609,Andere nicht aufgelistete Medikamente,,#N/A,#N/A 1000784,Andere nicht aufgelistete Perfusoren,,#N/A,#N/A 1001241,Andere nicht aufgelistete Perfusoren 1,,#N/A,#N/A 1000820,Andere nicht augelistete Medi (IE),,#N/A,#N/A 1000356,Anexate Amp,10085804,,J01XX08 1001092,Angina Lutschtabl.,10025884,9000173,L01XD04 1000842,Angiox Trockensubstanz,10030302,1057433,A01AD02 1000788,Anticholium Inj Lsg,10053394,2107469,R05X 23,Antithrombin inf 50 U/ml,,#N/A,#N/A 1000465,Antra Infusionslsung 40 mg,10027481,1057433,A01AD02 6,Antra inj 40 mg,10027481,1057433,A01AD02 1000602,Aqua ad injectabila 100ml Amp.,10000438,9000173,L01XD04 1000824,Aqua ad injectabila 10ml Amp.,10055470,2107469,R05X 1000853,Aqua Dest Spl Lsg 1000ml,10080381,,J01XX08 1000841,Argatra Inf Lsg,10083236,,J01XX08 1000642,Arginin Chlorid Amp 20 ml,,#N/A,#N/A 1000253,Aspgic Inj Lsg 500 mg/5ml,10030533,1057433,A01AD02 1000255,Aspirin cardio Tbl 300 mg,10054986,2107469,R05X 1000256,Aspirin Tbl 100 mg,10054986,2107469,R05X 1000257,Aspirin Tbl 500 mg,10027954,1057433,A01AD02 1000276,ATG Fresenius Inf Lsg,10024527,9000173,L01XD04 1000879,Atorvastatin Tbl 10 mg,10060446,9000089,J04AB02 1000871,Atorvastatin Tbl 20 mg,10060447,9000089,J04AB02 1000880,Atorvastatin Tbl 40 mg,10060448,9000089,J04AB02 1000881,Atorvastatin Tbl 80 mg,10060449,9000089,J04AB02 1001106,Atriance Inf Lsg,10036601,2107469,R05X 11,Atropin Sulfat Amp,10094221,,J01XX08 1000388,Atrovent Inhal Lsg,10026802,9000173,L01XD04 1000389,Atrovent N Dosieraerosol 20ug,10028847,1057433,A01AD02 1000812,Augmentin 2.2 Inf 
Lsg,10088383,,J01XX08 1000781,Augmentin AD Tbl 1 g,10079780,,J01XX08 1000274,Augmentin Inj Lsg.,,#N/A,#N/A 1000272,Augmentin Tabl 625 mg,10079779,,J01XX08 1000854,Avalox Filmtbl 400 mg,10027981,1057433,A01AD02 1000855,Avalox Inf Lsg 400 mg,10062046,9000089,J04AB02 1001105,Avastin Inf Lsg 100 mg/4ml,10000492,9000173,L01XD04 1001137,Avastin Inf Lsg 400 mg/16 ml,10000493,9000173,L01XD04 1000713,Axotide Dosieraerosol 125ug,10027189,9000173,L01XD04 1001193,Bactrim Amp 400/80 mg Inf Lsg,10027968,1057433,A01AD02 1001198,Bactrim Amp. 400/80 mg Inf Lsg,10027968,1057433,A01AD02 1000507,Bactrim forte Lacktbl,10053393,2107469,R05X 1000508,Bactrim Inf Lsg,10027968,1057433,A01AD02 1000643,Bactroban Nasensalbe,10026436,9000173,L01XD04 1000803,Baldrian Tinktur,10055923,2107469,R05X 1000647,Baldrian Trpf.,10055923,2107469,R05X 1000557,Basis G5-K,,#N/A,#N/A 1000766,BasisG5-K+20KCL,,#N/A,#N/A 1000740,Becotal Inj Lsg,a.H.,#N/A,#N/A 1000538,Becozym forte Drag,10027562,1057433,A01AD02 1000758,Beloc Perfusor,10035256,2107469,R05X 1000440,Beloc zok ret Tbl 25 mg,10092351,,J01XX08 1000439,Beloc zok ret Tbl 50 mg,10092352,,J01XX08 1000438,Beloc zok ret. 
Tbl 100 mg,10092350,,J01XX08 1001143,Benefix TS 1000 U,10055888,2107469,R05X 1001144,Benefix TS 500 U,10055889,2107469,R05X 1000516,Benerva Inj Lsg,10028006,1057433,A01AD02 1000882,Benerva Tbl 100 mg,10027749,1057433,A01AD02 1000515,Benerva Tbl 300 mg,10027566,1057433,A01AD02 225,Benuron supp 1000 mg,10024765,9000173,L01XD04 1000686,Bepanthen Amp 500mg/2ml,10065251,9000089,J04AB02 1000326,Bepanthen Inj Lsg 500 mg,10065251,9000089,J04AB02 1001184,Beriate TS 1000 E Inj lsg,10064265,9000089,J04AB02 1001183,Beriate TS 500 E Inj Lsg,10064264,9000089,J04AB02 1000289,Berinert HS 500E,10073573,,J01XX08 1001186,Berinin P TS 1200 E Inj Lsg,10055890,2107469,R05X 1001185,Berinin P TS 600 E Inj Lsg,10055891,2107469,R05X 1001187,Beriplex P/N 500 TS Inj Lsg,10085046,,J01XX08 146,Betamethasone inj 4 mg/ml,,#N/A,#N/A 1000721,Bilol 5 mg Tbl.,10028167,1057433,A01AD02 1001244,Bivalirudin Accord TS 250mg,10092659,,J01XX08 1000556,Breivik-Periduralinfusion,,#N/A,#N/A 1000839,Brilique Tbl 90 mg,10058049,2107469,R05X 1000883,Brufen Filmtbl. 200 mg,10073545,,J01XX08 1000884,Brufen Filmtbl. 400 mg,10025818,9000173,L01XD04 1000885,Brufen Filmtbl. 600 mg,10025820,9000173,L01XD04 1000367,Bulboid Supp,10030441,1057433,A01AD02 1000704,Bupivacain 0.125%,10072780,,J01XX08 1000886,Buscopan Drag. 
10 mg,10044883,2107469,R05X 1000500,Buscopan Inj Lsg 20 mg/ml,10045380,2107469,R05X 1000501,Buscopan Supp 10 mg,10030916,2107469,R05X 1000290,Ca-Acetat-Phosphatbinder 400 mg,10025933,9000173,L01XD04 1000888,Calcimagon D3 Tbl.,10090180,,J01XX08 1000889,Calciparine 12500 E/ 0.5ml,10047941,2107469,R05X 1000597,Calciparine 5000 U/ 0.2 ml,10025394,9000173,L01XD04 1000291,Calcium Sandoz FF Brausetabl 1000 mg,10059697,1014062,N05CD06 1000891,Calcium Sandoz FF BrauseTbl.,10059697,1014062,N05CD06 1000292,Calcium Sandoz Lsg 10% 10 ml,,#N/A,#N/A 1000687,Calciumchlorid 5% reg.AK,10025301,9000173,L01XD04 1001125,Calciumgluconat Braun Lsg 10% 10 ml,10042472,2107469,R05X 1000618,Cancidas Amp 50 mg,10028704,1057433,A01AD02 1000617,Cancidas Amp 70 mg,10028705,1057433,A01AD02 1000582,"CAPD 2, 1.5% Glucose",10060313,9000089,J04AB02 1000749,"CAPD, 2.3% Glucose",10060314,9000089,J04AB02 1001076,"CAPD, 4.25% Glucose",10060370,9000089,J04AB02 1001077,"CAPD, Extraneal Icodextrin",10025341,9000173,L01XD04 1000890,Ca--Phosphatbinder,10073601,,J01XX08 292,Captopril tabl 1 mg,,#N/A,#N/A 1000813,Captopril Tabl 25 mg,10027134,9000173,L01XD04 460,Carbon granulae,,#N/A,#N/A 1000288,Carbostesin Inj Lsg 0.25% Amp,10031148,2107469,R05X 1001172,Carbovit Susp 15 g/100ml,10056129,2107469,R05X 1000334,Cardura CR Tbl 4 mg,10030237,1057433,A01AD02 1001082,Carvedilol Tabl 12.5 mg,10072946,,J01XX08 1000728,Catapresan 25g/ml Perfusor,10030610,1057433,A01AD02 410,Catapresan inj 150 g/ml,10030610,1057433,A01AD02 380,Catapresan Tabl 150 mikrogr,10036010,2107469,R05X 1000825,Cefepime Orpha 2g Amp.,10042576,2107469,R05X 1000300,Cefepime Sandoz 1g Amp.,10042576,2107469,R05X 1001220,Cefuroxim Inj Lsg 1500 mg,10084277,,J01XX08 1000307,Celebrex Kps 100 mg,10027575,1057433,A01AD02 1000892,CellCept Amp 500 mg,10027434,1057433,A01AD02 1000786,CellCept Kps 250 mg,10086940,,J01XX08 1000730,CellCept Susp,10028531,1057433,A01AD02 1000448,CellCept Tbl 500 mg,10086941,,J01XX08 1000666,Cepimex Amp 1g,,#N/A,#N/A 
1001035,Certican Tbl 0.25 mg,10086941,,J01XX08 1001034,Certican Tbl 0.5 mg,10029803,1057433,A01AD02 1001033,Certican Tbl 0.75mg,10029804,1057433,A01AD02 1001032,Certican Tbl 1mg,10042815,2107469,R05X 1000680,Chemotherapie,,#N/A,#N/A 1000378,Chinin 3.25% Amp,,#N/A,#N/A 1000626,Chinin Drg,10046473,2107469,R05X 1000998,Chinin HCl Inj Lsg,10062104,9000089,J04AB02 1001110,Cimzia Inj Lsg,10062104,9000089,J04AB02 1001256,Cipralex Tabl 10 mg,10030069,1057433,A01AD02 176,Ciproxin Infusion 200 mg,10025663,9000173,L01XD04 1000894,Ciproxin Infusion 400 mg,10027277,9000173,L01XD04 1000315,Ciproxin Tbl 500 mg,10035408,2107469,R05X 1000893,Ciproxin Tbl 500 mg,10035408,2107469,R05X 1001002,Citalopram Tbl 20mg,10029172,1057433,A01AD02 1000592,Citra-Lock Amp 5 ml,10057830,2107469,R05X 1000273,Clamoxyl Inj Lsg 1g,10028759,1057433,A01AD02 1000601,Clamoxyl Inj Lsg 2g,10073608,,J01XX08 1001177,Clazosentan Reverse Studie,,#N/A,#N/A 1001176,Clazosentan Reverse-Studie,,#N/A,#N/A 1000759,Clazosentan/Placebo,,#N/A,#N/A 1000863,Clexane 0.2 ml s/c,10072182,9000089,J04AB02 1000864,Clexane 0.4 ml s/c,10072183,9000089,J04AB02 1000895,Clexane 0.6 ml s/c,10041028,2107469,R05X 1000865,Clexane 0.8 ml s/c,10041030,2107469,R05X 1001016,Clexane 1.0 ml s/c,10041032,2107469,R05X 1001170,Clindamycin Kps 300 mg,10066660,9000089,J04AB02 1001168,Clindamycin Phosphat 300 mg/2ml,10066662,9000089,J04AB02 1001169,Clindamycin Phosphat 600 mg/4ml,10066663,9000089,J04AB02 1000641,Clyssie Klistier,10028800,1057433,A01AD02 1000896,Co Dafalgan Tabl 500 mg,10028800,1057433,A01AD02 1001097,Co-Amoxi 1.2 g Inj Lsg,10067731,9000089,J04AB02 1001098,Co-Amoxi 2.2g Inf Lsg,10088383,,J01XX08 1001096,Co-Amoxi Tbl 1g,10079780,,J01XX08 1001095,Co-Amoxi Tbl 625 mg,10079779,,J01XX08 1000720,CoAprovel,,#N/A,#N/A 1000578,Cocain Chlorid Blau 5%,,#N/A,#N/A 1001005,Colidimin Tbl 200 mg,10060132,9000089,J04AB02 1000589,Coliquifilm Augensalbe,,#N/A,#N/A 1001233,Colistin 1 Mio E,10053345,2107469,R05X 1001234,Colistin 1 Mio IE Inf 
Lsg,10053345,2107469,R05X 1000897,Colosan Mite,10025751,9000173,L01XD04 1000898,Concor Tbl 5 mg,10029025,1057433,A01AD02 431,Cordarone inj 50 mg/ml,10030835,2107469,R05X 1000268,Cordarone Tbl 200 mg,10028700,1057433,A01AD02 1000441,Corotrop,10026338,9000173,L01XD04 1000385,Corvert Inf Lsg,a.H.,#N/A,#N/A 1000419,Cosaar Tbl 50 mg,10031336,2107469,R05X 1000470,Creon forte Kps,10034462,2107469,R05X 1000899,Creon Kps,10088784,,J01XX08 1001167,Crestor Filmtbl 20 mg,10042469,2107469,R05X 1001222,Cubicin Inf Lsg,10042865,2107469,R05X 1001010,Cyklokapron Infusion,10054463,2107469,R05X 27,Cyklokapron inj 100 mg/ml,10054463,2107469,R05X 1000362,Cymevene Amp 500 mg,10000434,9000173,L01XD04 1001181,Cytosar Inf Lsg,10000477,9000173,L01XD04 1001022,Cytotect 2500 IE,,#N/A,J06BB09 1001020,Cytotect 500 IE,,#N/A,J06BB09 1001021,Cytotect CP 1000 E/10ml,10064963,9000089,J04AB02 1000471,Dafalgan Brausetbl 1g,10027835,1057433,A01AD02 1000472,Dafalgan Brausetbl 500 mg,10025744,9000173,L01XD04 1000900,Dafalgan ODIS Schmelz Tabl 500 mg,10067010,9000089,J04AB02 1000473,Dafalgan Supp 600 mg,10026265,9000173,L01XD04 1000683,Dafalgan Tabl 1g,10057234,2107469,R05X 275,Dafalgan tabl 500 mg,10025743,9000173,L01XD04 1000829,Dalacin C 600 Phosphat Amp.,10066663,9000089,J04AB02 1000320,Dalacin C Kps 300 mg,10066660,9000089,J04AB02 1000321,Dalacin C Phosphat Inj Lsg 300 mg,10066662,9000089,J04AB02 1000322,Dalacin Phosphat Inj Lsg 600 mg,10066663,9000089,J04AB02 1000696,DAM,,#N/A,#N/A 1001182,Daunoblastin Inf Lsg,10057545,2107469,R05X 1000651,Deltadex 40,,#N/A,#N/A 1000652,Deltadex 40,,#N/A,#N/A 1000644,Dequonal Gurgellsung,10031089,2107469,R05X 1000474,Deroxat Tbl 20 mg,10026437,9000173,L01XD04 1000667,Dex/Placebo,,#N/A,#N/A 1000668,Dex/Propofol+Placebo,,#N/A,#N/A 1000847,Dexdor Inf. 
Lsg,10062085,9000089,J04AB02 1001260,Diabetes Qualitativ,,#N/A,#N/A 1000198,Dialysis sol 13.6 mg/ml,,#N/A,#N/A 1000199,Dialysis sol 22.7 mg/ml,,#N/A,#N/A 1000200,Dialysis sol 38.6 mg/ml,,#N/A,#N/A 438,Diamox Amp,10025622,9000173,L01XD04 1000254,Diamox Tbl 250 mg,10030096,1057433,A01AD02 246,Diazepam tabl 5 mg,10027653,1057433,A01AD02 1001081,Dibenzyran Kps 10 mg,10058410,1014062,N05CD06 1000673,Dicodid Inj Lsg 15mg/ml,a.H.,#N/A,#N/A 1000355,Diflucan Inf Lsg 200 mg,10025870,9000173,L01XD04 1000353,Diflucan Kps 200 mg,10025868,9000173,L01XD04 1000354,Diflucan Kps 50 mg,10025867,9000173,L01XD04 1000675,Diflucan Susp Forte,10026587,9000173,L01XD04 1,Digoxin inj 0.25 mg/ml,10030699,68386,R05X 1000901,Digoxin Tabl 0.125 mg,10031062,2107469,R05X 348,Digoxin Tabl 0.25 mg,10030358,1057433,A01AD02 63,Dihydralazine inf 0.2 mg/ml,,#N/A,#N/A 1000297,Dilatrend Tabl 12.5 mg,10026821,9000173,L01XD04 1000298,Dilatrend Tabl 6.25 mg,10027390,1057433,A01AD02 1001071,Diltiazem Tbl 60 mg,10059044,1014062,N05CD06 1000331,Dilzem retard Filmtbl 120 mg,10025892,9000173,L01XD04 1000482,Dipiperon Tbl 40 mg,10025892,9000173,L01XD04 1000491,Disoprivan 1%,10087364,,J01XX08 208,Disoprivan 2% 20 mg/ml,10087365,,J01XX08 1000699,Disoprivan 2% BOLUS,10087365,,J01XX08 1000323,Distraneurin Mixt 500 mg/10 ml,10030975,2107469,R05X 1000332,Ditanrix 0.5 ml,a.H.,#N/A,#N/A 1000468,Ditropan Tbl 5mg,10025676,9000173,L01XD04 426,Dobutrex,10029983,1057433,A01AD02 1000700,Dormicum Bolus 5mg/ml,10024812,9000173,L01XD04 251,Dormicum inj 1 mg/ml,10025735,9000173,L01XD04 252,Dormicum inj 5 mg/ml,10024812,9000173,L01XD04 1000902,Dormicum Tbl 7.5 mg,10031400,2107469,R05X 1000679,Dormicum-Placebo,,#N/A,#N/A 1001075,Doxyclin Tbl 100 mg,10073607,,J01XX08 1000336,Droperidol Sintetica Inj Lsg 2.5 mg/ml,10030194,1057433,A01AD02 1000623,Dulcolax Supp 10mg,,#N/A,A06AB02 1000718,Dulcolax Supp.,,#N/A,A06AB02 322,Duphalac mixt 670 mg/ml,10065964,9000089,J04AB02 1000351,Durogesic TTS 50 ug,10029581,1057433,A01AD02 
1000627,Durogesic TTS Matrix 100 ug,10029583,1057433,A01AD02 1000768,Durogesic TTS Matrix 12 ug,10029668,1057433,A01AD02 1000350,Durogesic TTS Matrix 25 ug,10029580,1057433,A01AD02 1000524,Dyrenium compositum Kps,a.H.,#N/A,#N/A 1000763,Ebrantil Inj Lsg,10024672,9000173,L01XD04 1001013,Ecalta Amp 100 mg,10055602,2107469,R05X 1001255,ECMO-Material,,#N/A,#N/A 1000722,Ecodurex 5 mg Tbl.,a.H.,#N/A,#N/A 1000579,Ecofenac Lipogel 1%,10061198,9000089,J04AB02 1000782,Efexor Kps 75 mg,10075159,,J01XX08 1000790,Efient Tbl 10 mg,10055945,2107469,R05X 1000789,Efient Tbl 5 mg,10047763,2107469,R05X 1000100,EK,,#N/A,#N/A 1000743,EK Pflege,,#N/A,#N/A 1001003,Eltroxin Tbl 0.05mg,10056344,2107469,R05X 1001004,Eltroxin Tbl 0.10mg,10051918,2107469,R05X 1000341,Enatec Tbl 10 mg,a.H.,#N/A,#N/A 1000342,Enatec Tbl 20 mg,a.H.,#N/A,#N/A 1000343,Enatec Tbl 5 mg,a.H.,#N/A,#N/A 1001118,Enbrel Inj Lsg 25 mg/0.5 ml,10060394,9000089,J04AB02 1001108,Enbrel Inj Lsg 50 mg/ml,10047340,2107469,R05X 1001178,Endoxan Inf Lsg,10029153,1057433,A01AD02 1000903,Enlive Plus Apfel,10029610,1057433,A01AD02 1000904,Enlive Plus Orange,10029611,1057433,A01AD02 1000905,Enlive Plus Pfirsich,10029612,1057433,A01AD02 1000231,Ensure Plus,,#N/A,#N/A 1000906,Ensure Plus Banane,10027929,1057433,A01AD02 1000907,Ensure Plus Cassis,10027934,1057433,A01AD02 1000908,Ensure Plus Erdbeere,10027930,1057433,A01AD02 1000909,Ensure Plus Himbeer,10027931,1057433,A01AD02 1000910,Ensure Plus Kaffee,10027932,1057433,A01AD02 1000911,Ensure Plus Schokolade,10027933,1057433,A01AD02 1000912,Ensure Plus Vanille,10027935,1057433,A01AD02 1000802,Ensure TwoCAl Banane,10051829,2107469,R05X 1000810,Ensure TwoCAl Edbeer,10051828,2107469,R05X 1000811,Ensure TwoCAl Vanille,10051827,2107469,R05X 1000913,Ephedrin HCL Amp 50mg/ml,10053266,2107469,R05X 1000345,Ephedrin Inj Lsg 50 mg/10 ml,,#N/A,#N/A 1001104,Erbitux Inf Lsg 100 mg/20 ml,10038729,2107469,R05X 1001120,Erbitux Inf Lsg 500 mg/100 ml,,#N/A,#N/A 1000791,Erythrocin Inf 
Lsg,10027441,1057433,A01AD02 468,Erythropoietin inj 1000 U/ml,,#N/A,#N/A 1000346,Esmeron Amp. 100 mg /10 ml,10027353,9000173,L01XD04 1000498,Esmeron Inj Lsg,10027353,9000173,L01XD04 1000645,Ethanol Inf. Lsg 96%,10079563,,J01XX08 1000707,Euphyllin N Bolus,10029621,1057433,A01AD02 1000706,Euphyllin N Perfusor,10029621,1057433,A01AD02 153,Euthyrox tabl 0.1 mg,10093472,,J01XX08 1000914,Euthyrox Tabl 50 g,10093488,,J01XX08 1001136,Evoltra Inf Lsg 20 mg/20 ml,10046727,2107469,R05X 1001145,Faktor VII NF 600 U,10055897,2107469,R05X 1001088,Fasturtec Inj Lsg,10029448,1057433,A01AD02 1000514,Favistan Inj Lsg 40 mg/ml,,#N/A,#N/A 1001147,Feiba NF 2500 U,10065989,9000089,J04AB02 1001146,Feiba NF TS 1000 E Inj Lsg,10055882,2107469,R05X 1000580,Fenistil Gel 0.1%,10030624,1057433,A01AD02 1000251,Fentanyl inj,10066001,9000089,J04AB02 1000741,Fentanyl Perf,10079561,,J01XX08 1000915,Ferinject Amp 100mg/2ml,10039625,2107469,R05X 1000190,Fett 20%,,#N/A,#N/A 1000744,FFP Pflege,,#N/A,#N/A 1001149,Fibrogammin 1250 U,10085049,,J01XX08 1001148,Fibrogammin TS 250 U,10085050,,J01XX08 1000437,Flagyl Tbl 500 mg,10031106,2107469,R05X 1000585,Flammazine Creme,10030855,2107469,R05X 1000916,Flatulex Kautbl.,10026660,9000173,L01XD04 1000599,Florinef Tbl 0.1mg,10030601,1057433,A01AD02 1000352,Floxapen Inj Lsg 1g,10030524,1057433,A01AD02 1001235,Flucloxacillin TS Inj Lsg 1g,10088924,,J01XX08 1000726,Fluimucil Granulat,10025707,9000173,L01XD04 1000917,Fluimucil Granulat 100 mg,10025707,9000173,L01XD04 1000918,Fluimucil inj Lsg 300mg/3ml,,#N/A,#N/A 1001240,Flumazenil Inj Lsg,10085804,,J01XX08 1001261,Flssige Kost,,#N/A,#N/A 1000359,Folsure Amp,10025112,9000173,L01XD04 1000450,Fordtran Darmsplung,,#N/A,#N/A 1000732,Fordtran-Lsung,a.H.,#N/A,#N/A 1000830,Fortam 2g Inj Lsg,10026843,9000173,L01XD04 1001017,Fortam 500mg Inj Lsg,10026831,9000173,L01XD04 1000302,Fortam Stechamp 1g,10026842,9000173,L01XD04 1000769,Fortecortin Tbl 4 mg,10025906,9000173,L01XD04 1001126,Foscavir Inf Lsg 
6000mg/250ml,10045551,2107469,R05X 1000451,Fraxiparine,,#N/A,#N/A 1000831,Fraxiparine 0.3 ml s/c,10061177,9000089,J04AB02 1000832,Fraxiparine 0.4 ml s/c,10061178,9000089,J04AB02 1000833,Fraxiparine 0.6 ml s/c,10061179,9000089,J04AB02 1000834,Fraxiparine 0.8 ml s/c,10026518,9000173,L01XD04 1000714,Freka MIx 2000 ml,,#N/A,#N/A 1000050,Fresh Frozen Plasma,,#N/A,#N/A 1000957,Fresubin Drink Cappuccino,,#N/A,#N/A 1000960,Fresubin Drink Frchte,,#N/A,#N/A 1000958,Fresubin Drink Nuss,,#N/A,#N/A 1000959,Fresubin Drink Schokolade,,#N/A,#N/A 1000961,Fresubin Drink Vanille,,#N/A,#N/A 1000962,Fresubin Drink Walderdbeere,,#N/A,#N/A 1001266,Fresubin HP Energy,10025197,9000173,L01XD04 1000970,Fresubin HP Energy 500ml,10025197,9000173,L01XD04 308,Fungizone Amp,10000506,9000173,L01XD04 1000232,Furosemide 10 mg/ml inj,10030332,1057433,A01AD02 1000702,G5%+KCl 20,,#N/A,#N/A 1000563,G5%+KCl 40,,#N/A,#N/A 1000363,Garamycin,10025485,9000173,L01XD04 1001257,Gastrotipp,,#N/A,#N/A 1001258,Gastrotipp ll,,#N/A,#N/A 1001262,Gastrotipp/Smooth Food,,#N/A,#N/A 1001259,Gemixt/Feingeschn.,,#N/A,#N/A 1000625,Glocose 5%,,#N/A,#N/A 1000366,Glucagon Novo Inj Lsg 1 mg,10026125,9000173,L01XD04 1000919,Glucophage Tbl 500mg,10084275,,J01XX08 1000591,GlucoSalin /500ml,10079402,,J01XX08 1000562,GlucoSalin 2:1,10080941,,J01XX08 1000566,GlucoSalin 2:1+ K-Cl 80,,#N/A,#N/A 1000565,GlucoSaline 2:1 + K-Cl 40,,#N/A,#N/A 1000020,Glucose 5%,10056018,2107469,R05X 1000742,Glucose 5% /500ml Pflege,10056018,2107469,R05X 1000816,Glucose 5%/250ml,10056017,2107469,R05X 1000690,Glucose 10% Ampulle,10027496,1057433,A01AD02 1000022,Glucose 10%/ 500ml,10025160,9000173,L01XD04 1000544,Glucose 20% /100ml,10025292,9000173,L01XD04 1000835,Glucose 20% /500ml,10025162,9000173,L01XD04 1000689,Glucose 20% Ampulle,10027497,1057433,A01AD02 1000746,Glucose 20%/100ml Pflege,10025292,9000173,L01XD04 1000545,Glucose 40%,,#N/A,#N/A 1000564,Glucose 5%+ K-Cl 80,,#N/A,#N/A 1000567,Glucose 50% /500ml,,#N/A,#N/A 1000793,Glypressin Inj 
Lsg,10025030,9000173,L01XD04 1001070,Grafalon Inf Lsg,10079437,,J01XX08 1000338,Gyno Canesten Ovula 100 mg,10052968,2107469,R05X 1000792,Gyno Canesten Vag Tbl 200 mg,10082063,,J01XX08 1000701,Haemate HS,10055451,2107469,R05X 1001151,Haemate P 1000 U/ 2400 U,10055450,2107469,R05X 1001150,Haemate P 500 U/1200 U,10055451,2107469,R05X 1000799,Haemocomplettan,10055893,2107469,R05X 1001153,Haemocomplettan P 2g,10055892,2107469,R05X 1001152,Haemocomplettan P TS 1g Inf Lsg,10055893,2107469,R05X 1001061,Haemopressin TS Inj Lsg,10069007,9000089,J04AB02 1000370,Haldol Inj Lsg 5 mg/ml,10031202,2107469,R05X 1000371,Haldol Trpf 2mg/ml,10066496,9000089,J04AB02 239,Haloperidol inj 2.5 mg/ml,10031202,2107469,R05X 1000574,Hmofiltrationslsung HF9,,#N/A,#N/A 1001154,Helixate M2V 2000 U,10067096,9000089,J04AB02 1001155,Helixate M2V TS 1000 E Inj Lsg,10055452,2107469,R05X 1001212,Helixate M2V TS 500 E Inj Lsg,10055453,2107469,R05X 1000629,Hemeran Creme 1%,10080419,,J01XX08 1000628,Hemeran Emulgel 1%,10080419,,J01XX08 1000373,Heparin Bichsel 500 E/ 5 ml,10029277,1057433,A01AD02 1001018,Hepatect CP 2000 IE,10040419,2107469,R05X 1001019,Hepatect CP 500 IE,10027373,9000173,L01XD04 1001243,HERACLES Infusion 100ml,,#N/A,#N/A 1001242,HERACLES Infusion 560ml,,#N/A,#N/A 1001134,Herceptin Inf Lsg 150mg,10031330,2107469,R05X 1001135,Herceptin Inf Lsg 440 mg,10000473,9000173,L01XD04 1000577,Hibitane 0.1%,,#N/A,#N/A 1001109,Humira Inj Lsg,10088749,,J01XX08 1000382,Hydrocodon Inj Lsg 15mg/ ml,1600,#N/A,#N/A 1000794,Hydrocodon Inj Lsg,1600,#N/A,#N/A 1000725,Hydrocortison/Placebo,,#N/A,#N/A 152,Hydrocortisone inj 125 mg/ml,,#N/A,#N/A 1000770,Hydrocortone Tbl 10 mg,10029769,1057433,A01AD02 1001101,Ilomedin 20 ug/ml Inf Lsg,10028226,1057433,A01AD02 1000386,"Ilomedin Inj Lsg 50ug/2,5 ml",10028227,1057433,A01AD02 1000575,Imazol Creme Neue Formel,10027183,9000173,L01XD04 1000576,Imazol Cremepaste,10024721,9000173,L01XD04 1001157,Immunate S/D 500 U/ 250 U,10055880,2107469,R05X 1001156,Immunate S/D TS 1000 E/ 
500 E,10055881,2107469,R05X 1001158,Immunine STIM Plus 1200 U,10055886,2107469,R05X 1001159,Immunine STIM Plus 600 U,10055887,2107469,R05X 1000417,Imodium 2mg,10030746,2107469,R05X 1001011,IMP Telepressin/Placebo VIMC,,#N/A,#N/A 1000922,IMP/Placebo,,#N/A,#N/A 1000280,Imurek Inf Lsg,1790,9000173,L01XD04 1000281,Imurek Tabl 50 mg,10030752,2107469,R05X 1000492,Inderal Tbl 10 mg,10094679,,J01XX08 1000493,Inderal Tbl 40 mg,10094690,,J01XX08 1000413,Instillagel 2%,10063790,9000089,J04AB02 15,Insulin Actrapid inj 100 U/ml,10025741,9000173,L01XD04 1000381,Insulin Insulatard HM,10025756,9000173,L01XD04 1000963,Insulin Lantus,10028971,1057433,A01AD02 1000379,Insulin Mixtard 30 HM,10026092,9000173,L01XD04 1000931,Insulin Mixtard 50 HM,10026094,9000173,L01XD04 1000724,Insulin NovoRapid,10027737,1057433,A01AD02 1000921,Intestamin Neutral 500 ml,10060438,9000089,J04AB02 1000814,Intratect 10g Inf Lsg,10065998,9000089,J04AB02 1001041,Intratect10% Inf Lsg 5 g/50 ml,10065997,9000089,J04AB02 1001042,Intratect10% Inf Lsg 10 g/100ml,10065998,9000089,J04AB02 1001043,Intratect10% Inf Lsg 20 g/200ml,10065999,9000089,J04AB02 1001217,Irfen Tabl 400 mg,10025818,9000173,L01XD04 1000795,Isoket Dosieraerosol,10026194,9000173,L01XD04 94,Isoptin Inj Lsg,10028432,1057433,A01AD02 1000536,Isoptin Tbl 80 mg,10026531,9000173,L01XD04 1001029,Isosource Protein,10065996,9000089,J04AB02 1000216,Isosource Standard,10073152,,J01XX08 1000391,Isuprel 0.2 mg/ml,10079310,,J01XX08 1000423,Itinerol B 6 Supp,10044142,2107469,R05X 1000681,Joule,,#N/A,#N/A 1000396,Kalium Chlorid 15%,10028193,1057433,A01AD02 1000393,Kalium Effervetten,10035425,2107469,R05X 1000395,Kalium Phosphat,10026301,9000173,L01XD04 1000836,Kalium Phosphat fr Perfusor,10026301,9000173,L01XD04 1000397,Kaliumchlorid Lsg 1 mmol/ml 50 ml,10056072,2107469,R05X 1000612,Kalium-Chlorid Sirup,,#N/A,#N/A 1000608,KaliumChlorid Sirup 1mmol/ml,,#N/A,#N/A 1000080,K-Cl conc,,#N/A,#N/A 1001063,KCL ret. 
Hausmann Drg 10mmol (745mg),10030468,1057433,A01AD02 1000398,KCL Retard Drag,10030468,1057433,A01AD02 1000299,Kefzol Stechamp 1g,10000505,9000173,L01XD04 1000671,Kefzol Stechamp 2g,10087716,,J01XX08 1000676,Keppra Filmtbl.,10027922,1057433,A01AD02 1000756,Keppra Inf Lsg,10074759,,J01XX08 1001194,Ketalar 5mg/ml Perfusor Schmerzdienst,10000503,9000173,L01XD04 1000400,Ketalar Inj Lsg 500 mg/10 ml,10000503,9000173,L01XD04 1000857,Ketalar Perfusor,10000503,9000173,L01XD04 1001236,Ketamin Perfusor 250mg/50ml,2360,9000173,L01XD04 1000318,Klacid Amp,10026480,9000173,L01XD04 1000317,Klacid Tbl 500 mg,10026485,9000173,L01XD04 1001055,Kochsalz Tbl 1g,10069290,9000089,J04AB02 1001205,Kogenate SF Bio Set TS 1000 E Inj Lsg,10055871,2107469,R05X 1001204,Kogenate SF Bio Set TS 500 E Inj Lsg,10055872,2107469,R05X 1000674,Kolsuspension,,#N/A,#N/A 1000481,Konakion Amp,10025002,9000173,L01XD04 1000480,Konakion Kaudrg 10 mg,a.H.,#N/A,#N/A 1001263,Kopie von Adrenalin 100 g/ml,,#N/A,#N/A 1000082,K-Ph conc,10026301,9000173,L01XD04 1000258,Krenosin Inj Lsg,10026402,9000173,L01XD04 1001160,Kybernin P TS 1000 U,10055895,2107469,R05X 1001161,Kybernin P TS 500 U,10055896,2107469,R05X 1000779,Lamictal Tbl 100 mg,10026210,9000173,L01XD04 1000778,Lamictal Tbl 25 mg,10026208,9000173,L01XD04 1000850,Lamotrigin Tbl 100 mg,10026210,9000173,L01XD04 1000851,Lamotrigin Tbl 25 mg,10026208,9000173,L01XD04 4,Lasix 40 mg tabl,10030333,1057433,A01AD02 1000747,Lasix Bolus 20mg Amp.,10030332,1057433,A01AD02 482,Lasix Perfusor,10070532,9000089,J04AB02 1001175,Levetiracetam Inf Lsg,10074759,,J01XX08 1000411,Lidocain Amp 1%,10064402,9000089,J04AB02 1000924,Lidocain Amp 2%,10059085,1014062,N05CD06 1000727,Lidocain Studie Amp 1%,,#N/A,#N/A 1000703,LidocainCO2 2%+Adrenalin,10031088,2107469,R05X 34,Lidocaine inj 20 mg/ml,10059085,1014062,N05CD06 1000282,Lioresal Tabl 10 mg,10031108,2107469,R05X 1001252,Liquemin 25000 IE/5ml,10058414,1014062,N05CD06 107,Liquemin Amp,10058414,1014062,N05CD06 1000414,Lisinopril Tbl 
20mg,10029326,1057433,A01AD02 1000415,Lisinopril Tbl 5 mg,10029324,1057433,A01AD02 1001197,LJ501-Placebo,,#N/A,#N/A 1001202,LJ-501-Placebo 100 ml NaCl 0.9%,,#N/A,#N/A 1001201,LJ-501-Placebo 250 ml NaCl 0.9%,,#N/A,#N/A 1001200,LJ-501-Placebo 500 ml NaCl 0.9%,,#N/A,#N/A 1001199,LJ-501-Placebo1000ml NaCl 0.9%,,#N/A,#N/A 1000442,Loniten Tbl 10 mg,10059171,1014062,N05CD06 1000443,Loniten Tbl 2.5 mg,10059500,1014062,N05CD06 1000293,Lopirin Tabl 12.5 mg,a.H.,#N/A,#N/A 1000294,Lopirin Tabl 25 mg,a.H.,#N/A,#N/A 1000436,Lopresor Inj Lsg,a.H.,#N/A,#N/A 1001080,Losartan Tabl 50 mg,10073530,,J01XX08 1000925,Lyman Gel,10082273,,J01XX08 1000846,Lyrica Kps 25 mg,10088939,,J01XX08 1000796,Lyrica Kps 75 mg,10088943,,J01XX08 1001015,Mabthera Amp 500 mg,10000462,9000173,L01XD04 1001014,Mabthera Inf Lsg 100mg/10ml,10027247,9000173,L01XD04 1000772,Madopar DR Tbl 200 mg,10027145,9000173,L01XD04 1000406,Madopar LIQ Tbl 125 mg,10026506,9000173,L01XD04 1000926,Madopar LIQ Tbl 62.5,10026505,9000173,L01XD04 1001045,Madopar Tbl 125 mg,10025699,9000173,L01XD04 1000420,Magnesiocard 5 mmol sachet,10061469,9000089,J04AB02 1000800,Magnesium Chlorid Inf Lsg,10025704,9000173,L01XD04 1000927,Magnesium Diasporal 12 mmol,10057616,2107469,R05X 1000421,Magnesium Sulfat 50% 2 ml,10029454,1057433,A01AD02 1000554,"Mannitol 20%, 20 g/100 ml",10066604,9000089,J04AB02 1000476,Marcoumar Tbl,10025821,9000173,L01XD04 1000218,Meal and drink,,#N/A,#N/A 1000646,Mebucane,10026136,9000173,L01XD04 1000930,Mephadolor Neo 500mg Tbl,10035922,2107469,R05X 1000325,Mephameson 4mg/ml,10000508,9000173,L01XD04 1000929,Mephameson Inj Lsg 8mg/ml,10030979,2107469,R05X 1000723,Mephanol Tbl 100mg,10030891,2107469,R05X 1000424,Meronem 1g,10073434,,J01XX08 1000425,Meronem 500 mg,10073435,,J01XX08 1001086,Meropenem 1g,10073434,,J01XX08 1001084,Meropenem 500 mg,10073435,,J01XX08 1000495,Mestinon Drg 10 mg,10027615,1057433,A01AD02 1000496,Mestinon Drg 60 mg,10027616,1057433,A01AD02 1000928,Metamizol ISPI,1840,9000173,L01XD04 215,Methadon 
inj 10 mg/ml,10024713,9000173,L01XD04 1000427,Methadon oral 0.5% 5mg/ml,1081,#N/A,#N/A 1000430,Methergin Inj Lsg 0.2 mg/ml,10030367,1057433,A01AD02 1000429,Methylenblau 1%,ZL?,Alte Materialnr.,ATC 1000428,Metolazon Tbl 5 mg,10028924,1057433,A01AD02 1000596,Metoprolol Tartrat Tbl 10 mg,10080441,,J01XX08 181,Metronidazole inf 500 mg/100ml,10080441,,J01XX08 1000070,Mg-Sulfat conc,10025147,9000173,L01XD04 1001195,Microlax Klistier,10082271,,J01XX08 142,Minirin inj,10024870,9000173,L01XD04 1001224,Minirin Nasenspray 10 mcg pro Hub,10024871,9000173,L01XD04 1000748,Mix-Augentropfen,1980,9000173,L01XD04 1000570,Modulamin Plus,a.H.,#N/A,#N/A 1001083,Moduretic mite Tabl 2.5 mg/25 mg,,#N/A,#N/A 1000787,Moduretic Tbl 50/5 mg,10030503,1057433,A01AD02 1000684,Morphin als PCA Pumpe,1148,#N/A,#N/A 1000932,Morphin HCl Lsg 20mg/ml,1642,#N/A,#N/A 1000659,Morphin HCL Perfusor,1028,#N/A,#N/A 1000444,Morphin Trpf 1% 10mg/ml,1641,#N/A,#N/A 212,Morphine inj 10 mg/ml,10025010,9000173,L01XD04 1000862,Morphine Inj 10mg,10025010,9000173,L01XD04 1000783,Motilium Tbl 10 mg,10030876,2107469,R05X 1000801,Moviprep Pulver,10054809,2107469,R05X 1000933,MST Continus ret Tbl 10 mg,10067725,9000089,J04AB02 1000445,MST Continus ret Tbl 30,10024727,9000173,L01XD04 1000446,MST Continus ret. 
Tbl 60,10025805,9000173,L01XD04 1000630,Mundisal Gel,10062019,9000089,J04AB02 1000348,Myambutol 400 mg/4ml,10025348,9000173,L01XD04 1001090,Mycamine Inf Lsg 100 mg,10074766,,J01XX08 1001119,Mycamine Inf Lsg 50 mg,10074765,,J01XX08 1000551,Mycostatin Susp,10030637,1057433,A01AD02 1000588,Mydriaticum Augentrp,10026389,9000173,L01XD04 1001030,Myfortic Tbl 180mg,10028774,1057433,A01AD02 1000947,Myfortic Tbl 360mg,10028775,1057433,A01AD02 1000193,Na- Bicarbonat 8.4%,10026285,9000173,L01XD04 1000449,Na Monohydrogenphosphat 10 ml,,#N/A,#N/A 1000454,Na Thiosulfat 25% 2ml,10025535,9000173,L01XD04 1000571,Na-Bicarbonat 1.4%,10026235,9000173,L01XD04 1000453,Na-Bicarbonat Inf Lsg 8.4%,10079597,,J01XX08 328,NaChromoglycate inhal 20 mg,,#N/A,#N/A 1000572,NaCl 0.45% 500ml,10051516,2107469,R05X 1000818,NaCl 0.9 % /1000ml,10072317,,J01XX08 1000826,NaCl 0.9 % /250ml,10079594,,J01XX08 1000827,NaCl 0.9 % /500ml,10072316,,J01XX08 1000197,NaCl 0.9 % flush,,#N/A,#N/A 1000738,NaCl 0.9 % Pflege,,#N/A,#N/A 1000194,NaCl 0.9 %.,,#N/A,#N/A 1000819,NaCl 0.9 %10,,#N/A,#N/A 1000624,NaCl 0.9%,,#N/A,#N/A 1001254,NaCl 0.9% Perf.,,#N/A,#N/A 1000546,NaCl 2.5% 500ml,1201,#N/A,#N/A 1000090,NaCl conc Amp 14.5%,10025356,9000173,L01XD04 1000560,"Nhrlsung ISPI 1000ml -F, - E",,#N/A,#N/A 1000452,Naloxon Inj Lsg,10029539,1057433,A01AD02 1000729,Na-Posph conc,10072881,,J01XX08 1000693,Naropin,10027090,9000173,L01XD04 1001091,Nasenspray Spirig 0.1%,10072789,,J01XX08 1000526,Navoban Inj Lsg 2 mg/ 2ml,10058601,1014062,N05CD06 1000948,Navoban Inj Lsg 5 mg/ 5ml,,#N/A,#N/A 1000949,Navoban Kps 5 mg,a.H.,#N/A,#N/A 1000950,Nebilet Tbl 5 mg,10072945,,J01XX08 1001087,Nebivolol Tabl 5 mg,10072945,,J01XX08 1000368,Neo Decongestine Past,10024787,9000173,L01XD04 1000296,Neo-Mercazole Tabl 5 mg,10030320,1057433,A01AD02 1000583,Neosporin Augentrp,a.H.,#N/A,#N/A 1000631,Neotracin Augensalbe,a.H.,#N/A,#N/A 1000598,Nephrotrans Kps,10025849,9000173,L01XD04 85,Nepresol inj,10025281,9000173,L01XD04 1000665,Nepro,10088280,,J01XX08 
1000619,Neupogen 30 Mio U,10028848,1057433,A01AD02 1000620,Neupogen 48 Mio U,10028850,1057433,A01AD02 1000360,Neurontin Kps 100 mg,10026544,9000173,L01XD04 1000361,Neurontin Kps 300 mg,10026546,9000173,L01XD04 1000952,Nexium Mups 40mg TS i.v.,10029613,1057433,A01AD02 1000466,Nexium Mups Tbl 20 mg,10035636,2107469,R05X 1000951,Nexium Mups Tbl 40 mg,10035641,2107469,R05X 1000955,Nicorette Inhaler 10mg,10027768,1057433,A01AD02 1000953,Nicotinell Pflaster 1 stark,10030220,1057433,A01AD02 1000456,Nicotinell Pflaster 2 mittel,10079535,,J01XX08 1000954,Nicotinell Pflaster 3 Leicht,10079615,,J01XX08 124,Nimotop inf 0.2 mg/ml,10026044,9000173,L01XD04 125,Nimotop Tabl 30 mg,10025092,9000173,L01XD04 1001264,Nipruss 0.5mg/ml,10062703,9000089,J04AB02 1001265,Nipruss 1mg/ml,10062703,9000089,J04AB02 1001245,Nipruss 0.5mg/ml,10062703,9000089,J04AB02 1001246,Nipruss 1mg/ml,10062703,9000089,J04AB02 1000252,Nipruss inf,10062703,9000089,J04AB02 1000867,Nitriate 0.5mg/ml,,#N/A,#N/A 1000868,Nitriate 1mg/ml,,#N/A,#N/A 1000459,Nitroderm TTS 10,10031141,2107469,R05X 1000458,Nitroderm TTS 5,10031028,2107469,R05X 1000243,Nitroglycerin Inf,10055924,2107469,R05X 117,"Nitroglycerin Kps 0,8mg",10085809,,J01XX08 1001047,Nitroprussiat Fides 0.5mg/ml,10062703,9000089,J04AB02 1001048,Nitroprussiat Fides 1mg/ml,10062703,9000089,J04AB02 1001141,Nitroprusside DBL 0.5 mg/ml,10062703,9000089,J04AB02 1001142,Nitroprusside DBL 1mg/ml,10062703,9000089,J04AB02 1000462,Noradrenalin 1mg/ml,10057219,2107469,R05X 1000656,Noradrenalin 100 g/ml Perfusor,10057220,2107469,R05X 1000657,Noradrenalin 20 g/ml Perfusor,10057220,2107469,R05X 198,Norcuron inj 4mg/ml,a.H.,#N/A,#N/A 1001171,Norfloxacin Filmtbl 400 mg,10079385,,J01XX08 1000633,Norgolax Mikroklistier,a.H.,#N/A,#N/A 326,Noroxin tabl 400 mg,10024627,9000173,L01XD04 1000270,Norvasc Tabl 10 mg,10031219,2107469,R05X 1000271,Norvasc Tabl 5 mg,10031220,2107469,R05X 1000605,Novalgin inj ls 50% 2 ml,10030334,1057433,A01AD02 1000858,Novalgin Tbl. 
500mg,10030336,1057433,A01AD02 1000632,Novalgin Trpf,10030337,1057433,A01AD02 1000731,Novasource GI Control,10028013,1057433,A01AD02 1000785,Novasource GI Forte + Fasern,10028721,1057433,A01AD02 1001208,NovoEight TS 1000 E Inj Lsg,10067025,9000089,J04AB02 1001209,NovoEight TS 2000 E Inj Lsg,10067026,9000089,J04AB02 1001206,NovoEight TS 250 E Inj Lsg,10067023,9000089,J04AB02 1001207,NovoEight TS 500 E Inj Lsg,10067024,9000089,J04AB02 1000798,NovoSeven,10055885,2107469,R05X 1001162,NovoSeven 1mg,10055885,2107469,R05X 1001163,NovoSeven 2mg,10055884,2107469,R05X 1001164,NovoSeven 5 mg,10055883,2107469,R05X 1001027,Noxafil Susp,10030323,1057433,A01AD02 1001078,Noxafil Tabl 100 mg,10072214,9000089,J04AB02 1000410,Nozinan Inj Lsg,a.H.,#N/A,#N/A 1000409,Nozinan Tbl 25 mg,10087713,,J01XX08 1000956,Nozinan Tbl 100 mg,10030434,1057433,A01AD02 1000558,"Nutriflex Lipid Spec 1250ml +F, +E",10056052,2107469,R05X 1000559,Nutriflex lipid spez 1875ml,10084247,,J01XX08 1000694,Nutriflex lipid spezial oE,,#N/A,#N/A 1000561,"Nutriflex spez 1000 ml, ohne Fett",,#N/A,#N/A 1000519,Obracin 80 mg,10030603,1057433,A01AD02 1000376,Octagam Inf Lsg,10059912,1014062,N05CD06 1001067,Ondansetron Inj Lsg 4mg/2ml,10066846,9000089,J04AB02 1001123,Orencia Inf Lsg 250 mg,10037060,2107469,R05X 1001124,Orencia Inj Lsg 125 mg,10060935,9000089,J04AB02 1000534,Orfiril Inj Lsung,10027540,1057433,A01AD02 1000531,Orfiril long Ret Kps 150 mg,10027487,1057433,A01AD02 1000532,Orfiril long Ret Kps 300 mg,10027488,1057433,A01AD02 1000533,Orfiril Sirup,10069897,9000089,J04AB02 1000635,Otrivin Nasenspray 0.1%,10029196,1057433,A01AD02 1000634,Otrivin Nasentropfen 0.1%,10029207,1057433,A01AD02 1000935,Oxycontin Ret Tbl 10mg,10028184,1057433,A01AD02 1000936,Oxycontin Ret Tbl 20mg,10028185,1057433,A01AD02 1000764,Oxycyte/Placebo,,#N/A,#N/A 1000937,Oxynorm Trpf 10mg/ml,10029214,1057433,A01AD02 1001093,Pantoprazol Tbl 20 mg,10075030,,J01XX08 1000604,Pantozol Inj Lsg 40 mg,10090973,,J01XX08 1000860,Pantozol Tbl 40 
mg,10075031,,J01XX08 1000938,Paragol Sirup 500ml,10027830,1057433,A01AD02 1000708,Paravertebralblock-Bolus,,#N/A,#N/A 1000688,Paravertebralblock-Lsung,,#N/A,#N/A 197,Pavulon inj 2 mg/ml,a.H.,#N/A,#N/A 1000621,PCA-Fentanyl,,#N/A,#N/A 1001028,PCA-Hydromorphon 0.2mg/ml,2062,9000173,L01XD04 1000716,PCA-Morphin,1148,#N/A,#N/A 1000613,PCA-Morphin/Ketamin,,#N/A,#N/A 1000844,PDA-Bupivacain 0.125% Bolus,,#N/A,#N/A 1000843,PDA-Bupivacain 0.125% ohne Fentanyl,,#N/A,#N/A 1000717,PDA-Forte,1240,#N/A,#N/A 1000761,PDA-Forte Bolus,1240,#N/A,#N/A 1000762,PDA-Standard Bolus,1239,#N/A,#N/A 1000233,Penicillin 50 000 U/ml,10000483,9000173,L01XD04 1000284,Penicillin G 1 Mio,10073609,,J01XX08 1000607,Pentothal 0.5 Amp inj 25mg/ml,a.H.,#N/A,#N/A 202,Pentothal inj 25 mg/ml,a.H.,#N/A,#N/A 1000920,Perative liq neutral 500ml,10066608,9000089,J04AB02 1000934,Perenterol Kps,10047784,2107469,R05X 1000372,Perenterol Sachets 250 mg,10054766,2107469,R05X 1000489,Perfalgan 1g,10028904,1057433,A01AD02 1000490,Perfalgan 500 mg,10029001,1057433,A01AD02 1000734,Perlinganit Perfusor,10024688,9000173,L01XD04 214,Pethidine inj 50 mg/ml,10025008,9000173,L01XD04 1000478,Phenhydan Inf Lsg 750 mg,10024723,9000173,L01XD04 230,Phenhydan inj 50 mg/ml 5 ml,10030736,68386,R05X 304,Phenhydan tabl 100 mg,10030585,1057433,A01AD02 1000475,Phenobarbital 200 mg/2ml,10025080,9000173,L01XD04 1000394,Phoscap Phosphat Kps 3 mmol,10073526,,J01XX08 1000184,Physiogel,1236,#N/A,#N/A 1000737,Physiogel MV,1236,#N/A,#N/A 1000479,Physostigmin Amp,,#N/A,#N/A 1001044,Picoprep Pulver,10065994,9000089,J04AB02 1000837,Piperacillin-Tazobactam 2.25 Inj Lsg,10064430,9000089,J04AB02 1000266,PK-Merz 500 ml,10025034,9000173,L01XD04 1000245,Platelets,,#N/A,#N/A 1000324,Plavix Tbl 75 mg,10052838,2107469,R05X 1000939,Pradif Ret Tbl 0.4 mg,10029918,1057433,A01AD02 1000488,Prednison Tbl 50 mg,10053425,2107469,R05X 12,Primperan inj 10mg/2ml,10025902,9000173,L01XD04 1000940,Primperan Ls 1mg/ml,10079431,,J01XX08 1000941,Primperan Tbl 
10mg,10025903,9000173,L01XD04 1001025,Privigen 10g Inf Lsg,10044020,2107469,R05X 1001023,Privigen 2.5g Inf Lsg,10067629,9000089,J04AB02 1001026,Privigen 20g Inf Lsg,10045139,2107469,R05X 1001024,Privigen 5g Inf Lsg,10044021,2107469,R05X 1000357,Procto Synalar N Supp,10026646,9000173,L01XD04 1001231,Prograf Inf Lsg,10027001,9000173,L01XD04 1000942,Prograf Kps 0.5mg,10027521,1057433,A01AD02 1000510,Prograf Kps 1mg,10026999,9000173,L01XD04 1000511,Prograf Kps 5 mg,10027000,9000173,L01XD04 1001188,Prolastin TS 1g Inf Lsg,10067628,9000089,J04AB02 1000328,Promit Inj Lsg 15% 20 ml,a.H.,#N/A,#N/A 1000804,Promote Fibres Plus,10058157,2107469,R05X 1001227,Propofol 1% BOLUS,10087364,,J01XX08 1001225,Propofol 2%,10087365,,J01XX08 1001226,Propofol 2% BOLUS,10087365,,J01XX08 1000455,Prostigmin 0.5mg/ml,,#N/A,#N/A 1000262,Prostin VR 0.5 mg/ml,10024773,9000173,L01XD04 1000494,Protamin Inj Lsg,10072158,9000089,J04AB02 1000755,Prothromplex 600IE,10055894,2107469,R05X 1000286,Pulmicort Dosieraerosol,10025896,9000173,L01XD04 1000287,Pulmicort Inhal Lsg 0.25 mg/ml,10026250,9000173,L01XD04 1001239,PVB Ropivacain 2mg/ml,,#N/A,#N/A 1000943,Quantalan Sachet,10066620,9000089,J04AB02 1001094,Quetiapin Tbl 100 mg,10075156,,J01XX08 1001074,Quetiapin Tbl 25 mg,10072580,,J01XX08 1000504,Rapamune Tbl 1mg,10028520,1057433,A01AD02 1000944,Rapamune Tbl 2mg,10029398,1057433,A01AD02 204,Rapifen Inj Lsg,10031179,2107469,R05X 1001211,Refacto AF FuseNGo TS 1000 E Inj Lsg,10055876,2107469,R05X 1001210,ReFacto AF FuseNGo TS 500 E Inj Lsg,10055877,2107469,R05X 1000404,Refludan Trockensubst 50 mg,a.H.,#N/A,#N/A 1000477,Regitin Inj Lsg 10 mg/ml,a.H.,#N/A,#N/A 1000945,Remeron subligual 15 mg,10028738,1057433,A01AD02 1000600,Remeron Tbl 30 mg,10031366,2107469,R05X 1000946,Remicade Amp 100mg,10027877,1057433,A01AD02 1001223,Remifentanil 100 g/ml Perfusor,10080449,,J01XX08 1000344,Reniten Inj Lsg 1 mg/ml,a.H.,#N/A,#N/A 1000593,Reniten iv,a.H.,#N/A,#N/A 1000614,ReoPro,10026957,9000173,L01XD04 1000484,Resonium 
A,10030457,1057433,A01AD02 1000965,Resource Aprikose,10028982,1057433,A01AD02 1000966,Resource Erdbeer,10036659,2107469,R05X 1000964,Resource Kaffee,10036657,2107469,R05X 1000967,Resource Neutral,10029708,1057433,A01AD02 1000968,Resource Sommerfrchte,10036661,2107469,R05X 1000969,Resource Vanille,10036662,2107469,R05X 1000369,Resyl Plus,10030486,1057433,A01AD02 1001065,Retransfusion Pflege,,#N/A,#N/A 1001058,Revatio Filmtbl 20 mg,10034536,2107469,R05X 1000327,Rheomacrodex Inf Lsg 10% NaCl 500 ml,a.H.,#N/A,#N/A 1001068,Rifampicin Filmtbl 600 mg,10066841,9000089,J04AB02 1001173,Rifampicin Inf Lsg,10072951,,J01XX08 1000390,Rifater Drg 50 mg,10024869,9000173,L01XD04 186,Rimactan inf 300 mg,10072951,,J01XX08 1000797,Rimactan Kps 300 mg,10030767,2107469,R05X 351,Rimactan Kps 600 mg,10030670,68386,R05X 1000030,Ringer Laktat Infusion,10087425,,J01XX08 1000866,Ringer Laktat Infusion 1000ml,10087425,,J01XX08 1000739,Ringer Laktat MV,,#N/A,#N/A 1000581,Ringer Spllsung,10080382,,J01XX08 1001216,RingerHemacetat Studienlsung 1000 ml,,#N/A,#N/A 1000780,Risperdal Tbl 1 mg,10026520,9000173,L01XD04 442,Rivotril inj,10030551,1057433,A01AD02 1001215,Rivotril Perfusor,10030551,1057433,A01AD02 1000710,Robinul Inj Lsg,10025251,9000173,L01XD04 1000330,Rocaltrol Kps 0.25 ug,10030836,2107469,R05X 1000971,Rocaltrol Kps 0.5 ug,10030837,2107469,R05X 1000838,Rocephin 2 g Inf Lsg,10063742,9000089,J04AB02 1000304,Rocephin 2g,10063742,9000089,J04AB02 1000648,Rocephin Inj Lsg,10063742,9000089,J04AB02 1000697,Rohypnol 1 mg,10027397,1057433,A01AD02 1000753,SAB_Tetraspan/Voluven,,#N/A,#N/A 1000584,Salicylvaseline 10%,10074355,,J01XX08 1000426,Salofalk Tbl 500 mg,10025013,9000173,L01XD04 1000310,Sandimmun 50 mg/ml,10024740,9000173,L01XD04 1001031,Sandimmun Neoral Kps 10 mg,10026558,9000173,L01XD04 1000311,Sandimmun Neoral Kps 100 mg,10026558,9000173,L01XD04 1000313,Sandimmun Neoral Kps 25 mg,10026558,9000173,L01XD04 1000312,Sandimmun Neoral Kps 50 mg,10026558,9000173,L01XD04 1000314,Sandimmun Neoral 
Trinklsg 100 mg,10026558,9000173,L01XD04 1000464,Sandostatin iv,10025753,9000173,L01XD04 1000856,Sandostatin Perfusor 1ml/25mcg,10025753,9000173,L01XD04 1000616,Sandostatin sc,10025753,9000173,L01XD04 1000611,SanPellegrinoMagnesia,,#N/A,#N/A 1000269,Saroten ret Kps 25 mg,10030581,1057433,A01AD02 1000972,Saroten ret Kps 50 mg,10030583,1057433,A01AD02 1000365,SDD Gentamycin/Polymyxin Kps,1075,#N/A,#N/A 1001012,Selenase i.v.,10028403,1057433,A01AD02 1001009,Selenase Trinkamp. 0.10mg,10028401,1057433,A01AD02 1000485,Selipran Tbl 20 mg,10065410,9000089,J04AB02 1000976,Seresta 15 mg,10046831,2107469,R05X 1000973,Seretide 100 Disk,10027594,1057433,A01AD02 1000974,Seretide 250 Disk,10027595,1057433,A01AD02 1001203,Seretide 250g Disk,10027595,1057433,A01AD02 1000975,Serevent Diskus,10026637,9000173,L01XD04 1000316,Seropram Tbl 20 mg,10026085,9000173,L01XD04 1000875,Seroquel Tbl 100 mg,10027843,1057433,A01AD02 1000874,Seroquel Tbl 25mg,10028202,1057433,A01AD02 1001073,Sertralin Tbl 50 mg,10043217,2107469,R05X 1000606,Simdax inf,10065246,9000089,J04AB02 1001111,Simponi Inj Lsg,10054691,2107469,R05X 1000757,Simulect Inj Lsg,10027311,9000173,L01XD04 1000503,Simvast Tbl 20 mg,10083288,,J01XX08 1000773,Sinemet CR Tbl 50/200 mg,10064964,9000089,J04AB02 1000805,"SmofKabiven NL 1500 ml +F,+E",10058641,1014062,N05CD06 1000806,"SmofKabiven NL 2000 ml +F,+E",10058675,1014062,N05CD06 1000852,"SmofKabiven NL1970 ml +F,-E",10058675,1014062,N05CD06 1000636,Soldactone Amp.,10030647,1057433,A01AD02 1001122,Soliris Inf Lsg 300 mg/30 ml,10051771,2107469,R05X 1000383,Solu Cortef Inj Lsg 100 mg/ 2 ml,10000498,9000173,L01XD04 1000433,Solu Medrol 125 mg,10091422,,J01XX08 1000434,Solu Medrol 500 mg,10026925,9000173,L01XD04 1000431,Solu Medrol Stechamp 1g,10031063,2107469,R05X 1000432,Solu Medrol Stechampullen 2g,10031063,2107469,R05X 1000435,Solumedrol 40 mg,10090120,,J01XX08 1000992,Sorbidilat Kps 20mg,a.H.,#N/A,#N/A 1000993,Sorbidilat Kps 40mg,a.H.,#N/A,#N/A 1000392,Sorbidilat SR Kps 40 
mg,a.H.,#N/A,#N/A 1000399,Sorbisterit Calcium Pulver,10060829,9000089,J04AB02 1000995,Sortis Tabl 10 mg,10027232,9000173,L01XD04 1000279,Sortis Tabl 20 mg,10027234,9000173,L01XD04 1000994,Sortis Tabl 40 mg,10028688,1057433,A01AD02 138,Sotalol Tabl 80 mg,10053745,2107469,R05X 1000527,Spasmo Urgenin Neo drg,10026613,9000173,L01XD04 1001049,SPICE_Dexdor Inf. Lsg,,#N/A,#N/A 1001053,SPICE_Disoprivan 1% Bolus,,#N/A,#N/A 1001050,SPICE_Disoprivan 2%,,#N/A,#N/A 1001052,SPICE_Disoprivan 2% BOLUS,,#N/A,#N/A 1001054,SPICE_Dormicum Bolus 1mg/ml,,#N/A,#N/A 1001051,SPICE_Dormicum Perfusor 5mg/ml,,#N/A,#N/A 1001228,SPICE_Propofol 1% BOLUS,,#N/A,#N/A 1001230,SPICE_Propofol 2%,,#N/A,#N/A 1001229,SPICE_Propofol 2% BOLUS,,#N/A,#N/A 1000486,Spiricort Tbl 20 mg,10026490,9000173,L01XD04 1000487,Spiricort Tbl 5 mg,10026488,9000173,L01XD04 1000848,Spirit 3X8,,#N/A,#N/A 1000849,Spirit 5Y2,,#N/A,#N/A 1000678,Spllsung/Nacl 0.9%,10025159,9000173,L01XD04 1001121,Stelara Inj Lsg 90 mg/ml,10060721,9000089,J04AB02 1000991,Stilnox Tbl 10mg,10025942,9000173,L01XD04 110,Streptokinase inj 1500 U/ml,,#N/A,#N/A 1000807,"StructoKabiven NL 1500 ml +F, - E",10037730,2107469,R05X 1000752,Studie_NaCL/Ringerfundin,,#N/A,#N/A 1000509,Succinolin 100mg/ 2ml,10026181,9000173,M03AB01 303,Sufentanil inj 50 g/ml,10079439,,J01XX08 1000771,Sufentanyl Perfusor,10079439,,J01XX08 1001192,SUP-ICU Pantoprazol Verum/Placebo i.v.,,#N/A,#N/A 1000447,Supradyn Brausetbl,10085365,,J01XX08 1000525,Surmontil Tbl 100 mg,10031367,2107469,R05X 1001006,Surmontil Trpf 4%,10030453,1057433,A01AD02 1000573,Survimed OPD,10074435,,J01XX08 1001066,Survimed OPD HN,10074435,,J01XX08 1001131,Sutent Kps 12.5 mg,10043740,2107469,R05X 1001130,Sutent Kps 25 mg,10043741,2107469,R05X 1001129,Sutent Kps 50 mg,10042842,2107469,R05X 1000996,Symbicort 100/60,10028145,1057433,A01AD02 1000997,Symbicort 200/60,10028147,1057433,A01AD02 1000512,Synacthen Inj Lsg 0.25 mg,10068079,9000089,J04AB02 1000469,Syntocinon Inj Lsg 5 IE,10030369,1057433,A01AD02 
1001099,Tamiflu Kps 30 mg,10046277,2107469,R05X 1000765,Tamiflu Kps 75 mg,10027761,1057433,A01AD02 1001100,Tamiflu Suspension,10063607,9000089,J04AB02 1001127,Tarceva Tbl 100 mg,10029731,1057433,A01AD02 1001128,Tarceva Tbl 150 mg,10029732,1057433,A01AD02 1000340,Tardyferon Drg,10030526,1057433,A01AD02 1001196,Targin 10/5 mg Tbl,10052071,2107469,R05X 1000989,Targin Retard Tbl 10/5mg,10052071,2107469,R05X 1000990,Targin Retard Tbl 20/10mg,10051668,2107469,R05X 1001249,Targocid Inf Lsg,10025986,9000173,L01XD04 1000408,Tavanic Inf Lsg 500 mg 100 ml,10027291,9000173,L01XD04 1000407,Tavanic Tbl 500 mg,10065718,9000089,J04AB02 1000319,Tavegyl Inj Lsg 2mg/2ml,10030572,1057433,A01AD02 405,"Tazobac 2,5 g inf",10064430,9000089,J04AB02 1000483,Tazobac Inf 4g,10089360,,J01XX08 1001174,Td-pur Inj,10030004,1057433,A01AD02 1000776,Tegretol CR Tbl 200 mg,10024902,9000173,L01XD04 1000777,Tegretol CR Tbl 400 mg,10024876,9000173,L01XD04 1000594,Tegretol Supp 250 mg,10024875,9000173,L01XD04 1000295,Tegretol Tabl 200 mg,10024902,9000173,L01XD04 1000711,Telebrix Gastro,10025779,9000173,L01XD04 1000387,Telebrix Gastro Lsg,10025779,9000173,L01XD04 1000239,Temesta 4 mg/ml inj,10025963,9000173,L01XD04 1000418,Temesta Expidet 1mg,10024861,9000173,L01XD04 1000988,Temesta Expidet 2.5 mg,10024863,9000173,L01XD04 1000987,Tenormin Tabl 100mg,10030700,68386,R05X 1000278,Tenormin mite Tabl 50 mg,10030903,2107469,R05X 141,Tenormin submite tabl 25 mg,10026108,9000173,L01XD04 1000655,test Adrenalin10 g/ml Bolus,,#N/A,#N/A 1000650,Test Adrenalin100 g/ml,,#N/A,#N/A 1000649,test Adrenalin20 g/ml,,#N/A,#N/A 1000658,Test Nor Adrenalin 10 g/ml Bolus,,#N/A,#N/A 1000664,test Zofran,,#N/A,#N/A 1000712,Tetagam N,10067304,9000089,J04AB02 1001247,Theospirex Bolus iv,10092629,,J01XX08 1001248,Theospirex Perfusor,10092629,,J01XX08 1001064,Thiamazol Inj Lsg 40,10074464,,J01XX08 1000201,Thrombocyte conc,,#N/A,#N/A 1000745,Thrombozyten Pflege,,#N/A,#N/A 1000715,Thymoglobuline,10027300,9000173,L01XD04 
1000416,Thyrotardin 100 ug,10025568,9000173,L01XD04 1001059,Thyroxin TS Amp 500 ug/5ml,,#N/A,#N/A 1001062,TICAB Tbl,,#N/A,#N/A 1001085,TICH-2 Tranexamsure/Placebo,,#N/A,#N/A 1000518,Tienam Amp 500 mg,10037817,2107469,R05X 1001232,Tigacyl Amp 50 mg,,#N/A,#N/A 1000517,Timoptic Trpf Oph 0.25%,10030811,2107469,R05X 1000586,Tobrex Augensalbe,10026604,9000173,L01XD04 1000587,Tobrex Augentrp,10026603,9000173,L01XD04 1000760,Topamax Tbl 50 mg,10028064,1057433,A01AD02 1000401,Tora-dol Inj Lsg 30 mg,10026145,9000173,L01XD04 1000520,Torem Inj Lsg 20mg,10031373,2107469,R05X 1000521,Torem Tbl 10 mg,10031373,2107469,R05X 1000986,Torem Tbl 200 mg,10026383,9000173,L01XD04 1000522,Torem Tbl 5 mg,10026384,9000173,L01XD04 1000170,Trace elements,10026382,9000173,L01XD04 1001089,Tracleer Tabl 62.5 mg,10028536,1057433,A01AD02 199,Tracrium inj 10 mg/ml,10031165,2107469,R05X 1000822,Tracrium Perfusor,10031165,2107469,R05X 1001046,Tramal Amp. 100 mg,10035189,2107469,R05X 1000984,Tramal Kps 50 mg,10035188,2107469,R05X 1000985,Tramal Retard Tbl. 100 mg,10026889,9000173,L01XD04 1000523,Tramal Trpf 100 mg/ml,10035190,2107469,R05X 386,Trandate inj,10024755,9000173,L01XD04 1000828,Trandate Perfusor,10024755,9000173,L01XD04 1000845,Tranexamid/Placebo,,#N/A,#N/A 1001038,TransformCertican Tbl. 
0.75m,,#N/A,#N/A 1001037,TransformMyfortic Tbl 360mg,,#N/A,#N/A 1001036,TransformNTX Simulect Inj Lsg,,#N/A,#N/A 1001039,TransformNTXPrograf Kps 0.5mg,,#N/A,#N/A 1001040,TransformSandimmun Neoral Kps,,#N/A,#N/A 1001214,Transipeg Pulver,10027238,9000173,L01XD04 1000637,Trasylol Amp.,,#N/A,B02AB01 1000774,Trileptal Tbl 300 mg,10028154,1057433,A01AD02 1000775,Trileptal Tbl 600 mg,10028155,1057433,A01AD02 1000672,Trinkmenge,,#N/A,#N/A 1000736,Trinkmenge Pflege,,#N/A,#N/A 1000869,Triofan Nasenspray,10062139,9000089,J04AB02 1000870,Triofan Nasentropfen,10062138,9000089,J04AB02 1000983,Trittico Tbl 100 mg,10024791,9000173,L01XD04 1000982,Trittico Tbl 50 mg,10024789,9000173,L01XD04 1000638,Tuberculin PPD,10086658,,J01XX08 1001008,Ulcogant Susp 0.2g/ml,a.H.,#N/A,#N/A 1001218,Ultiva 100 g/ml Perfusor,10026953,9000173,L01XD04 1000705,Ultiva Inj Lsg,10026953,9000173,L01XD04 1000513,Unifyl cont Tbl 400 mg,10026746,9000173,L01XD04 1001237,Urapidil Inj Lsg,10089770,,J01XX08 1001238,Urapidil Perfusor,10089770,,J01XX08 1000980,Urokinase 10000 UI,10027020,9000173,L01XD04 1000528,Urokinase 100000 UI,10027022,9000173,L01XD04 1000529,Urokinase 500000 UI,10027343,9000173,L01XD04 1000981,Ursofalk 250mg Kps.,10068072,9000089,J04AB02 1000979,Valcyte Filmtabl. 450mg,10028518,1057433,A01AD02 1000283,Valerianae tinct Haenseler,10055923,2107469,R05X 245,Valium inj 5 mg/ml,10027647,1057433,A01AD02 1000978,Valium Tbl. 10 mg,10027654,1057433,A01AD02 1000977,Valium Tbl. 
5 mg,10027653,1057433,A01AD02 1000530,Valtrex Tbl 500 mg,10087403,,J01XX08 1000349,Valverde Verstopfung Sirup,10027541,1057433,A01AD02 189,Vancocin Amp 500 mg,10029113,1057433,A01AD02 331,Vancocin oral Kps 250 mg,10024998,9000173,L01XD04 113,Vasopressin inf 0.4 U/ml,10064031,9000089,J04AB02 112,Vasopressin inj 20 U/ml,10064031,9000089,J04AB02 1001132,Vectibix Inf Lsg 100 mg/5ml,10044988,2107469,R05X 1001133,Vectibix Inf Lsg 400 mg/20ml,10063603,9000089,J04AB02 1001116,Velcade Inf Lsg 1 mg,10052405,2107469,R05X 1001117,Velcade Inf Lsg 3.5 mg,10029689,1057433,A01AD02 1001072,Venlafaxin ER ret Kps 75 mg,10075159,,J01XX08 1000339,Venofer Inj Lsg 100 mg,10026837,9000173,L01XD04 1000808,Ventavis Inhal Lsg,10044141,2107469,R05X 1000682,Venticulte/Placebo,,#N/A,#N/A 1000499,Ventolin Dosieraerosol 0.1 mg,10027974,1057433,A01AD02 260,Ventolin inhal 0.5% 5 mg/ml,10024778,9000173,L01XD04 1001060,Ventolin Inhal Lsg 0.05%,10024778,9000173,L01XD04 92,Verapamil inf 0.25 mg/ml,10058302,2107469,R05X 1000663,Vfend Amp,10029643,1057433,A01AD02 1000661,Vfend Filmtbl 200 mg,10028835,1057433,A01AD02 1000662,Vfend Filmtbl 50 mg,10028834,1057433,A01AD02 1000610,Viagra Tbl. 50 mg,10027333,9000173,L01XD04 1000335,Vibravens Inj Lsg 100 mg 5 ml,10057912,2107469,R05X 1000553,Vi-De 3 Trpf,10030446,1057433,A01AD02 1001057,VIMC PCA-Hydromorphon 0.,,#N/A,#N/A 1001056,Vimpat Tbl. 50 mg,10056098,2107469,R05X 1001180,Vincristin Inj Lsg,10066622,9000089,J04AB02 1000754,VisMed knstliche Trnen,,#N/A,#N/A 1000639,Vitamin A Augensalbe,10026703,9000173,L01XD04 1000277,Vitamin C Streuli Lsg 10% 5 ml,10030405,1057433,A01AD02 1000211,Vitamine water sol,,#N/A,#N/A 1000150,"Vitamins fat sol, Vitalipid",,#N/A,#N/A 1000384,Vitarubin Depot 1000 ug/ml,10090183,,J01XX08 1000329,Voltaren Drg 50,10030640,1057433,A01AD02 1000809,Voltaren Inf Lsg,10030639,1057433,A01AD02 1000859,Voltaren Tbl. 
75mg,75 mg gibt es nicht als Tbl,#N/A,#N/A 1000590,voluven,10066621,9000089,J04AB02 1000735,Voluven 6% MV,10066621,9000089,J04AB02 1000709,Voluven 6% Treasure,10066621,9000089,J04AB02 1001139,Voriconazol Filmtbl 200 mg,10079855,,J01XX08 1001138,Voriconazol Filmtbl 50 mg,10079854,,J01XX08 1001140,Voriconazol Inf Lsg 200 mg/ 20 ml,10079856,,J01XX08 1000923,Wasser,,#N/A,#N/A 1000213,Water,,#N/A,#N/A 1000204,Water sterile,,#N/A,#N/A 1001189,Willfact TS 1000 E Inj Lsg,10064576,9000089,J04AB02 1001250,Xarelto Tbl 15 mg,10059785,1014062,N05CD06 1001251,Xarelto Tbl 20 mg,10059793,1014062,N05CD06 1001079,Xifaxan Tabl 550 mg,10073506,,J01XX08 1000337,Xigris Amp 5mg,a.H.,#N/A,#N/A 1000640,X-Prep Liquid,10029977,1057433,A01AD02 1000412,Xylocain Spray 10%,10026201,9000173,L01XD04 1000405,Xyzal Filmtbl 5 mg,10028829,1057433,A01AD02 1000615,Xyzal Tbl. 5 mg,10028829,1057433,A01AD02 5,Zantic Inj Lsg 50 mg,10031049,2107469,R05X 1000497,Zantic Tbl 150 mg,10073875,,J01XX08 1001221,Zavedos Inf Lsg,10051946,2107469,R05X 1000260,Zentel Tbl 400 mg,10026651,9000173,L01XD04 1001219,Zerbaxa 1.5 g Inf Lsg,10084241,,J01XX08 1000305,Zinacef Amp 1.5 g,10030890,2107469,R05X 1000234,Zinacef inj 100 mg/ml,10030890,2107469,R05X 1000306,Zinat Tabl 500 mg,10025880,9000173,L01XD04 1000539,Zink Nutrimed Brausetbl 5mg,,#N/A,#N/A 1000540,Zinkchlorid Amp,1255,#N/A,#N/A 1000887,Zinkglukonat Tbl.30mg (Burgerstein),10025688,9000173,L01XD04 1000467,Zofran 4mg/2ml,10056449,2107469,R05X 1000861,Zofran Zydis 4mg,10056615,2107469,R05X 1000502,Zoloft Tbl 50 mg,10030070,1057433,A01AD02 190,Zovirax inf 250 mg/10 ml,10031081,2107469,R05X 1000603,Zurcal Tbl 20 mg,a.H.,#N/A,#N/A 1000261,Zyloric Tbl 300 mg,10030800,2107469,R05X 1000595,Zyprexa Filmtbl. 
5 mg,10027192,9000173,L01XD04 1000821,zzCordarone Perfusor,,, 151,zzz,,, 1000188,zzz,,, 1000230,zzz#2,,, 1000060,zzz#6,,, 1000181,zzzAmino-Glucos 9 g/l - 10%,,, 1000698,zzzAminophyllin BOLUS,,, 159,zzzAmpicillin 500 mg / ml inj,,, 1000537,zzzBeco 5 Inj Lsg,,, 1000347,"zzzBrevibloc 2,5g/10 ml",,, 121,zzzDiltiazem tabl 30 mg,,, 37,zzzDopamine 2 mg/ml inf,,, 1000242,zzzDopamine 40mg/ml inj,,, 44,zzzDopexamine 10 mg/ml inf,,, 172,zzzErythromycin 1g amp,,, 1000237,zzzErythromycin 50 mg/ml inj,,, 416,zzzFluimucil inj Lsg 20%,,, 1000224,zzzGlucos 5% cum Na40 K20,,, 1000192,zzzGlucos 10% cum Na40 K20,,, 1000815,zzzGlucose 5%100,,, 49,zzzGlyceryl nitrate inj 1 mg/ml,,, 287,zzzIbuprofen tabl 400 mg,,, 1000402,zzzKohle Gel,,, 1000235,zzzLidocaine 4 mg/ml,,, 1000247,zzzMorphine Inf 2 mg/ml,,, 1000241,zzzNa 5% hypertonic infusion,,, 1000695,ZZZNipruss 1mg/ml,,, 77,zzzNoradrenalin inj 0.02 mg/ml,,, 1000691,zzztestDisoprivan BOLUS 2% 20mg/ml,,, 1000677,zzztestNO,,, 1000228,zzzVoluven 6%,,, 1000653,zzzz,,, 1001001,zzzz,,, 1000733,zzzz Infu,,, 1000817,zzzz NaCl 0.9 %250,,, 1000229,zzzzDC defibrillation,,, 1000692,zzzzFentanyl Bolus inj 50 g/ml,,, 1000568,zzzzInfu,,, 1000569,zzzzinfusi,,, 203,zzzztest,,, 300,zzzzz,,, 1000685,zzzzz,,, 1000555,zzzzzFentanyl Inf 10ug/ml,,, 1000670,zzzzzFloxapen Inj Lsg 2g,,, 163,zzzzzFortam Stechamp 2g,,, 1000660,zzzzzTest Cordarone,,, 1000654,"zzzzztestBupivacaine 0,125% PDA",,, 1000549,zzzzzzAugmentin Inj 2.2g,,, \ No newline at end of file diff --git a/spitalhygiene/Unused/query_device.sql b/spitalhygiene/Unused/query_device.sql new file mode 100644 index 0000000..4627707 --- /dev/null +++ b/spitalhygiene/Unused/query_device.sql @@ -0,0 +1,20 @@ +SELECT * FROM [Atelier_DataScience].[atl].[V_DH_DIM_MITARBEITER_CUR] WHERE MITARBEITERID in +(SELECT distinct MITARBEITERID from [Atelier_DataScience].[atl].[V_DH_FACT_TERMINMITARBEITER] WHERE TERMINID in + (SELECT TERMINID FROM [Atelier_DataScience].[atl].[V_DH_FACT_TERMINGERAET] + WHERE GERAETID IN + 
(SELECT GERAETID + FROM [Atelier_DataScience].[atl].[V_DH_DIM_GERAET_CUR] + WHERE GERAETNAME ='ECC') + AND TERMINSTART_TS > '2018-01-01') +) + + +SELECT * FROM [Atelier_DataScience].[atl].[V_DH_DIM_RAUM_CUR] WHERE RAUMID in +(SELECT distinct RAUMID from [Atelier_DataScience].[atl].[V_DH_FACT_TERMINRAUM] WHERE TERMINID in + (SELECT TERMINID FROM [Atelier_DataScience].[atl].[V_DH_FACT_TERMINGERAET] + WHERE GERAETID IN + (SELECT GERAETID + FROM [Atelier_DataScience].[atl].[V_DH_DIM_GERAET_CUR] + WHERE GERAETNAME ='ECC') + AND TERMINSTART_TS > '2018-01-01') +) \ No newline at end of file diff --git a/spitalhygiene/Unused/tree_all.html b/spitalhygiene/Unused/tree_all.html new file mode 100644 index 0000000..cb0bf81 --- /dev/null +++ b/spitalhygiene/Unused/tree_all.html @@ -0,0 +1,169 @@ + + + + + + \ No newline at end of file diff --git a/spitalhygiene/Unused/tree_devices.html b/spitalhygiene/Unused/tree_devices.html new file mode 100644 index 0000000..c66b113 --- /dev/null +++ b/spitalhygiene/Unused/tree_devices.html @@ -0,0 +1,169 @@ + + + + + + \ No newline at end of file diff --git a/spitalhygiene/Unused/tree_employees.html b/spitalhygiene/Unused/tree_employees.html new file mode 100644 index 0000000..8cde4aa --- /dev/null +++ b/spitalhygiene/Unused/tree_employees.html @@ -0,0 +1,169 @@ + + + + + + \ No newline at end of file diff --git a/spitalhygiene/Unused/tree_nomed.html b/spitalhygiene/Unused/tree_nomed.html new file mode 100644 index 0000000..744b437 --- /dev/null +++ b/spitalhygiene/Unused/tree_nomed.html @@ -0,0 +1,169 @@ + + + + + + \ No newline at end of file diff --git a/spitalhygiene/Unused/tree_rooms.html b/spitalhygiene/Unused/tree_rooms.html new file mode 100644 index 0000000..df1d056 --- /dev/null +++ b/spitalhygiene/Unused/tree_rooms.html @@ -0,0 +1,169 @@ + + + + + + \ No newline at end of file diff --git a/spitalhygiene/Unused/umgebungsabklaerung.cypher b/spitalhygiene/Unused/umgebungsabklaerung.cypher new file mode 100644 index 0000000..c0976c7 
--- /dev/null +++ b/spitalhygiene/Unused/umgebungsabklaerung.cypher @@ -0,0 +1,10 @@ +MATCH (n:Patient{PATNR:'00000532290'})-[k]-(m:Patient) +WHERE k.from > n.risk_datetime - duration('P8D') +AND n.PATNR <> m.PATNR +RETURN n.PATNR, + m.PATNR, + k.from AS von, + k.to AS bis, + k.room AS ort, + CASE type(k) WHEN 'kontakt_raum' THEN 'Zimmer' WHEN 'kontakt_org' THEN 'Abteilung' END AS typ, + CASE "True" IN labels(m) WHEN true THEN 'Ja' WHEN false THEN 'Nein' END AS infiziert diff --git a/spitalhygiene/pom.xml b/spitalhygiene/pom.xml new file mode 100644 index 0000000..550d910 --- /dev/null +++ b/spitalhygiene/pom.xml @@ -0,0 +1,15 @@ + + + 4.0.0 + io.sqooba.insel + spitalhygiene + 0.1.0-SNAPSHOT + pom + + + vre + + + + diff --git a/spitalhygiene/resources/Query_Atelier_Data.py b/spitalhygiene/resources/Query_Atelier_Data.py new file mode 100644 index 0000000..fc0cb18 --- /dev/null +++ b/spitalhygiene/resources/Query_Atelier_Data.py @@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- +"""This script controls the data update for all "raw" data in the VRE Model. It will execute all SQL queries +(i.e. all files with a '.sql' extension') found in ``SQL_DIR``, and save extracted data to CSV files in ``CSV_DIR``. +CSV and SQL files will be named identically, e.g.: + +``LA_ISH_NBEW.sql`` :math:`\\longrightarrow` ``LA_ISH_NBEW.csv`` + +The Atelier_DataScience is queried directly via the `pyodbc` module, and requires an additional connection file +containing details on the ODBC connection to the Atelier (see VRE Model Overview for more information). +""" + +import configparser +import os +import datetime +import pyodbc +import csv + + +def WriteSQLToCSV(path_to_sql, path_to_csv, csv_sep, connection_file, trusted_connection=True): + """Executes an SQL query and writes the results to path_to_csv. 
+ + Args: + path_to_sql (str): Path to .sql file containing the query to be executed + path_to_csv (str): Path to .csv file destination, to which data will be written to in "csv_sep" + delimited fashion + csv_sep (str): Delimiter used in the csv file + connection_file (str): path to file containing information used for server connection and authentication, + as well as database selection (read and passed to ``pyodbc.connect()`` ) + This information is read from an external file so as to avoid hard-coding usernames + and passwords + trusted_connection (bool): additional argument passed to pyodbc.connect(), converted to "yes" if ``True`` and + "no" otherwise (defaults to ``True``) + """ + connection_string = ';'.join([line.replace('\n', '') for line in open(connection_file, 'r')]) + + conn = pyodbc.connect(connection_string, trusted_connection='yes' if trusted_connection else 'no') + cursor = conn.cursor() + + # Read the SQL file + query = ' '.join([line.replace('\n', '') for line in open(path_to_sql, 'r')]) + + # execute query + cursor.execute(query) + + # Then write results to SQL + # --> register special dialect to control csv delimiter and proper newline formatting + csv.register_dialect('sql_special', delimiter=csv_sep, lineterminator='\n') + with open(path_to_csv, 'w') as writefile: + csv_writer = csv.writer(writefile, dialect = 'sql_special') + csv_writer.writerow([i[0] for i in cursor.description]) # write headers + csv_writer.writerows(cursor) + + # close connection + conn.close() + +if __name__ == '__main__': # Necessary to avoid code parsing by Sphinx + ################################################################################################################ + # Extract correct filepath + this_filepath = os.path.dirname(os.path.realpath(__file__)) + # contains the directory in which this script is located, irrespective of the current working directory + + # Load config file: + config_reader = configparser.ConfigParser() + 
config_reader.read(os.path.join(this_filepath, '../vre/src/main/python/vre/BasicConfig.ini')) + + SQL_DIR = os.path.join(this_filepath, "../sql/tests") if config_reader['PARAMETERS']['data_basis'] == 'test' \ + else os.path.join(this_filepath, "../sql/vre") # absolute or relative path to directory containing SQL files + + CSV_DIR = config_reader['PATHS']['test_data_dir'] if config_reader['PARAMETERS']['data_basis'] == 'test' \ + else config_reader['PATHS']['model_data_dir'] # absolute or relative path to directory where data is stored + + CSV_DELIM = config_reader['DELIMITERS']['csv_sep'] # delimiter for CSV files written from SQL results + + print(f"data_basis set to: {config_reader['PARAMETERS']['data_basis']}\n") + + ################################################################################################################ + # Execute all queries in SQL_DIR + print('Loading data from SQL server:\n') + + sql_files = [each_file for each_file in os.listdir(SQL_DIR) if each_file.endswith('.sql')] + # --> Use this line instead for loading only specific files: + # sql_files = [each_file for each_file in os.listdir(SQL_DIR) if each_file in ['OE_PFLEGE_MAP.sql']] + + for each_file in sql_files: + print('--> Loading file: ' + each_file + ' ... 
', end='', flush=True) + start_dt = datetime.datetime.now() + + # Execute query and write results + WriteSQLToCSV(path_to_sql=os.path.join(SQL_DIR, each_file), + path_to_csv=os.path.join(CSV_DIR, each_file.replace('.sql', '.csv')), csv_sep=CSV_DELIM, + connection_file=config_reader['PATHS']['odbc_file_path'], trusted_connection=False) + + print(f'\tDone !\t Total processing time: {str(datetime.datetime.now()-start_dt).split(".")[0]}') + # --> print timedelta without fractional seconds (original string would be printed as 0:00:13.4567) + + ##################################### + print('\nAll files loaded successfully !') + ##################################### + + diff --git a/spitalhygiene/resources/Update_Model.sh b/spitalhygiene/resources/Update_Model.sh new file mode 100644 index 0000000..3e8068d --- /dev/null +++ b/spitalhygiene/resources/Update_Model.sh @@ -0,0 +1,109 @@ +#!/bin/bash -e + +### Start time measurement +START_TIME=$SECONDS + +### Activate virtual environment +source /home/i0308559/vre_venv/bin/activate + +########################################################################################################################### +### VARIABLE DECLARATION +########################################################################################################################### + +MODEL_DIR=/home/i0308559/vre_input +NEO4J_DIR=/home/i0308559/vre_output/neo4j +FEATVEC_DIR=/home/i0308559/vre_output/feature_vector +LOG_PATH=/home/i0308559/vre_log + +########################################################################################################################### +### FILE BACKUP (ONLY PERFORMED IN HDFS) +########################################################################################################################### + +### Backup HDFS files from the SQL export (stored in MODEL_DIR) +echo "backing up HDFS files for model data" +# sudo -u hdfs hdfs dfs -rm -r ${MODEL_DIR}/old +# sudo -u hdfs hdfs dfs -mkdir ${MODEL_DIR}/old +# sudo -u 
hdfs hdfs dfs -mv ${MODEL_DIR}/*.csv ${MODEL_DIR}/old +echo "Done !" + +### Backup HDFS files from the Neo4J export +echo "backing up HDFS files for Neo4J data" +# sudo -u hdfs hdfs dfs -rm -r ${NEO4J_DIR}/old +# sudo -u hdfs hdfs dfs -mkdir ${NEO4J_DIR}/old +# sudo -u hdfs hdfs dfs -mv ${NEO4J_DIR}/*.csv ${NEO4J_DIR}/old +echo "Done !" + +### Backup the feature vector +echo "backing up the feature vector" +# sudo -u hdfs hdfs dfs -rm -r ${FEATVEC_DIR}/old +# sudo -u hdfs hdfs dfs -mkdir ${FEATVEC_DIR}/old +# sudo -u hdfs hdfs dfs -mv ${FEATVEC_DIR}/*.csv ${FEATVEC_DIR}/old +echo "Done !" + +########################################################################################################################### +### DATA PRE-PROCESSING +########################################################################################################################### +echo "Pre-processing data into the Atelier_DataScience database..." +python3.6 /home/i0308559/spitalhygiene/resources/preprocessor.py > ${LOG_PATH}/Preprocessor.log 2>&1 +echo "Done !" + +########################################################################################################################### +### MODEL DATA EXTRACTION +########################################################################################################################### + +### Extract data from the SQL server into CSV (must be called by the virtual environment interpreter to load pyodbc module) +echo "Extracting new data from the Atelier_DataScience into CSV..." +python3.6 /home/i0308559/spitalhygiene/resources/Query_Atelier_Data.py > ${LOG_PATH}/SQL_Data_Load.log 2>&1 # pyodbc module installed +# python3.6 /home/i0308559/spitalhygiene/resources/update_db_jar.py > ${LOG_PATH}/SQL_Data_Load.log 2>&1 # pyodbc module NOT installed +echo "Done !" 
+ +########################################################################################################################### +### MODEL CALCULATION +########################################################################################################################### + +### Run the feature_extractor.py script, which performs +# --> data load from the CSV files produced by the SQL query +# --> creation and export of the feature vector into FEATVEC_DIR +# --> Export of data in Neo4J-compatible format into NEO4J_DIR +# --> Calculation of the statistical model (random forests???) and its desired output (NOT DESIGNED YET - WHAT SHOULD WE PRODUCE?) +echo "Running feature_extractor.py..." +python3.6 /home/i0308559/spitalhygiene/vre/src/main/python/vre/data_compiler.py > ${LOG_PATH}/Compiler.log 2>&1 +echo "Done !" + +########################################################################################################################### +### TRANSFER TO HDFS +########################################################################################################################### + +### Add extracted CSV files to HDFS +echo "Putting files to HDFS:" +printf ">> SQL Query files... " +# sudo -u hdfs hdfs dfs -put ${MODEL_DIR}/*.csv /data1/sqooba/vre/model_data/ +echo "Done !" + +printf ">> Feature vector... " +# sudo -u hdfs hdfs dfs -put ${FEATVEC_DIR}/*.csv /data1/sqooba/vre/data_export/feature_vector +echo "Done !" + +printf ">> Neo4J files... " +# sudo -u hdfs hdfs dfs -put ${NEO4J_DIR}/*.csv /data1/sqooba/vre/data_export/neo4j +echo "Done !" + +########################################################################################################################### +### PROCESS MODEL OUTPUT (NOT YET USED) +########################################################################################################################### +# This part includes any additional processing steps (e.g. 
sending of a name list by email for screening candidates, etc) +# +# This part (i.e. the model PURPOSE) has not yet been determined. + +########################################################################################################################### +### PRINT SUCCESS AND EXECUTION TIME TO LOGFILE +########################################################################################################################### +ELAPSED_TIME=$(($SECONDS - $START_TIME)) +HOURS=$(($ELAPSED_TIME/3600)) +MINUTES=$((($ELAPSED_TIME-$HOURS*3600)/60)) +SECONDS=$((($ELAPSED_TIME-$HOURS*3600)%60)) + +echo "" +echo "Model updated successfully !" +echo "Update duration: $HOURS h, $MINUTES minutes, $SECONDS seconds" + diff --git a/spitalhygiene/resources/preprocessor.py b/spitalhygiene/resources/preprocessor.py new file mode 100644 index 0000000..eea55d4 --- /dev/null +++ b/spitalhygiene/resources/preprocessor.py @@ -0,0 +1,261 @@ +# -*- coding: utf-8 -*- +"""This script contains various functions for pre-processing data required in the VRE project. +This includes: + +- Regenerating the ward screening overview data stored in the ``Atelier_DataScience.dbo.WARD_SCREENINGS`` table +- ... + +Please refer to the script code for details on the various functions. + +----- +""" + +import pyodbc +import os +import configparser +import datetime + +def ExecuteSQL(sql_command, connection_file, trusted_connection=True): + """ + Executes an arbitrary SQL command, but does **not** return any results. 
+ + Args: + sql_command (str): SQL command to be executed + connection_file (str): path to file containing information used for server connection and authentication, + as well as database selection (read and passed to ``pyodbc.connect()`` ) + + trusted_connection (bool): additional argument passed to pyodbc.connect(), converted to "yes" if ``True`` and + "no" otherwise (defaults to ``True``) + """ + connection_string = ';'.join([line.replace('\n', '') for line in open(connection_file, 'r')]) + + conn = pyodbc.connect(connection_string, trusted_connection='yes' if trusted_connection else 'no') + cursor = conn.cursor() + + cursor.execute(sql_command) + + cursor.commit() # this is required for INSERT, DELETE and UPDATE statements to take effect in the DB + + cursor.close() + conn.close() + +def RecreateWardOverviewData(path_to_config_file, csv_sep=';'): + """ + Recreates dates at which specific screening types were active in various clinics. + + This information is found in the ``[...]/vre_input/screening_data/screening_overview.csv`` file. Its contents + are used to create a query for updating the ``Atelier_DataScience.dbo.WARD_SCREENINGS`` table. This query is written + to the ``[...]/vre_output/manual_sql_queries`` folder and named ``update_ward_screenings.sql``, + since the Atelier_Datascience_Reader does not have permission to execute TRUNCATE, DELETE, INSERT or UPDATE + statements. + + To Do: + Find a solution to automate this part. 
+ + Args: + path_to_config_file (str): path to the ``BasicConfig.ini`` file + csv_sep (str): separator used in the read file (defaults to ``;``) + """ + # Load config file: + config_reader = configparser.ConfigParser() + config_reader.read(path_to_config_file) + + # Prepare INSERT statement for table + sql_statement = 'TRUNCATE TABLE [Atelier_DataScience].[dbo].[WARD_SCREENINGS];\n' + + # Load data from file + all_screening_data = [each_line.replace('\n', '') for each_line in + open(os.path.join(config_reader['PATHS']['input_dir'], 'screening_data', + 'screening_overview.csv'), 'r')] + # --> Note: the data file contains the date of active screening in column 1, day of week in column 2 (not used), + # and active screenings in all other "pflegerischen" wards in subsequent columns + + for index, line in enumerate(all_screening_data): + if index == 0: # first line contains headers + all_wards = line.split(csv_sep) + else: + line_data = line.split(csv_sep) + for line_index, each_entry in enumerate(line_data): + if line_index > 1 and each_entry != '': + sql_statement += f"INSERT INTO [Atelier_DataScience].[dbo].[WARD_SCREENINGS] VALUES" \ + f"( '{datetime.datetime.strptime(line_data[0], '%d.%m.%Y').strftime('%Y-%m-%d')}'," \ + f"'{all_wards[line_index]}', '{each_entry}' )\n" + # Write statement to file + with open(os.path.join(config_reader['PATHS']['output_dir'], 'manual_sql_queries', + 'update_ward_screenings.sql'), 'w') as writequery: + writequery.write(sql_statement) + +def RecreateHospitalMap(path_to_config_file, csv_sep=';'): + """ + Recreates the *Hospital Map* in the ``Atelier_DataScience``. + + This map links the following important entities in the model: + + - fachliche OE + - pflegerische OE + - official abbreviation of pflegerische OE + - building in which pflegerische OE is located + - floor of building in which pflegerische OE is located + + All information required is found in the ``[...]/vre_input/maps/insel_map.csv`` file. 
Its contents + are used to create a query for updating the ``Atelier_DataScience.dbo.INSEL_MAP`` table. This query is written + to the ``[...]/vre_output/manual_sql_queries`` folder and named ``update_insel_map.sql``, since the + Atelier_Datascience_Reader does not have permission to execute TRUNCATE, DELETE, INSERT or UPDATE statements. + + Note: + Floors are very important, since rooms are exported "floor-wise" from Waveware. + + Args: + path_to_config_file (str): path to the ``BasicConfig.ini`` file + csv_sep (str): separator used in the read file (defaults to ``;``) + """ + # Load config file: + config_reader = configparser.ConfigParser() + config_reader.read(path_to_config_file) + + # Prepare INSERT statement for table + sql_statement = 'TRUNCATE TABLE [Atelier_DataScience].[dbo].[INSEL_MAP];\n' + + # Load data from file + all_map_data = [each_line.replace('\n', '') for each_line in + open(os.path.join(config_reader['PATHS']['input_dir'], 'maps', 'insel_map.csv'), 'r')] + + for index, each_line in enumerate(all_map_data): + if index > 0: # skip header row + line_data = each_line.split(csv_sep) + sql_statement += f'INSERT INTO [Atelier_DataScience].[dbo].[INSEL_MAP] VALUES (' + sql_statement += f"'{line_data[0]}', " + sql_statement += f"'{line_data[1]}', " if line_data[1] != '' else 'NULL, ' + sql_statement += f"'{line_data[2]}', " if line_data[2] != '' else 'NULL, ' + sql_statement += f"'{line_data[3]}', " + sql_statement += f"'{line_data[4]}', " + sql_statement += f"'{line_data[5]}', " + sql_statement += f"'{line_data[6]}', " if line_data[6] != '' else 'NULL, ' + sql_statement += f"'{line_data[7]}', " + sql_statement += f"'{line_data[8]}', " if line_data[8] != '' else 'NULL, ' + sql_statement += f"'{line_data[9]}' )\n" + + # Write statement to file + with open(os.path.join(config_reader['PATHS']['output_dir'], 'manual_sql_queries', + 'update_insel_map.sql'), 'w') as writequery: + writequery.write(sql_statement) + +def 
RecreatePflegerischeOEMap(path_to_config_file, csv_sep=';'): + """ + Recreates the map for pflegerische OEs in the ``Atelier_DataScience``. + + This map links "free-text" pflegerische OE names to the *official* names in the ``OE_pflege_abk`` column of the + ``Atelier_DataScience.dbo.INSEL_MAP`` table. + + All information required is found in the ``[...]/vre_input/maps/oe_pflege_map.csv`` file. Its contents + are used to create a query for updating the ``Atelier_DataScience.dbo.OE_PFLEGE_MAP`` table. This query is written + to the ``[...]/vre_output/manual_sql_queries`` folder and named ``update_oe_pflege_map.sql``, since the + Atelier_Datascience_Reader does not have permission to execute TRUNCATE, DELETE, INSERT or UPDATE statements. + + Args: + path_to_config_file (str): path to the ``BasicConfig.ini`` file + csv_sep (str): separator used in the read file (defaults to ``;``) + """ + # Load config file: + config_reader = configparser.ConfigParser() + config_reader.read(path_to_config_file) + + # Prepare INSERT statement for table + sql_statement = 'TRUNCATE TABLE [Atelier_DataScience].[dbo].[OE_PFLEGE_MAP];\n' + + # Load data from file + all_map_data = [each_line.replace('\n', '') for each_line in + open(os.path.join(config_reader['PATHS']['input_dir'], 'maps', 'oe_pflege_map.csv'), 'r')] + + for index, each_line in enumerate(all_map_data): + if index > 0: # skip header row + line_data = each_line.split(csv_sep) + sql_statement += f'INSERT INTO [Atelier_DataScience].[dbo].[OE_PFLEGE_MAP] VALUES (' + sql_statement += f"'{line_data[0]}', '{line_data[1]}' )\n" + + # Write statement to file + with open(os.path.join(config_reader['PATHS']['output_dir'], 'manual_sql_queries', + 'update_oe_pflege_map.sql'), 'w') as writequery: + writequery.write(sql_statement) + + +def RecreateScreeningData(path_to_config_file, csv_sep=';'): + """ + Recreates all screening data in the ``Atelier_DataScience``. 
+ + All information required is found in the ``[...]/vre_input/screening_data/vre_screenings.csv`` file. Its contents + are used to create a query for updating the ``Atelier_DataScience.dbo.VRE_SCREENING_DATA`` table. This query is + written to the ``[...]/vre_output/manual_sql_queries`` folder and named ``update_VRE_SCREENING_DATA.sql``, since the + Atelier_Datascience_Reader does not have permission to execute TRUNCATE, DELETE, INSERT or UPDATE statements. + + Args: + path_to_config_file (str): path to the ``BasicConfig.ini`` file + csv_sep (str): separator used in the read file (defaults to ``;``) + """ + # Load config file: + config_reader = configparser.ConfigParser() + config_reader.read(path_to_config_file) + + # Prepare INSERT statement for table + sql_statement = 'TRUNCATE TABLE [Atelier_DataScience].[dbo].[VRE_SCREENING_DATA];\n' + + # Load data from file + all_screening_data = [each_line.replace('\n', '') for each_line in + open(os.path.join(config_reader['PATHS']['input_dir'], 'screening_data', + 'vre_screenings.csv'), 'r')] + + for index, each_line in enumerate(all_screening_data): + if index > 0: # skip header row + line_data = each_line.split(csv_sep) + sql_statement += f'INSERT INTO [Atelier_DataScience].[dbo].[VRE_SCREENING_DATA] VALUES (' + sql_statement += f"'{line_data[0]}', " + sql_statement += f"'{line_data[1]}', " + sql_statement += f"'{line_data[2]}', " if line_data[2] != '' else 'NULL, ' + sql_statement += f"'{line_data[3]}', " + sql_statement += f"'{line_data[4]}', " if line_data[4] != '' else 'NULL, ' + sql_statement += f"'{line_data[5]}', " if line_data[5] != '' else 'NULL, ' + sql_statement += f"'{line_data[6]}', " if line_data[6] != '' else 'NULL, ' + sql_statement += f"'{line_data[7]}', " if line_data[7] != '' else 'NULL, ' + sql_statement += f"'{line_data[8]}', " + sql_statement += f"'{line_data[9]}', " if line_data[9] != '' else 'NULL, ' + sql_statement += f"'{line_data[10]}', " + sql_statement += f"'{line_data[11]}', " if 
line_data[11] != '' else 'NULL, ' + sql_statement += f"'{line_data[12]}', " + sql_statement += f"'{line_data[13]}' )\n" if line_data[13] != '' else 'NULL) \n' + + # Write statement to file + with open(os.path.join(config_reader['PATHS']['output_dir'], 'manual_sql_queries', + 'update_VRE_SCREENING_DATA.sql'), 'w') as writequery: + writequery.write(sql_statement) + +if __name__ == '__main__': # Necessary to avoid code parsing by Sphinx + ################################################################################################################ + # Extract correct filepath + this_filepath = os.path.dirname(os.path.realpath(__file__)) + # contains the directory in which this script is located, irrespective of the current working directory + + # Extract path to config file: + path_to_config_file = os.path.join(this_filepath, '../vre/src/main/python/vre/BasicConfig.ini') + + # #>> Preprocess data: + print('Pre-processing data:') + # 1) Ward Overview Data + print('--> Ward Overview Data... ', end='') + RecreateWardOverviewData(path_to_config_file=path_to_config_file) + print('Done !\n') + + # 2) Hospital Map + print('--> Hospital Map... ', end='') + RecreateHospitalMap(path_to_config_file=path_to_config_file) + print('Done !\n') + + # 3) Pflegerische OE Map + print('--> Pflegerische OE Map... ', end='') + RecreatePflegerischeOEMap(path_to_config_file=path_to_config_file) + print('Done !\n') + + # 4) Screening Data + print('--> Screening Data... 
', end='') + RecreateScreeningData(path_to_config_file=path_to_config_file) + print('Done !\n') + diff --git a/spitalhygiene/sql/quality/LA_ISH_NBEW_DQ.sql b/spitalhygiene/sql/quality/LA_ISH_NBEW_DQ.sql new file mode 100644 index 0000000..545b888 --- /dev/null +++ b/spitalhygiene/sql/quality/LA_ISH_NBEW_DQ.sql @@ -0,0 +1,38 @@ +SELECT + sum (case when [FALNR] is null then 0 else 1 end) as "FALNR_NONNULLCOUNT", + sum (case when [FALNR] is null then 1 else 0 end) as "FALNR_NULLCOUNT", + sum (case when [LFDNR] is null then 0 else 1 end) as "LFDNR_NONNULLCOUNT", + sum (case when [LFDNR] is null then 1 else 0 end) as "LFDNR_NULLCOUNT", + sum (case when [BEWTY] is null then 0 else 1 end) as "BEWTY_NONNULLCOUNT", + sum (case when [BEWTY] is null then 1 else 0 end) as "BEWTY_NULLCOUNT", + sum (case when [BWART] is null then 0 else 1 end) as "BWART_NONNULLCOUNT", + sum (case when [BWART] is null then 1 else 0 end) as "BWART_NULLCOUNT", + sum (case when [BWIDT] is null then 0 else 1 end) as "BWIDT_NONNULLCOUNT", + sum (case when [BWIDT] is null then 1 else 0 end) as "BWIDT_NULLCOUNT", + sum (case when [BWIZT] is null then 0 else 1 end) as "BWIZT_NONNULLCOUNT", + sum (case when [BWIZT] is null then 1 else 0 end) as "BWIZT_NULLCOUNT", + sum (case when [STATU] is null then 0 else 1 end) as "STATU_NONNULLCOUNT", + sum (case when [STATU] is null then 1 else 0 end) as "STATU_NULLCOUNT", + sum (case when [BWEDT] is null then 0 else 1 end) as "BWEDT_NONNULLCOUNT", + sum (case when [BWEDT] is null then 1 else 0 end) as "BWEDT_NULLCOUNT", + sum (case when [BWEZT] is null then 0 else 1 end) as "BWEZT_NONNULLCOUNT", + sum (case when [BWEZT] is null then 1 else 0 end) as "BWEZT_NULLCOUNT", + sum (case when [LFDREF] is null then 0 else 1 end) as "LFDREF_NONNULLCOUNT", + sum (case when [LFDREF] is null then 1 else 0 end) as "LFDREF_NULLCOUNT", + sum (case when [KZTXT] is null then 0 else 1 end) as "KZTXT_NONNULLCOUNT", + sum (case when [KZTXT] is null then 1 else 0 end) as 
"KZTXT_NULLCOUNT", + sum (case when [ORGFA] is null then 0 else 1 end) as "ORGFA_NONNULLCOUNT", + sum (case when [ORGFA] is null then 1 else 0 end) as "ORGFA_NULLCOUNT", + sum (case when [ORGPF] is null then 0 else 1 end) as "ORGPF_NONNULLCOUNT", + sum (case when [ORGPF] is null then 1 else 0 end) as "ORGPF_NULLCOUNT", + sum (case when [ORGAU] is null then 0 else 1 end) as "ORGAU_NONNULLCOUNT", + sum (case when [ORGAU] is null then 1 else 0 end) as "ORGAU_NULLCOUNT", + sum (case when [ZIMMR] is null then 0 else 1 end) as "ZIMMR_NONNULLCOUNT", + sum (case when [ZIMMR] is null then 1 else 0 end) as "ZIMMR_NULLCOUNT", + sum (case when [BETT] is null then 0 else 1 end) as "BETT_NONNULLCOUNT", + sum (case when [BETT] is null then 1 else 0 end) as "BETT_NULLCOUNT", + sum (case when [STORN] is null then 0 else 1 end) as "STORN_NONNULLCOUNT", + sum (case when [STORN] is null then 1 else 0 end) as "STORN_NULLCOUNT", + sum (case when [EXTKH] is null then 0 else 1 end) as "EXTKH_NONNULLCOUNT", + sum (case when [EXTKH] is null then 1 else 0 end) as "EXTKH_NULLCOUNT" +FROM [Atelier_DataScience].[atl].[LA_ISH_NBEW] \ No newline at end of file diff --git a/spitalhygiene/sql/quality/LA_ISH_NDIA_NORM_DQ.sql b/spitalhygiene/sql/quality/LA_ISH_NDIA_NORM_DQ.sql new file mode 100644 index 0000000..10fb22e --- /dev/null +++ b/spitalhygiene/sql/quality/LA_ISH_NDIA_NORM_DQ.sql @@ -0,0 +1,13 @@ +SELECT + sum (case when [FALNR] is null then 0 else 1 end) as "FALNR_NONNULLCOUNT", + sum (case when [FALNR] is null then 1 else 0 end) as "FALNR_NULLCOUNT", + sum (case when [DKEY1] is null then 0 else 1 end) as "DKEY1_NONNULLCOUNT", + sum (case when [DKEY1] is null then 1 else 0 end) as "DKEY1_NULLCOUNT", + sum (case when [DKAT1] is null then 0 else 1 end) as "DKAT1_NONNULLCOUNT", + sum (case when [DKAT1] is null then 1 else 0 end) as "DKAT1_NULLCOUNT", + sum (case when [DIADT] is null then 0 else 1 end) as "DIADT_NONNULLCOUNT", + sum (case when [DIADT] is null then 1 else 0 end) as 
"DIADT_NULLCOUNT", + sum (case when [DRG_CATEGORY] is null then 0 else 1 end) as "DRG_CATEGORY_NONNULLCOUNT", + sum (case when [DRG_CATEGORY] is null then 1 else 0 end) as "DRG_CATEGORY_NULLCOUNT" +FROM [Atelier_DataScience].[atl].[LA_ISH_NDIA_NORM] +WHERE DKEY1 is not NULL and DRG_CATEGORY is not null diff --git a/spitalhygiene/sql/quality/LA_ISH_NDRG_DQ.sql b/spitalhygiene/sql/quality/LA_ISH_NDRG_DQ.sql new file mode 100644 index 0000000..14d20e0 --- /dev/null +++ b/spitalhygiene/sql/quality/LA_ISH_NDRG_DQ.sql @@ -0,0 +1,6 @@ +SELECT + sum (case when [PATCASEID] is null then 0 else 1 end) as "PATCASEID_NONNULLCOUNT", + sum (case when [PATCASEID] is null then 1 else 0 end) as "PATCASEID_NULLCOUNT", + sum (case when [COST_WEIGHT] is null then 0 else 1 end) as "COST_WEIGHT_NONNULLCOUNT", + sum (case when [COST_WEIGHT] is null then 1 else 0 end) as "COST_WEIGHT_NULLCOUNT" +FROM [Atelier_DataScience].[atl].[LA_ISH_NDRG] diff --git a/spitalhygiene/sql/quality/LA_ISH_NFPZ_DQ.sql b/spitalhygiene/sql/quality/LA_ISH_NFPZ_DQ.sql new file mode 100644 index 0000000..222b996 --- /dev/null +++ b/spitalhygiene/sql/quality/LA_ISH_NFPZ_DQ.sql @@ -0,0 +1,14 @@ +SELECT + sum (case when [EARZT] is null then 0 else 1 end) as "EARZT_NONNULLCOUNT", + sum (case when [EARZT] is null then 1 else 0 end) as "EARZT_NULLCOUNT", + sum (case when [FARZT] is null then 0 else 1 end) as "FARZT_NONNULLCOUNT", + sum (case when [FARZT] is null then 1 else 0 end) as "FARZT_NULLCOUNT", + sum (case when [FALNR] is null then 0 else 1 end) as "FALNR_NONNULLCOUNT", + sum (case when [FALNR] is null then 1 else 0 end) as "FALNR_NULLCOUNT", + sum (case when [LFDNR] is null then 0 else 1 end) as "LFDNR_NONNULLCOUNT", + sum (case when [LFDNR] is null then 1 else 0 end) as "LFDNR_NULLCOUNT", + sum (case when [PERNR] is null then 0 else 1 end) as "PERNR_NONNULLCOUNT", + sum (case when [PERNR] is null then 1 else 0 end) as "PERNR_NULLCOUNT", + sum (case when [STORN] is null then 0 else 1 end) as 
"STORN_NONNULLCOUNT", + sum (case when [STORN] is null then 1 else 0 end) as "STORN_NULLCOUNT" +FROM [Atelier_DataScience].[atl].[LA_ISH_NFPZ] \ No newline at end of file diff --git a/spitalhygiene/sql/quality/LA_ISH_NGPA_DQ.sql b/spitalhygiene/sql/quality/LA_ISH_NGPA_DQ.sql new file mode 100644 index 0000000..1b59adb --- /dev/null +++ b/spitalhygiene/sql/quality/LA_ISH_NGPA_DQ.sql @@ -0,0 +1,22 @@ +SELECT + sum (case when [GPART] is null then 0 else 1 end) as "GPART_NONNULLCOUNT", + sum (case when [GPART] is null then 1 else 0 end) as "GPART_NULLCOUNT", + sum (case when [NAME1] is null then 0 else 1 end) as "NAME1_NONNULLCOUNT", + sum (case when [NAME1] is null then 1 else 0 end) as "NAME1_NULLCOUNT", + sum (case when [NAME2] is null then 0 else 1 end) as "NAME2_NONNULLCOUNT", + sum (case when [NAME2] is null then 1 else 0 end) as "NAME2_NULLCOUNT", + sum (case when [NAME3] is null then 0 else 1 end) as "NAME3_NONNULLCOUNT", + sum (case when [NAME3] is null then 1 else 0 end) as "NAME3_NULLCOUNT", + sum (case when [LAND] is null then 0 else 1 end) as "LAND_NONNULLCOUNT", + sum (case when [LAND] is null then 1 else 0 end) as "LAND_NULLCOUNT", + sum (case when [PSTLZ] is null then 0 else 1 end) as "PSTLZ_NONNULLCOUNT", + sum (case when [PSTLZ] is null then 1 else 0 end) as "PSTLZ_NULLCOUNT", + sum (case when [ORT] is null then 0 else 1 end) as "ORT_NONNULLCOUNT", + sum (case when [ORT] is null then 1 else 0 end) as "ORT_NULLCOUNT", + sum (case when [ORT2] is null then 0 else 1 end) as "ORT2_NONNULLCOUNT", + sum (case when [ORT2] is null then 1 else 0 end) as "ORT2_NULLCOUNT", + sum (case when [STRAS] is null then 0 else 1 end) as "STRAS_NONNULLCOUNT", + sum (case when [STRAS] is null then 1 else 0 end) as "STRAS_NULLCOUNT", + sum (case when [KRKHS] is null then 0 else 1 end) as "KRKHS_NONNULLCOUNT", + sum (case when [KRKHS] is null then 1 else 0 end) as "KRKHS_NULLCOUNT" +FROM [Atelier_DataScience].[atl].[LA_ISH_NGPA] \ No newline at end of file diff --git 
a/spitalhygiene/sql/quality/LA_ISH_NICP_DQ.sql b/spitalhygiene/sql/quality/LA_ISH_NICP_DQ.sql new file mode 100644 index 0000000..5c49dd8 --- /dev/null +++ b/spitalhygiene/sql/quality/LA_ISH_NICP_DQ.sql @@ -0,0 +1,20 @@ +SELECT + sum (case when [LFDBEW] is null then 0 else 1 end) as "LFDBEW_NONNULLCOUNT", + sum (case when [LFDBEW] is null then 1 else 0 end) as "LFDBEW_NULLCOUNT", + sum (case when [ICPMK] is null then 0 else 1 end) as "ICPMK_NONNULLCOUNT", + sum (case when [ICPMK] is null then 1 else 0 end) as "ICPMK_NULLCOUNT", + sum (case when [ICPML] is null then 0 else 1 end) as "ICPML_NONNULLCOUNT", + sum (case when [ICPML] is null then 1 else 0 end) as "ICPML_NULLCOUNT", + sum (case when [ANZOP] is null then 0 else 1 end) as "ANZOP_NONNULLCOUNT", + sum (case when [ANZOP] is null then 1 else 0 end) as "ANZOP_NULLCOUNT", + sum (case when [BGDOP] is null then 0 else 1 end) as "BGDOP_NONNULLCOUNT", + sum (case when [BGDOP] is null then 1 else 0 end) as "BGDOP_NULLCOUNT", + sum (case when [LSLOK] is null then 0 else 1 end) as "LSLOK_NONNULLCOUNT", + sum (case when [LSLOK] is null then 1 else 0 end) as "LSLOK_NULLCOUNT", + sum (case when [STORN] is null then 0 else 1 end) as "STORN_NONNULLCOUNT", + sum (case when [STORN] is null then 1 else 0 end) as "STORN_NULLCOUNT", + sum (case when [FALNR] is null then 0 else 1 end) as "FALNR_NONNULLCOUNT", + sum (case when [FALNR] is null then 1 else 0 end) as "FALNR_NULLCOUNT", + sum (case when [ORGPF] is null then 0 else 1 end) as "ORGPF_NONNULLCOUNT", + sum (case when [ORGPF] is null then 1 else 0 end) as "ORGPF_NULLCOUNT" +FROM [Atelier_DataScience].[atl].[LA_ISH_NICP] \ No newline at end of file diff --git a/spitalhygiene/sql/quality/TACS_DATEN_DQ.sql b/spitalhygiene/sql/quality/TACS_DATEN_DQ.sql new file mode 100644 index 0000000..ce74fbf --- /dev/null +++ b/spitalhygiene/sql/quality/TACS_DATEN_DQ.sql @@ -0,0 +1,26 @@ +SELECT + sum (case when [patient_patientid] is null then 0 else 1 end) as 
"patient_patientid_NONNULLCOUNT", + sum (case when [patient_patientid] is null then 1 else 0 end) as "patient_patientid_NULLCOUNT", + sum (case when [patient_typ] is null then 0 else 1 end) as "patient_typ_NONNULLCOUNT", + sum (case when [patient_typ] is null then 1 else 0 end) as "patient_typ_NULLCOUNT", + sum (case when [patient_status] is null then 0 else 1 end) as "patient_status_NONNULLCOUNT", + sum (case when [patient_status] is null then 1 else 0 end) as "patient_status_NULLCOUNT", + sum (case when [fall_nummer] is null then 0 else 1 end) as "fall_nummer_NONNULLCOUNT", + sum (case when [fall_nummer] is null then 1 else 0 end) as "fall_nummer_NULLCOUNT", + sum (case when [fall_typ] is null then 0 else 1 end) as "fall_typ_NONNULLCOUNT", + sum (case when [fall_typ] is null then 1 else 0 end) as "fall_typ_NULLCOUNT", + sum (case when [fall_status] is null then 0 else 1 end) as "fall_status_NONNULLCOUNT", + sum (case when [fall_status] is null then 1 else 0 end) as "fall_status_NULLCOUNT", + sum (case when [datum_betreuung] is null then 0 else 1 end) as "datum_betreuung_NONNULLCOUNT", + sum (case when [datum_betreuung] is null then 1 else 0 end) as "datum_betreuung_NULLCOUNT", + sum (case when [dauer_betreuung_in_min] is null then 0 else 1 end) as "dauer_betreuung_in_min_NONNULLCOUNT", + sum (case when [dauer_betreuung_in_min] is null then 1 else 0 end) as "dauer_betreuung_in_min_NULLCOUNT", + sum (case when [mitarbeiter_personalnummer] is null then 0 else 1 end) as "mitarbeiter_personalnummer_NONNULLCOUNT", + sum (case when [mitarbeiter_personalnummer] is null then 1 else 0 end) as "mitarbeiter_personalnummer_NULLCOUNT", + sum (case when [mitarbeiter_anstellungsnummer] is null then 0 else 1 end) as "mitarbeiter_anstellungsnummer_NONNULLCOUNT", + sum (case when [mitarbeiter_anstellungsnummer] is null then 1 else 0 end) as "mitarbeiter_anstellungsnummer_NULLCOUNT", + sum (case when [mitarbeiter_login] is null then 0 else 1 end) as "mitarbeiter_login_NONNULLCOUNT", 
+ sum (case when [mitarbeiter_login] is null then 1 else 0 end) as "mitarbeiter_login_NULLCOUNT", + sum (case when [BATCH_RUN_ID] is null then 0 else 1 end) as "BATCH_RUN_ID_NONNULLCOUNT", + sum (case when [BATCH_RUN_ID] is null then 1 else 0 end) as "BATCH_RUN_ID_NULLCOUNT" +FROM [Atelier_DataScience].[atl].[TACS_DATEN] \ No newline at end of file diff --git a/spitalhygiene/sql/quality/V_LA_IPD_DRUG_NORM_DQ.sql b/spitalhygiene/sql/quality/V_LA_IPD_DRUG_NORM_DQ.sql new file mode 100644 index 0000000..06ba5b6 --- /dev/null +++ b/spitalhygiene/sql/quality/V_LA_IPD_DRUG_NORM_DQ.sql @@ -0,0 +1,18 @@ +SELECT + sum (case when [PATIENTID] is null then 0 else 1 end) as "PATIENTID_NONNULLCOUNT", + sum (case when [PATIENTID] is null then 1 else 0 end) as "PATIENTID_NULLCOUNT", + sum (case when [CASEID] is null then 0 else 1 end) as "CASEID_NONNULLCOUNT", + sum (case when [CASEID] is null then 1 else 0 end) as "CASEID_NULLCOUNT", + sum (case when [DRUG_TEXT] is null then 0 else 1 end) as "DRUG_TEXT_NONNULLCOUNT", + sum (case when [DRUG_TEXT] is null then 1 else 0 end) as "DRUG_TEXT_NULLCOUNT", + sum (case when [DRUG_ATC] is null then 0 else 1 end) as "DRUG_ATC_NONNULLCOUNT", + sum (case when [DRUG_ATC] is null then 1 else 0 end) as "DRUG_ATC_NULLCOUNT", + sum (case when [DRUG_QUANTITY] is null then 0 else 1 end) as "DRUG_QUANTITY_NONNULLCOUNT", + sum (case when [DRUG_QUANTITY] is null then 1 else 0 end) as "DRUG_QUANTITY_NULLCOUNT", + sum (case when [DRUG_UNIT] is null then 0 else 1 end) as "DRUG_UNIT_NONNULLCOUNT", + sum (case when [DRUG_UNIT] is null then 1 else 0 end) as "DRUG_UNIT_NULLCOUNT", + sum (case when [DRUG_DISPFORM] is null then 0 else 1 end) as "DRUG_DISPFORM_NONNULLCOUNT", + sum (case when [DRUG_DISPFORM] is null then 1 else 0 end) as "DRUG_DISPFORM_NULLCOUNT", + sum (case when [DRUG_SUBMISSION] is null then 0 else 1 end) as "DRUG_SUBMISSION_NONNULLCOUNT", + sum (case when [DRUG_SUBMISSION] is null then 1 else 0 end) as "DRUG_SUBMISSION_NULLCOUNT" +FROM 
[Atelier_DataScience].[atl].[V_LA_IPD_DRUG_NORM] \ No newline at end of file diff --git a/spitalhygiene/sql/quality/V_LA_ISH_NFAL_NORM_DQ.sql b/spitalhygiene/sql/quality/V_LA_ISH_NFAL_NORM_DQ.sql new file mode 100644 index 0000000..d9c7b7c --- /dev/null +++ b/spitalhygiene/sql/quality/V_LA_ISH_NFAL_NORM_DQ.sql @@ -0,0 +1,20 @@ +SELECT + sum (case when [PATIENTID] is null then 0 else 1 end) as "PATIENTID_NONNULLCOUNT", + sum (case when [PATIENTID] is null then 1 else 0 end) as "PATIENTID_NULLCOUNT", + sum (case when [CASEID] is null then 0 else 1 end) as "CASEID_NONNULLCOUNT", + sum (case when [CASEID] is null then 1 else 0 end) as "CASEID_NULLCOUNT", + sum (case when [CASETYP] is null then 0 else 1 end) as "CASETYP_NONNULLCOUNT", + sum (case when [CASETYP] is null then 1 else 0 end) as "CASETYP_NULLCOUNT", + sum (case when [CASESTATUS] is null then 0 else 1 end) as "CASESTATUS_NONNULLCOUNT", + sum (case when [CASESTATUS] is null then 1 else 0 end) as "CASESTATUS_NULLCOUNT", + sum (case when [FALAR] is null then 0 else 1 end) as "FALAR_NONNULLCOUNT", + sum (case when [FALAR] is null then 1 else 0 end) as "FALAR_NULLCOUNT", + sum (case when [BEGDT] is null then 0 else 1 end) as "BEGDT_NONNULLCOUNT", + sum (case when [BEGDT] is null then 1 else 0 end) as "BEGDT_NULLCOUNT", + sum (case when [ENDDT] is null then 0 else 1 end) as "ENDDT_NONNULLCOUNT", + sum (case when [ENDDT] is null then 1 else 0 end) as "ENDDT_NULLCOUNT", + sum (case when [PATIENTTYP] is null then 0 else 1 end) as "PATIENTTYP_NONNULLCOUNT", + sum (case when [PATIENTTYP] is null then 1 else 0 end) as "PATIENTTYP_NULLCOUNT", + sum (case when [PATIENTSTATUS] is null then 0 else 1 end) as "PATIENTSTATUS_NONNULLCOUNT", + sum (case when [PATIENTSTATUS] is null then 1 else 0 end) as "PATIENTSTATUS_NULLCOUNT" +FROM [Atelier_DataScience].[atl].[V_LA_ISH_NFAL_NORM] \ No newline at end of file diff --git a/spitalhygiene/sql/quality/V_LA_ISH_NRSF_NORM_DQ.sql b/spitalhygiene/sql/quality/V_LA_ISH_NRSF_NORM_DQ.sql 
new file mode 100644 index 0000000..3eecc73 --- /dev/null +++ b/spitalhygiene/sql/quality/V_LA_ISH_NRSF_NORM_DQ.sql @@ -0,0 +1,13 @@ +SELECT + sum (case when [PATIENTID] is null then 0 else 1 end) as "PATIENTID_NONNULLCOUNT", + sum (case when [PATIENTID] is null then 1 else 0 end) as "PATIENTID_NULLCOUNT", + sum (case when [RSFNR] is null then 0 else 1 end) as "RSFNR_NONNULLCOUNT", + sum (case when [RSFNR] is null then 1 else 0 end) as "RSFNR_NULLCOUNT", + sum (case when [KZTXT] is null then 0 else 1 end) as "KZTXT_NONNULLCOUNT", + sum (case when [KZTXT] is null then 1 else 0 end) as "KZTXT_NULLCOUNT", + sum (case when [ERDAT] is null then 0 else 1 end) as "ERDAT_NONNULLCOUNT", + sum (case when [ERDAT] is null then 1 else 0 end) as "ERDAT_NULLCOUNT", + sum (case when [ERTIM] is null then 0 else 1 end) as "ERTIM_NONNULLCOUNT", + sum (case when [ERTIM] is null then 1 else 0 end) as "ERTIM_NULLCOUNT" +FROM [Atelier_DataScience].[atl].[V_LA_ISH_NRSF_NORM] +WHERE isnull(LOEKZ,'') <> 'X' \ No newline at end of file diff --git a/spitalhygiene/sql/tests/LA_ISH_NBEW.sql b/spitalhygiene/sql/tests/LA_ISH_NBEW.sql new file mode 100644 index 0000000..93080d8 --- /dev/null +++ b/spitalhygiene/sql/tests/LA_ISH_NBEW.sql @@ -0,0 +1,62 @@ +SELECT + [FALNR] + ,[LFDNR] + ,[BEWTY] + ,[BWART] + ,[BWIDT] + ,[BWIZT] + ,[STATU] + ,[BWEDT] + ,[BWEZT] + ,[LFDREF] + ,[KZTXT] + ,[ORGFA] + ,[ORGPF] + ,[ORGAU] + ,[ZIMMR] + ,[BETT] + ,[STORN] + ,[EXTKH] +FROM [Atelier_DataScience].[atl].[LA_ISH_NBEW] +WHERE FALNR IN ( + '0006280483', + '0006314210', + '0006336438', + '0005889802', + '0005873082', + '0006065973', + '0006091736', + '0006148746', + '0006334066', + '0006059391', + '0005976205', + '0006057834', + '0005983693', + '0006520444', + '0006931777', + '0006812114', + '0005965462', + '0006452545', + '0006433446', + '0006466165', + '0001927876', + '0004555507', + '0004728517', + '0001928507', + '0002802610', + '0004204668', + '0004181978', + '0006951942', + '0005880782', + '0002289902', + 
'0004411153', + '0004411005', + '0006565152', + '0003962974', + '0006594482', + '0006596375', + '0006551728', + '0005864325', + '0005877026', + '0006069476' + ) diff --git a/spitalhygiene/sql/tests/LA_ISH_NDIA_NORM.sql b/spitalhygiene/sql/tests/LA_ISH_NDIA_NORM.sql new file mode 100644 index 0000000..4f63dec --- /dev/null +++ b/spitalhygiene/sql/tests/LA_ISH_NDIA_NORM.sql @@ -0,0 +1,50 @@ +SELECT FALNR + ,DKEY1 + ,DKAT1 + ,DIADT + ,DRG_CATEGORY +FROM [Atelier_DataScience].[atl].[LA_ISH_NDIA_NORM] +WHERE DKEY1 IS NOT NULL + AND DRG_CATEGORY IS NOT NULL + AND FALNR IN ( + '0006280483' + ,'0006314210' + ,'0006336438' + ,'0005889802' + ,'0005873082' + ,'0006065973' + ,'0006091736' + ,'0006148746' + ,'0006334066' + ,'0006059391' + ,'0005976205' + ,'0006057834' + ,'0005983693' + ,'0006520444' + ,'0006931777' + ,'0006812114' + ,'0005965462' + ,'0006452545' + ,'0006433446' + ,'0006466165' + ,'0001927876' + ,'0004555507' + ,'0004728517' + ,'0001928507' + ,'0002802610' + ,'0004204668' + ,'0004181978' + ,'0006951942' + ,'0005880782' + ,'0002289902' + ,'0004411153' + ,'0004411005' + ,'0006565152' + ,'0003962974' + ,'0006594482' + ,'0006596375' + ,'0006551728' + ,'0005864325' + ,'0005877026' + ,'0006069476' + ) diff --git a/spitalhygiene/sql/tests/LA_ISH_NDRG.sql b/spitalhygiene/sql/tests/LA_ISH_NDRG.sql new file mode 100644 index 0000000..410b275 --- /dev/null +++ b/spitalhygiene/sql/tests/LA_ISH_NDRG.sql @@ -0,0 +1,5 @@ +SELECT + [PATCASEID], + [COST_WEIGHT] +FROM [Atelier_DataScience].[atl].[LA_ISH_NDRG] +where PATCASEID in 
('0005976205','0004204668','0005873082','0006594482','0006148746','0005877026','0006565152','0005983693','0006452545','0006057834','0005965462','0005889802','0006551728','0006065973','0004411005','0004411153','0006280483','0006466165','0006091736','0006433446','0005880782','0004555507','0006314210','0004181978','0005864325','0006596375','0002802610','0006334066','0002289902','0006069476','0006336438','0006520444','0006059391','0001927876','0001928507','0003962974','0004728517') diff --git a/spitalhygiene/sql/tests/LA_ISH_NFPZ.sql b/spitalhygiene/sql/tests/LA_ISH_NFPZ.sql new file mode 100644 index 0000000..16a6bc1 --- /dev/null +++ b/spitalhygiene/sql/tests/LA_ISH_NFPZ.sql @@ -0,0 +1,50 @@ +SELECT + [EARZT] + ,[FARZT] + ,[FALNR] + ,[LFDNR] + ,[PERNR] + ,[STORN] +FROM [Atelier_DataScience].[atl].[LA_ISH_NFPZ] +WHERE FALNR IN ( + '0006280483', + '0006314210', + '0006336438', + '0005889802', + '0005873082', + '0006065973', + '0006091736', + '0006148746', + '0006334066', + '0006059391', + '0005976205', + '0006057834', + '0005983693', + '0006520444', + '0006931777', + '0006812114', + '0005965462', + '0006452545', + '0006433446', + '0006466165', + '0001927876', + '0004555507', + '0004728517', + '0001928507', + '0002802610', + '0004204668', + '0004181978', + '0006951942', + '0005880782', + '0002289902', + '0004411153', + '0004411005', + '0006565152', + '0003962974', + '0006594482', + '0006596375', + '0006551728', + '0005864325', + '0005877026', + '0006069476' + ) diff --git a/spitalhygiene/sql/tests/LA_ISH_NGPA.sql b/spitalhygiene/sql/tests/LA_ISH_NGPA.sql new file mode 100644 index 0000000..f038cc2 --- /dev/null +++ b/spitalhygiene/sql/tests/LA_ISH_NGPA.sql @@ -0,0 +1,12 @@ +SELECT [GPART] + ,[NAME1] + ,[NAME2] + ,[NAME3] + ,[LAND] + ,[PSTLZ] + ,[ORT] + ,[ORT2] + ,[STRAS] + ,[KRKHS] +FROM [Atelier_DataScience].[atl].[LA_ISH_NGPA] +WHERE GPART IN ('0010000990') diff --git a/spitalhygiene/sql/tests/LA_ISH_NICP.sql b/spitalhygiene/sql/tests/LA_ISH_NICP.sql new file mode 
100644 index 0000000..78c4966 --- /dev/null +++ b/spitalhygiene/sql/tests/LA_ISH_NICP.sql @@ -0,0 +1,51 @@ +SELECT [LFDBEW] + ,[ICPMK] + ,[ICPML] + ,[ANZOP] + ,[BGDOP] + ,[LSLOK] + ,[STORN] + ,[FALNR] + ,[ORGPF] +FROM [Atelier_DataScience].[atl].[LA_ISH_NICP] +WHERE FALNR IN ( + '0006314210' + ,'0006336438' + ,'0005889802' + ,'0005873082' + ,'0006065973' + ,'0006091736' + ,'0006148746' + ,'0006334066' + ,'0006059391' + ,'0005976205' + ,'0006057834' + ,'0005983693' + ,'0006520444' + ,'0006931777' + ,'0006812114' + ,'0005965462' + ,'0006452545' + ,'0006433446' + ,'0006466165' + ,'0001927876' + ,'0004555507' + ,'0004728517' + ,'0001928507' + ,'0002802610' + ,'0004204668' + ,'0004181978' + ,'0006951942' + ,'0005880782' + ,'0002289902' + ,'0004411153' + ,'0004411005' + ,'0006565152' + ,'0003962974' + ,'0006594482' + ,'0006596375' + ,'0006551728' + ,'0005864325' + ,'0005877026' + ,'0006069476' + ) diff --git a/spitalhygiene/sql/tests/OBSOLETE_deleted_screenings.sql b/spitalhygiene/sql/tests/OBSOLETE_deleted_screenings.sql new file mode 100644 index 0000000..0fe4c5c --- /dev/null +++ b/spitalhygiene/sql/tests/OBSOLETE_deleted_screenings.sql @@ -0,0 +1,10 @@ +SELECT + NPAT.VNAME + , NPAT.NNAME + , NPAT.PATIENTID + , NPAT.GBDAT + , CASE WHEN len(Screening1Datum)>1 THEN CONVERT(date, Screening1Datum, 104) END AS ScreeningDate + FROM [Atelier_DataScience].[dbo].[deleted_screenings] as D + LEFT OUTER JOIN [Atelier_DataScience].[atl].[V_LA_ISH_NPAT_NORM] as NPAT ON + D.Name = NPAT.NNAME AND D.Vorname = NPAT.VNAME AND CONVERT(date, DatumGeboren, 104) = NPAT.GBDAT + WHERE NPAT.PATIENTID = '00003067149' \ No newline at end of file diff --git a/spitalhygiene/sql/tests/TACS_DATEN.sql b/spitalhygiene/sql/tests/TACS_DATEN.sql new file mode 100644 index 0000000..f82639a --- /dev/null +++ b/spitalhygiene/sql/tests/TACS_DATEN.sql @@ -0,0 +1,14 @@ +SELECT [patient_patientid] + ,[patient_typ] + ,[patient_status] + ,[fall_nummer] + ,[fall_typ] + ,[fall_status] + ,[datum_betreuung] + 
,[dauer_betreuung_in_min] + ,[mitarbeiter_personalnummer] + ,[mitarbeiter_anstellungsnummer] + ,[mitarbeiter_login] + ,[BATCH_RUN_ID] +FROM [Atelier_DataScience].[atl].[TACS_DATEN] +WHERE patient_patientid IN ('00003067149', '00008301433', '00004348346') diff --git a/spitalhygiene/sql/tests/V_DH_DIM_GERAET_CUR.sql b/spitalhygiene/sql/tests/V_DH_DIM_GERAET_CUR.sql new file mode 100644 index 0000000..e640db3 --- /dev/null +++ b/spitalhygiene/sql/tests/V_DH_DIM_GERAET_CUR.sql @@ -0,0 +1,35 @@ +SELECT [GERAETID] + ,[GERAETNAME] +FROM [Atelier_DataScience].[atl].[V_DH_DIM_GERAET_CUR] +WHERE GERAETID IN ( + '134074' + ,'125922' + ,'137160' + ,'125916' + ,'125981' + ,'125981' + ,'64174' + ,'125921' + ,'125981' + ,'125981' + ,'125981' + ,'125981' + ,'125981' + ,'28609' + ,'86293' + ,'125981' + ,'125981' + ,'125981' + ,'86293' + ,'125981' + ,'125981' + ,'64174' + ,'125981' + ,'125981' + ,'125981' + ,'125981' + ,'125981' + ,'125974' + ,'28609' + ,'125981' + ) diff --git a/spitalhygiene/sql/tests/V_DH_DIM_PATIENT_CUR.sql b/spitalhygiene/sql/tests/V_DH_DIM_PATIENT_CUR.sql new file mode 100644 index 0000000..866d525 --- /dev/null +++ b/spitalhygiene/sql/tests/V_DH_DIM_PATIENT_CUR.sql @@ -0,0 +1,15 @@ +SELECT + [PATIENTID] + ,[GESCHLECHT] = CASE GSCHL + WHEN 1 THEN 'männlich' + WHEN 2 THEN 'weiblich' + ELSE 'Unbekannt' + END + ,[GEBURTSDATUM] = GBDAT + ,[PLZ] = PSTLZ + ,[WOHNORT] = ORT + ,[KANTON] = BLAND + ,[SPRACHE] = SPRAS_TEXT +FROM [Atelier_DataScience].[atl].[V_LA_ISH_NPAT_NORM] +WHERE PATIENTID IN ('00003067149', '00008301433', '00004348346') + diff --git a/spitalhygiene/sql/tests/V_DH_DIM_RAUM_CUR.sql b/spitalhygiene/sql/tests/V_DH_DIM_RAUM_CUR.sql new file mode 100644 index 0000000..19ada76 --- /dev/null +++ b/spitalhygiene/sql/tests/V_DH_DIM_RAUM_CUR.sql @@ -0,0 +1,38 @@ +SELECT + [RAUMID], + [RAUMNAME] +FROM [Atelier_DataScience].[atl].[V_DH_DIM_RAUM_CUR] +WHERE RAUMID in ( +'140363', +'142476', +'61994', +'134701', +'128842', +'140371', +'127804', +'133962', 
+'90408', +'82813', +'123978', +'140417', +'127803', +'140370', +'128840', +'123499', +'124367', +'64855', +'140366', +'140360', +'140362', +'140369', +'140367', +'124368', +'90409', +'123981', +'125514', +'90411', +'125417', +'140372', +'140361', +'140368' +) \ No newline at end of file diff --git a/spitalhygiene/sql/tests/V_DH_DIM_TERMIN_CUR.sql b/spitalhygiene/sql/tests/V_DH_DIM_TERMIN_CUR.sql new file mode 100644 index 0000000..a0c52f5 --- /dev/null +++ b/spitalhygiene/sql/tests/V_DH_DIM_TERMIN_CUR.sql @@ -0,0 +1,108 @@ +SELECT [TERMINID] + ,[IS_DELETED] + ,[TERMINBEZEICHNUNG] + ,[TERMINART] + ,[TERMINTYP] + ,[TERMINDATUM] + ,[DAUERINMIN] +FROM [Atelier_DataScience].[atl].[V_DH_DIM_TERMIN_CUR] +WHERE TERMINID IN ( + '38515699' + ,'38321122' + ,'35416924' + ,'1164130 ' + ,'38470639' + ,'41827160' + ,'39893063' + ,'38411180' + ,'35571391' + ,'35130813' + ,'36160483' + ,'40766840' + ,'42155710' + ,'39491988' + ,'36067632' + ,'37374631' + ,'36129549' + ,'39001478' + ,'39425469' + ,'34338471' + ,'35630084' + ,'35139096' + ,'38431954' + ,'38452040' + ,'40344805' + ,'13831398' + ,'38063644' + ,'38539785' + ,'34220024' + ,'39819467' + ,'39423020' + ,'38386995' + ,'42394432' + ,'38446243' + ,'42213628' + ,'38565198' + ,'39893320' + ,'37244357' + ,'37554138' + ,'41124954' + ,'39051017' + ,'36129560' + ,'35621237' + ,'38772701' + ,'21130116' + ,'38063650' + ,'39608858' + ,'39427731' + ,'21131159' + ,'38331618' + ,'38062724' + ,'24171386' + ,'14908956' + ,'41909560' + ,'39114133' + ,'14091256' + ,'38939623' + ,'35626775' + ,'35139491' + ,'36006751' + ,'38329080' + ,'41909690' + ,'35130747' + ,'36129541' + ,'1278803 ' + ,'38507433' + ,'1192059 ' + ,'39456191' + ,'14091249' + ,'39933520' + ,'24291359' + ,'36071093' + ,'36160474' + ,'19096210' + ,'40218521' + ,'1162144 ' + ,'38660148' + ,'42211133' + ,'39613790' + ,'24230235' + ,'38262758' + ,'35417252' + ,'19252406' + ,'39215737' + ,'38446041' + ,'36830543' + ,'35200182' + ,'40766156' + ,'36070942' + ,'34310589' + 
,'37232112' + ,'34337667' + ,'38446523' + ,'34482529' + ,'17297480' + ,'39298995' + ,'36830574' + ,'1405150' + ) diff --git a/spitalhygiene/sql/tests/V_DH_FACT_TERMINGERAET.sql b/spitalhygiene/sql/tests/V_DH_FACT_TERMINGERAET.sql new file mode 100644 index 0000000..8b8c0e6 --- /dev/null +++ b/spitalhygiene/sql/tests/V_DH_FACT_TERMINGERAET.sql @@ -0,0 +1,106 @@ +SELECT [TERMINID] + ,[GERAETID] + ,[TERMINSTART_TS] + ,[TERMINENDE_TS] + ,[DAUERINMIN] +FROM [Atelier_DataScience].[atl].[V_DH_FACT_TERMINGERAET] +WHERE TERMINID IN ( + '38515699' + ,'38321122' + ,'35416924' + ,'1164130 ' + ,'38470639' + ,'41827160' + ,'39893063' + ,'38411180' + ,'35571391' + ,'35130813' + ,'36160483' + ,'40766840' + ,'42155710' + ,'39491988' + ,'36067632' + ,'37374631' + ,'36129549' + ,'39001478' + ,'39425469' + ,'34338471' + ,'35630084' + ,'35139096' + ,'38431954' + ,'38452040' + ,'40344805' + ,'13831398' + ,'38063644' + ,'38539785' + ,'34220024' + ,'39819467' + ,'39423020' + ,'38386995' + ,'42394432' + ,'38446243' + ,'42213628' + ,'38565198' + ,'39893320' + ,'37244357' + ,'37554138' + ,'41124954' + ,'39051017' + ,'36129560' + ,'35621237' + ,'38772701' + ,'21130116' + ,'38063650' + ,'39608858' + ,'39427731' + ,'21131159' + ,'38331618' + ,'38062724' + ,'24171386' + ,'14908956' + ,'41909560' + ,'39114133' + ,'14091256' + ,'38939623' + ,'35626775' + ,'35139491' + ,'36006751' + ,'38329080' + ,'41909690' + ,'35130747' + ,'36129541' + ,'1278803 ' + ,'38507433' + ,'1192059 ' + ,'39456191' + ,'14091249' + ,'39933520' + ,'24291359' + ,'36071093' + ,'36160474' + ,'19096210' + ,'40218521' + ,'1162144 ' + ,'38660148' + ,'42211133' + ,'39613790' + ,'24230235' + ,'38262758' + ,'35417252' + ,'19252406' + ,'39215737' + ,'38446041' + ,'36830543' + ,'35200182' + ,'40766156' + ,'36070942' + ,'34310589' + ,'37232112' + ,'34337667' + ,'38446523' + ,'34482529' + ,'17297480' + ,'39298995' + ,'36830574' + ,'1405150' + ) diff --git a/spitalhygiene/sql/tests/V_DH_FACT_TERMINMITARBEITER.sql 
b/spitalhygiene/sql/tests/V_DH_FACT_TERMINMITARBEITER.sql new file mode 100644 index 0000000..7a00b5d --- /dev/null +++ b/spitalhygiene/sql/tests/V_DH_FACT_TERMINMITARBEITER.sql @@ -0,0 +1,106 @@ +SELECT [TERMINID] + ,[MITARBEITERID] + ,[TERMINSTART_TS] + ,[TERMINENDE_TS] + ,[DAUERINMIN] +FROM [Atelier_DataScience].[atl].[V_DH_FACT_TERMINMITARBEITER] +WHERE TERMINID IN ( + '38515699' + ,'38321122' + ,'35416924' + ,'1164130 ' + ,'38470639' + ,'41827160' + ,'39893063' + ,'38411180' + ,'35571391' + ,'35130813' + ,'36160483' + ,'40766840' + ,'42155710' + ,'39491988' + ,'36067632' + ,'37374631' + ,'36129549' + ,'39001478' + ,'39425469' + ,'34338471' + ,'35630084' + ,'35139096' + ,'38431954' + ,'38452040' + ,'40344805' + ,'13831398' + ,'38063644' + ,'38539785' + ,'34220024' + ,'39819467' + ,'39423020' + ,'38386995' + ,'42394432' + ,'38446243' + ,'42213628' + ,'38565198' + ,'39893320' + ,'37244357' + ,'37554138' + ,'41124954' + ,'39051017' + ,'36129560' + ,'35621237' + ,'38772701' + ,'21130116' + ,'38063650' + ,'39608858' + ,'39427731' + ,'21131159' + ,'38331618' + ,'38062724' + ,'24171386' + ,'14908956' + ,'41909560' + ,'39114133' + ,'14091256' + ,'38939623' + ,'35626775' + ,'35139491' + ,'36006751' + ,'38329080' + ,'41909690' + ,'35130747' + ,'36129541' + ,'1278803 ' + ,'38507433' + ,'1192059 ' + ,'39456191' + ,'14091249' + ,'39933520' + ,'24291359' + ,'36071093' + ,'36160474' + ,'19096210' + ,'40218521' + ,'1162144 ' + ,'38660148' + ,'42211133' + ,'39613790' + ,'24230235' + ,'38262758' + ,'35417252' + ,'19252406' + ,'39215737' + ,'38446041' + ,'36830543' + ,'35200182' + ,'40766156' + ,'36070942' + ,'34310589' + ,'37232112' + ,'34337667' + ,'38446523' + ,'34482529' + ,'17297480' + ,'39298995' + ,'36830574' + ,'1405150' + ) diff --git a/spitalhygiene/sql/tests/V_DH_FACT_TERMINPATIENT.sql b/spitalhygiene/sql/tests/V_DH_FACT_TERMINPATIENT.sql new file mode 100644 index 0000000..f9bd843 --- /dev/null +++ b/spitalhygiene/sql/tests/V_DH_FACT_TERMINPATIENT.sql @@ -0,0 +1,5 @@ 
+SELECT [TERMINID] + ,[PATIENTID] + ,[FALLID] +FROM [Atelier_DataScience].[atl].[V_DH_FACT_TERMINPATIENT] +WHERE PATIENTID in ('00003067149', '00008301433', '00004348346') diff --git a/spitalhygiene/sql/tests/V_DH_FACT_TERMINRAUM.sql b/spitalhygiene/sql/tests/V_DH_FACT_TERMINRAUM.sql new file mode 100644 index 0000000..d2cd010 --- /dev/null +++ b/spitalhygiene/sql/tests/V_DH_FACT_TERMINRAUM.sql @@ -0,0 +1,110 @@ +SELECT [TERMINID] + ,FACT_TERMINRAUM.[RAUMID] + ,DIM_RAUM.RAUMNAME + ,[TERMINSTART_TS] + ,[TERMINENDE_TS] + ,[DAUERINMIN] +FROM [Atelier_DataScience].[atl].[V_DH_FACT_TERMINRAUM] as FACT_TERMINRAUM + LEFT JOIN [Atelier_DataScience].[atl].V_DH_DIM_RAUM_CUR as DIM_RAUM + ON FACT_TERMINRAUM.RAUMID = DIM_RAUM.RAUMID +WHERE DIM_RAUM.RAUMNAME IS NOT NULL + AND TERMINID IN ( + '38515699' + ,'38321122' + ,'35416924' + ,'1164130 ' + ,'38470639' + ,'41827160' + ,'39893063' + ,'38411180' + ,'35571391' + ,'35130813' + ,'36160483' + ,'40766840' + ,'42155710' + ,'39491988' + ,'36067632' + ,'37374631' + ,'36129549' + ,'39001478' + ,'39425469' + ,'34338471' + ,'35630084' + ,'35139096' + ,'38431954' + ,'38452040' + ,'40344805' + ,'13831398' + ,'38063644' + ,'38539785' + ,'34220024' + ,'39819467' + ,'39423020' + ,'38386995' + ,'42394432' + ,'38446243' + ,'42213628' + ,'38565198' + ,'39893320' + ,'37244357' + ,'37554138' + ,'41124954' + ,'39051017' + ,'36129560' + ,'35621237' + ,'38772701' + ,'21130116' + ,'38063650' + ,'39608858' + ,'39427731' + ,'21131159' + ,'38331618' + ,'38062724' + ,'24171386' + ,'14908956' + ,'41909560' + ,'39114133' + ,'14091256' + ,'38939623' + ,'35626775' + ,'35139491' + ,'36006751' + ,'38329080' + ,'41909690' + ,'35130747' + ,'36129541' + ,'1278803 ' + ,'38507433' + ,'1192059 ' + ,'39456191' + ,'14091249' + ,'39933520' + ,'24291359' + ,'36071093' + ,'36160474' + ,'19096210' + ,'40218521' + ,'1162144 ' + ,'38660148' + ,'42211133' + ,'39613790' + ,'24230235' + ,'38262758' + ,'35417252' + ,'19252406' + ,'39215737' + ,'38446041' + ,'36830543' + 
,'35200182' + ,'40766156' + ,'36070942' + ,'34310589' + ,'37232112' + ,'34337667' + ,'38446523' + ,'34482529' + ,'17297480' + ,'39298995' + ,'36830574' + ,'1405150' + ) diff --git a/spitalhygiene/sql/tests/V_DH_REF_CHOP.sql b/spitalhygiene/sql/tests/V_DH_REF_CHOP.sql new file mode 100644 index 0000000..95efbc1 --- /dev/null +++ b/spitalhygiene/sql/tests/V_DH_REF_CHOP.sql @@ -0,0 +1,76 @@ +SELECT CHOPCODE + ,CHOPVERWENDUNGSJAHR + ,CHOP + ,CHOPCODELEVEL1 + ,CHOPLEVEL1 + ,CHOPCODELEVEL2 + ,CHOPLEVEL2 + ,CHOPCODELEVEL3 + ,CHOPLEVEL3 + ,CHOPCODELEVEL4 + ,CHOPLEVEL4 + ,CHOPCODELEVEL5 + ,CHOPLEVEL5 + ,CHOPCODELEVEL6 + ,CHOPLEVEL6 + ,CHOPSTATUS + ,CHOPSAPKATALOGID +FROM [Atelier_DataScience].[atl].[V_DH_REF_CHOP] +WHERE CHOPCODE in ( + 'Z99.B8.11' + ,'Z50.27.32' + ,'Z00.99.60' + ,'Z88.38.60' + ,'Z99.85' + ,'Z89.07.24' + ,'Z50.27.32' + ,'Z00.99.60' + ,'Z88.38.60' + ,'Z99.85' + ,'Z50.23.13' + ,'Z50.12.09' + ,'Z88.79.50' + ,'Z00.9A.13' + ,'Z39.32.41' + ,'Z50.93' + ,'Z34.84' + ,'Z34.89.99' + ,'Z39.29.89' + ,'Z50.52' + ,'Z00.93.99' + ,'Z00.90.99' + ,'Z00.99.10' + ,'Z50.27.32' + ,'Z00.99.60' + ,'Z88.38.60' + ,'Z99.85' + ,'Z99.04.10' + ,'Z94.8X.40' + ,'Z99.B7.12' + ,'Z99.07.3C' + ,'Z99.04.15' + ,'Z99.05.47' + ,'Z99.B7.13' + ,'Z99.0A' + ,'Z99.28.11' + ,'Z50.52' + ,'Z54.52' + ,'Z00.93.99' + ,'Z00.90.99' + ,'Z51.22.11' + ,'Z39.29.89' + ,'Z99.00' + ,'Z54.12.11' + ,'Z50.12.12' + ,'Z88.79.50' + ,'Z54.25' + ,'Z36.11.22' + ,'Z36.11.26' + ,'Z36.1C.12' + ,'Z39.61.10' + ,'Z39.63' + ,'Z39.64' + ,'Z88.79.50' + ,'Z01.16.12' + ,'Z99.00' + ) diff --git a/spitalhygiene/sql/tests/V_LA_IPD_DRUG_NORM.sql b/spitalhygiene/sql/tests/V_LA_IPD_DRUG_NORM.sql new file mode 100644 index 0000000..a31cd16 --- /dev/null +++ b/spitalhygiene/sql/tests/V_LA_IPD_DRUG_NORM.sql @@ -0,0 +1,11 @@ +SELECT + [PATIENTID] + ,[CASEID] + ,[DRUG_TEXT] + ,[DRUG_ATC] + ,[DRUG_QUANTITY] + ,[DRUG_UNIT] + ,[DRUG_DISPFORM] + ,[DRUG_SUBMISSION] +FROM [Atelier_DataScience].[atl].[V_LA_IPD_DRUG_NORM] +WHERE PATIENTID IN 
('00003067149', '00008301433', '00004348346') diff --git a/spitalhygiene/sql/tests/V_LA_ISH_NFAL_NORM.sql b/spitalhygiene/sql/tests/V_LA_ISH_NFAL_NORM.sql new file mode 100644 index 0000000..78049bf --- /dev/null +++ b/spitalhygiene/sql/tests/V_LA_ISH_NFAL_NORM.sql @@ -0,0 +1,12 @@ +SELECT + [PATIENTID] + ,[CASEID] + ,[CASETYP] + ,[CASESTATUS] + ,[FALAR] + ,[BEGDT] + ,[ENDDT] + ,[PATIENTTYP] + ,[PATIENTSTATUS] +FROM [Atelier_DataScience].[atl].[V_LA_ISH_NFAL_NORM] +WHERE PATIENTID IN ('00003067149', '00008301433', '00004348346') diff --git a/spitalhygiene/sql/tests/V_LA_ISH_NRSF_NORM.sql b/spitalhygiene/sql/tests/V_LA_ISH_NRSF_NORM.sql new file mode 100644 index 0000000..8485b6c --- /dev/null +++ b/spitalhygiene/sql/tests/V_LA_ISH_NRSF_NORM.sql @@ -0,0 +1,8 @@ +SELECT + [PATIENTID], + [RSFNR], + [KZTXT], + [ERDAT], + [ERTIM] +FROM [Atelier_DataScience].[atl].[V_LA_ISH_NRSF_NORM] +WHERE PATIENTID in ('00003067149', '00008301433', '00004348346') \ No newline at end of file diff --git a/spitalhygiene/sql/tests/V_VRE_SCREENING_DATA.sql b/spitalhygiene/sql/tests/V_VRE_SCREENING_DATA.sql new file mode 100644 index 0000000..00395f9 --- /dev/null +++ b/spitalhygiene/sql/tests/V_VRE_SCREENING_DATA.sql @@ -0,0 +1,17 @@ +SELECT [auftrag_nr] + ,[erfassung] + ,[entnahme] + ,[vorname] + ,[nachname] + ,[geburtsdatum] + ,[patient_nr] + ,[pruefziffer] + ,[patient_id] + ,[auftraggeber] + ,[kostenstelle] + ,[material_typ] + ,[transport] + ,[resultat] + ,[analyse_methode] + ,[screening_context] +FROM [Atelier_DataScience].[dbo].[V_VRE_SCREENING_DATA] \ No newline at end of file diff --git a/spitalhygiene/sql/vre/LA_ISH_NBEW.sql b/spitalhygiene/sql/vre/LA_ISH_NBEW.sql new file mode 100644 index 0000000..405acc0 --- /dev/null +++ b/spitalhygiene/sql/vre/LA_ISH_NBEW.sql @@ -0,0 +1,21 @@ +SELECT + [FALNR] + ,[LFDNR] + ,[BEWTY] + ,[BWART] + ,[BWIDT] + ,[BWIZT] + ,[STATU] + ,[BWEDT] + ,[BWEZT] + ,[LFDREF] + ,[KZTXT] + ,[ORGFA] + ,[ORGPF] + ,[ORGAU] + ,[ZIMMR] + ,[BETT] + ,[STORN] + ,[EXTKH] 
+FROM [Atelier_DataScience].[atl].[LA_ISH_NBEW] +WHERE STORN IS NULL -- Exclude stornierte Bewegungen diff --git a/spitalhygiene/sql/vre/LA_ISH_NDIA_NORM.sql b/spitalhygiene/sql/vre/LA_ISH_NDIA_NORM.sql new file mode 100644 index 0000000..0a20946 --- /dev/null +++ b/spitalhygiene/sql/vre/LA_ISH_NDIA_NORM.sql @@ -0,0 +1,8 @@ +SELECT FALNR + ,DKEY1 + ,DKAT1 + ,DIADT + ,DRG_CATEGORY +FROM [Atelier_DataScience].[atl].[LA_ISH_NDIA_NORM] +WHERE DKEY1 IS NOT NULL + AND DRG_CATEGORY IS NOT NULL diff --git a/spitalhygiene/sql/vre/LA_ISH_NDRG.sql b/spitalhygiene/sql/vre/LA_ISH_NDRG.sql new file mode 100644 index 0000000..3af61d4 --- /dev/null +++ b/spitalhygiene/sql/vre/LA_ISH_NDRG.sql @@ -0,0 +1,4 @@ +SELECT + [PATCASEID], + [COST_WEIGHT] +FROM [Atelier_DataScience].[atl].[LA_ISH_NDRG] diff --git a/spitalhygiene/sql/vre/LA_ISH_NFPZ.sql b/spitalhygiene/sql/vre/LA_ISH_NFPZ.sql new file mode 100644 index 0000000..1db4e2e --- /dev/null +++ b/spitalhygiene/sql/vre/LA_ISH_NFPZ.sql @@ -0,0 +1,8 @@ +SELECT + [EARZT] + ,[FARZT] + ,[FALNR] + ,[LFDNR] + ,[PERNR] + ,[STORN] +FROM [Atelier_DataScience].[atl].[LA_ISH_NFPZ] diff --git a/spitalhygiene/sql/vre/LA_ISH_NGPA.sql b/spitalhygiene/sql/vre/LA_ISH_NGPA.sql new file mode 100644 index 0000000..d79878e --- /dev/null +++ b/spitalhygiene/sql/vre/LA_ISH_NGPA.sql @@ -0,0 +1,11 @@ +SELECT [GPART] + ,[NAME1] + ,[NAME2] + ,[NAME3] + ,[LAND] + ,[PSTLZ] + ,[ORT] + ,[ORT2] + ,[STRAS] + ,[KRKHS] +FROM [Atelier_DataScience].[atl].[LA_ISH_NGPA] diff --git a/spitalhygiene/sql/vre/LA_ISH_NICP.sql b/spitalhygiene/sql/vre/LA_ISH_NICP.sql new file mode 100644 index 0000000..c872b86 --- /dev/null +++ b/spitalhygiene/sql/vre/LA_ISH_NICP.sql @@ -0,0 +1,10 @@ +SELECT [LFDBEW] + ,[ICPMK] + ,[ICPML] + ,[ANZOP] + ,[BGDOP] + ,[LSLOK] + ,[STORN] + ,[FALNR] + ,[ORGPF] +FROM [Atelier_DataScience].[atl].[LA_ISH_NICP] diff --git a/spitalhygiene/sql/vre/OBSOLETE_deleted_screenings.sql b/spitalhygiene/sql/vre/OBSOLETE_deleted_screenings.sql new file mode 100644 index 
0000000..17fd8b9 --- /dev/null +++ b/spitalhygiene/sql/vre/OBSOLETE_deleted_screenings.sql @@ -0,0 +1,9 @@ +SELECT + NPAT.VNAME + , NPAT.NNAME + , NPAT.PATIENTID + , NPAT.GBDAT + , CASE WHEN len(Screening1Datum)>1 THEN CONVERT(date, Screening1Datum, 104) END AS ScreeningDate + FROM [Atelier_DataScience].[dbo].[deleted_screenings] as D + LEFT OUTER JOIN [Atelier_DataScience].[atl].[V_LA_ISH_NPAT_NORM] as NPAT ON + D.Name = NPAT.NNAME AND D.Vorname = NPAT.VNAME AND CONVERT(date, DatumGeboren, 104) = NPAT.GBDAT \ No newline at end of file diff --git a/spitalhygiene/sql/vre/OE_PFLEGE_MAP.sql b/spitalhygiene/sql/vre/OE_PFLEGE_MAP.sql new file mode 100644 index 0000000..e4d1a55 --- /dev/null +++ b/spitalhygiene/sql/vre/OE_PFLEGE_MAP.sql @@ -0,0 +1,3 @@ +SELECT [oe_pflege] + ,[oe_pflege_map] +FROM [Atelier_DataScience].[dbo].[OE_PFLEGE_MAP] diff --git a/spitalhygiene/sql/vre/TACS_DATEN.sql b/spitalhygiene/sql/vre/TACS_DATEN.sql new file mode 100644 index 0000000..e9a8242 --- /dev/null +++ b/spitalhygiene/sql/vre/TACS_DATEN.sql @@ -0,0 +1,13 @@ +SELECT [patient_patientid] + ,[patient_typ] + ,[patient_status] + ,[fall_nummer] + ,[fall_typ] + ,[fall_status] + ,[datum_betreuung] + ,[dauer_betreuung_in_min] + ,[mitarbeiter_personalnummer] + ,[mitarbeiter_anstellungsnummer] + ,[mitarbeiter_login] + ,[BATCH_RUN_ID] +FROM [Atelier_DataScience].[atl].[TACS_DATEN] diff --git a/spitalhygiene/sql/vre/V_DH_DIM_GERAET_CUR.sql b/spitalhygiene/sql/vre/V_DH_DIM_GERAET_CUR.sql new file mode 100644 index 0000000..70376dc --- /dev/null +++ b/spitalhygiene/sql/vre/V_DH_DIM_GERAET_CUR.sql @@ -0,0 +1,3 @@ +SELECT [GERAETID] + ,[GERAETNAME] +FROM [Atelier_DataScience].[atl].[V_DH_DIM_GERAET_CUR] diff --git a/spitalhygiene/sql/vre/V_DH_DIM_PATIENT_CUR.sql b/spitalhygiene/sql/vre/V_DH_DIM_PATIENT_CUR.sql new file mode 100644 index 0000000..c3affec --- /dev/null +++ b/spitalhygiene/sql/vre/V_DH_DIM_PATIENT_CUR.sql @@ -0,0 +1,14 @@ +SELECT + [PATIENTID] + ,[GESCHLECHT] = CASE GSCHL + WHEN 1 THEN 
'männlich' + WHEN 2 THEN 'weiblich' + ELSE 'Unbekannt' + END + ,[GEBURTSDATUM] = GBDAT + ,[PLZ] = PSTLZ + ,[WOHNORT] = ORT + ,[KANTON] = BLAND + ,[SPRACHE] = SPRAS_TEXT +FROM [Atelier_DataScience].[atl].[V_LA_ISH_NPAT_NORM] + diff --git a/spitalhygiene/sql/vre/V_DH_DIM_RAUM_CUR.sql b/spitalhygiene/sql/vre/V_DH_DIM_RAUM_CUR.sql new file mode 100644 index 0000000..f31d264 --- /dev/null +++ b/spitalhygiene/sql/vre/V_DH_DIM_RAUM_CUR.sql @@ -0,0 +1,4 @@ +SELECT + [RAUMID], + [RAUMNAME] +FROM [Atelier_DataScience].[atl].[V_DH_DIM_RAUM_CUR] \ No newline at end of file diff --git a/spitalhygiene/sql/vre/V_DH_DIM_TERMIN_CUR.sql b/spitalhygiene/sql/vre/V_DH_DIM_TERMIN_CUR.sql new file mode 100644 index 0000000..e050ce2 --- /dev/null +++ b/spitalhygiene/sql/vre/V_DH_DIM_TERMIN_CUR.sql @@ -0,0 +1,8 @@ +SELECT [TERMINID] + ,[IS_DELETED] + ,[TERMINBEZEICHNUNG] + ,[TERMINART] + ,[TERMINTYP] + ,[TERMINDATUM] + ,[DAUERINMIN] +FROM [Atelier_DataScience].[atl].[V_DH_DIM_TERMIN_CUR] diff --git a/spitalhygiene/sql/vre/V_DH_FACT_TERMINGERAET.sql b/spitalhygiene/sql/vre/V_DH_FACT_TERMINGERAET.sql new file mode 100644 index 0000000..71cb106 --- /dev/null +++ b/spitalhygiene/sql/vre/V_DH_FACT_TERMINGERAET.sql @@ -0,0 +1,6 @@ +SELECT [TERMINID] + ,[GERAETID] + ,[TERMINSTART_TS] + ,[TERMINENDE_TS] + ,[DAUERINMIN] +FROM [Atelier_DataScience].[atl].[V_DH_FACT_TERMINGERAET] diff --git a/spitalhygiene/sql/vre/V_DH_FACT_TERMINMITARBEITER.sql b/spitalhygiene/sql/vre/V_DH_FACT_TERMINMITARBEITER.sql new file mode 100644 index 0000000..a89af1f --- /dev/null +++ b/spitalhygiene/sql/vre/V_DH_FACT_TERMINMITARBEITER.sql @@ -0,0 +1,6 @@ +SELECT [TERMINID] + ,[MITARBEITERID] + ,[TERMINSTART_TS] + ,[TERMINENDE_TS] + ,[DAUERINMIN] +FROM [Atelier_DataScience].[atl].[V_DH_FACT_TERMINMITARBEITER] diff --git a/spitalhygiene/sql/vre/V_DH_FACT_TERMINPATIENT.sql b/spitalhygiene/sql/vre/V_DH_FACT_TERMINPATIENT.sql new file mode 100644 index 0000000..5503e61 --- /dev/null +++ 
b/spitalhygiene/sql/vre/V_DH_FACT_TERMINPATIENT.sql @@ -0,0 +1,4 @@ +SELECT [TERMINID] + ,[PATIENTID] + ,[FALLID] +FROM [Atelier_DataScience].[atl].[V_DH_FACT_TERMINPATIENT] diff --git a/spitalhygiene/sql/vre/V_DH_FACT_TERMINRAUM.sql b/spitalhygiene/sql/vre/V_DH_FACT_TERMINRAUM.sql new file mode 100644 index 0000000..219c0c1 --- /dev/null +++ b/spitalhygiene/sql/vre/V_DH_FACT_TERMINRAUM.sql @@ -0,0 +1,10 @@ +SELECT [TERMINID] + ,FACT_TERMINRAUM.[RAUMID] + ,DIM_RAUM.RAUMNAME + ,[TERMINSTART_TS] + ,[TERMINENDE_TS] + ,[DAUERINMIN] +FROM [Atelier_DataScience].[atl].[V_DH_FACT_TERMINRAUM] as FACT_TERMINRAUM + LEFT JOIN [Atelier_DataScience].[atl].V_DH_DIM_RAUM_CUR as DIM_RAUM + ON FACT_TERMINRAUM.RAUMID = DIM_RAUM.RAUMID +WHERE DIM_RAUM.RAUMNAME IS NOT NULL diff --git a/spitalhygiene/sql/vre/V_DH_REF_CHOP.sql b/spitalhygiene/sql/vre/V_DH_REF_CHOP.sql new file mode 100644 index 0000000..bccd388 --- /dev/null +++ b/spitalhygiene/sql/vre/V_DH_REF_CHOP.sql @@ -0,0 +1,18 @@ +SELECT CHOPCODE + ,CHOPVERWENDUNGSJAHR + ,CHOP + ,CHOPCODELEVEL1 + ,CHOPLEVEL1 + ,CHOPCODELEVEL2 + ,CHOPLEVEL2 + ,CHOPCODELEVEL3 + ,CHOPLEVEL3 + ,CHOPCODELEVEL4 + ,CHOPLEVEL4 + ,CHOPCODELEVEL5 + ,CHOPLEVEL5 + ,CHOPCODELEVEL6 + ,CHOPLEVEL6 + ,CHOPSTATUS + ,CHOPSAPKATALOGID +FROM [Atelier_DataScience].[atl].[V_DH_REF_CHOP] diff --git a/spitalhygiene/sql/vre/V_LA_IPD_DRUG_NORM.sql b/spitalhygiene/sql/vre/V_LA_IPD_DRUG_NORM.sql new file mode 100644 index 0000000..0b0da0a --- /dev/null +++ b/spitalhygiene/sql/vre/V_LA_IPD_DRUG_NORM.sql @@ -0,0 +1,10 @@ +SELECT + [PATIENTID] + ,[CASEID] + ,[DRUG_TEXT] + ,[DRUG_ATC] + ,[DRUG_QUANTITY] + ,[DRUG_UNIT] + ,[DRUG_DISPFORM] + ,[DRUG_SUBMISSION] +FROM [Atelier_DataScience].[atl].[V_LA_IPD_DRUG_NORM] diff --git a/spitalhygiene/sql/vre/V_LA_ISH_NFAL_NORM.sql b/spitalhygiene/sql/vre/V_LA_ISH_NFAL_NORM.sql new file mode 100644 index 0000000..1db0d72 --- /dev/null +++ b/spitalhygiene/sql/vre/V_LA_ISH_NFAL_NORM.sql @@ -0,0 +1,12 @@ +SELECT + [PATIENTID] + ,[CASEID] + 
,[CASETYP] + ,[CASESTATUS] + ,[FALAR] + ,[BEGDT] + ,[ENDDT] + ,[PATIENTTYP] + ,[PATIENTSTATUS] +FROM [Atelier_DataScience].[atl].[V_LA_ISH_NFAL_NORM] +WHERE FALAR = 1 diff --git a/spitalhygiene/sql/vre/V_LA_ISH_NRSF_NORM.sql b/spitalhygiene/sql/vre/V_LA_ISH_NRSF_NORM.sql new file mode 100644 index 0000000..6a44d0b --- /dev/null +++ b/spitalhygiene/sql/vre/V_LA_ISH_NRSF_NORM.sql @@ -0,0 +1,8 @@ +SELECT + [PATIENTID], + [RSFNR], + [KZTXT], + [ERDAT], + [ERTIM] +FROM [Atelier_DataScience].[atl].[V_LA_ISH_NRSF_NORM] +WHERE isnull(LOEKZ,'') <> 'X' \ No newline at end of file diff --git a/spitalhygiene/sql/vre/V_VRE_SCREENING_DATA.sql b/spitalhygiene/sql/vre/V_VRE_SCREENING_DATA.sql new file mode 100644 index 0000000..00395f9 --- /dev/null +++ b/spitalhygiene/sql/vre/V_VRE_SCREENING_DATA.sql @@ -0,0 +1,17 @@ +SELECT [auftrag_nr] + ,[erfassung] + ,[entnahme] + ,[vorname] + ,[nachname] + ,[geburtsdatum] + ,[patient_nr] + ,[pruefziffer] + ,[patient_id] + ,[auftraggeber] + ,[kostenstelle] + ,[material_typ] + ,[transport] + ,[resultat] + ,[analyse_methode] + ,[screening_context] +FROM [Atelier_DataScience].[dbo].[V_VRE_SCREENING_DATA] \ No newline at end of file diff --git a/spitalhygiene/sql/vre/WARD_SCREENINGS.sql b/spitalhygiene/sql/vre/WARD_SCREENINGS.sql new file mode 100644 index 0000000..e1ecd3f --- /dev/null +++ b/spitalhygiene/sql/vre/WARD_SCREENINGS.sql @@ -0,0 +1,4 @@ +SELECT [screening_day] + ,[org_pf] + ,[screening_type] +FROM [Atelier_DataScience].[dbo].[WARD_SCREENINGS] diff --git a/spitalhygiene/vre/pom.xml b/spitalhygiene/vre/pom.xml new file mode 100644 index 0000000..766c9ad --- /dev/null +++ b/spitalhygiene/vre/pom.xml @@ -0,0 +1,32 @@ + + + 4.0.0 + io.sqooba.insel + vre + 0.3.0-SNAPSHOT + + + io.sqooba.insel + spitalhygiene + 0.1.0-SNAPSHOT + + + + + + io.sqooba + maven-python-plugin + 1.1.2 + + + + package + + + + + + + + diff --git a/spitalhygiene/vre/src/main/python/README.md b/spitalhygiene/vre/src/main/python/README.md new file mode 100644 index 
0000000..6279494 --- /dev/null +++ b/spitalhygiene/vre/src/main/python/README.md @@ -0,0 +1,17 @@ +# Directory Overview + +Folder `vre` + +Contains all functions relevant for data processing, model creation and feature vector export. + +----- + +Folder `tests`: + +Contains test code for each class in the model. Tests are intended to be run with the `pytest` module as follows: + +- Make sure `test_data_dir` in `BasicConfig.ini` points to a directory containing all test CSV files. This is a small subset of the complete data extract of the Atelier_DataScience +and may **not** be in the repo (i.e. must be made available separately). +- Navigate to the `tests` directory +- Start the tests via `python -m pytest` or `python -m pytest > output.log` + diff --git a/spitalhygiene/vre/src/main/python/setup.py b/spitalhygiene/vre/src/main/python/setup.py new file mode 100644 index 0000000..5899366 --- /dev/null +++ b/spitalhygiene/vre/src/main/python/setup.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +import os +import codecs +from setuptools import setup, find_packages + + +# Utility function to read the README file. +# Used for the long_description. It's nice, because now 1) we have a top level +# README file and 2) it's easier to type in the README file than to put a raw +# string in below ... 
+def read(fname): + return open(os.path.join(os.path.dirname(__file__), fname)).read() + + +setup( + name="vre", + description=("Inselspital SpitalHygiene"), + keywords="inselspital vre python", + url="http://bitbucket.org/sqooba/...", # url of source code repository + classifiers=[ + "Topic :: Utilities", + ], + install_requires=[ + "scikit-learn==0.19.1", + "numpy==1.14.5", + "pandas==0.23.0", + "scipy==1.1.0", + "pytest==3.7.1", # for testing + ], + packages=find_packages(), # find all the modules automatically + include_package_data=True, # use MANIFEST.in during install if needed +) \ No newline at end of file diff --git a/spitalhygiene/vre/src/main/python/tests/conftest.py b/spitalhygiene/vre/src/main/python/tests/conftest.py new file mode 100644 index 0000000..0bc13cc --- /dev/null +++ b/spitalhygiene/vre/src/main/python/tests/conftest.py @@ -0,0 +1,115 @@ +import pytest +import csv +import os +import sys +import configparser +sys.path.append("../vre/model") + +from Patient import Patient +from Case import Case +from Move import Move +from Risk import Risk +from Medication import Medication +from Chop import Chop +from Surgery import Surgery +from Appointment import Appointment +from Device import Device +from Employee import Employee +from Room import Room +from Partner import Partner +from Care import Care +from ICD import ICD + +# Load configuration file +config_reader = configparser.ConfigParser() +config_reader.read('../vre/BasicConfig.ini') + +base_path = config_reader['PATHS']['test_data_dir'] # Must point to the directory containing all CSV testfiles - these are patient data and may NOT be in the repo ! 
+ +patients_path = os.path.join(base_path, "V_DH_DIM_PATIENT_CUR.csv") +cases_path = os.path.join(base_path, "V_LA_ISH_NFAL_NORM.csv") +moves_path = os.path.join(base_path, "LA_ISH_NBEW.csv") +risks_path = os.path.join(base_path, "V_LA_ISH_NRSF_NORM.csv") +deleted_risks_path = os.path.join(base_path, "deleted_screenings.csv") +medication_path = os.path.join(base_path, "V_LA_IPD_DRUG_NORM.csv") +chop_path = os.path.join(base_path, "V_DH_REF_CHOP.csv") +surgery_path = os.path.join(base_path, "LA_ISH_NICP.csv") +appointments_path = os.path.join(base_path, "V_DH_DIM_TERMIN_CUR.csv") +appointment_patient_path = os.path.join(base_path, "V_DH_FACT_TERMINPATIENT.csv") +devices_path = os.path.join(base_path, "V_DH_DIM_GERAET_CUR.csv") +device_appointment_path = os.path.join(base_path, "V_DH_FACT_TERMINGERAET.csv") +appointment_employee_path = os.path.join(base_path, "V_DH_FACT_TERMINMITARBEITER.csv") +rooms_path = os.path.join(base_path, "V_DH_DIM_RAUM_CUR.csv") +room_appointment_path = os.path.join(base_path, "V_DH_FACT_TERMINRAUM.csv") +partner_path = os.path.join(base_path, "LA_ISH_NGPA.csv") +partner_case_path = os.path.join(base_path, "LA_ISH_NFPZ.csv") +tacs_path = os.path.join(base_path, "TACS_DATEN.csv") +icd_path = os.path.join(base_path, "LA_ISH_NDIA_NORM.csv") + +def get_hdfs_pipe(path): + encoding = "iso-8859-1" + lines = csv.reader(open(path, 'r'), delimiter=config_reader['DELIMITERS']['csv_sep']) + next(lines, None) # skip header + return lines + +@pytest.fixture +def patient_data(): + rooms = dict() + wards = dict() + + patients = Patient.create_patient_dict(get_hdfs_pipe(patients_path)) + cases = Case.create_case_map(get_hdfs_pipe(cases_path), patients) + + partners = Partner.create_partner_map(get_hdfs_pipe(partner_path)) + Partner.add_partners_to_cases(get_hdfs_pipe(partner_case_path), cases, partners) + + Move.add_move_to_case( + get_hdfs_pipe(moves_path), + cases, + rooms, + wards, + partners, + ) + Risk.add_risk_to_patient(get_hdfs_pipe(risks_path), 
patients) + Risk.add_deleted_risk_to_patient(get_hdfs_pipe(deleted_risks_path), patients) + + drugs = Medication.create_drug_map(get_hdfs_pipe(medication_path)) + Medication.add_medication_to_case(get_hdfs_pipe(medication_path), cases) + + chops = Chop.create_chop_dict(get_hdfs_pipe(chop_path)) + Surgery.add_surgery_to_case(get_hdfs_pipe(surgery_path), cases, chops) + + appointments = Appointment.create_termin_map(get_hdfs_pipe(appointments_path)) + Appointment.add_appointment_to_case(get_hdfs_pipe(appointment_patient_path), cases, appointments) + + devices = Device.create_device_map(get_hdfs_pipe(devices_path)) + Device.add_device_to_appointment(get_hdfs_pipe(device_appointment_path), appointments, devices) + + employees = Employee.create_employee_map(get_hdfs_pipe(appointment_employee_path)) + Employee.add_employee_to_appointment( get_hdfs_pipe(appointment_employee_path), appointments, employees ) + + Care.add_care_to_case(get_hdfs_pipe(tacs_path), cases, employees) + + room_id_map = Room.create_room_id_map(get_hdfs_pipe(rooms_path)) + Room.add_room_to_appointment(get_hdfs_pipe(room_appointment_path), appointments, room_id_map, rooms) + + ICD_Codes = ICD.create_icd_dict(get_hdfs_pipe(icd_path)) + ICD.add_icd_to_case(get_hdfs_pipe(icd_path), cases) + + return dict( + { + "rooms": rooms, + "wards": wards, + "partners": partners, + "patients": patients, + "cases": cases, + "drugs": drugs, + "chops": chops, + "appointments": appointments, + "devices": devices, + "employees": employees, + "room_id_map": room_id_map, + "icd_codes": ICD_Codes + } + ) + diff --git a/spitalhygiene/vre/src/main/python/tests/pytest.ini b/spitalhygiene/vre/src/main/python/tests/pytest.ini new file mode 100644 index 0000000..96e7e1a --- /dev/null +++ b/spitalhygiene/vre/src/main/python/tests/pytest.ini @@ -0,0 +1,7 @@ +[pytest] + +python_files=test_*.py + +log_format = %(asctime)s %(levelname)s %(message)s +log_date_format = %Y-%m-%d %H:%M:%S +log_level = DEBUG diff --git 
a/spitalhygiene/vre/src/main/python/tests/test_age.py b/spitalhygiene/vre/src/main/python/tests/test_age.py new file mode 100644 index 0000000..bad33da --- /dev/null +++ b/spitalhygiene/vre/src/main/python/tests/test_age.py @@ -0,0 +1,3 @@ +def test_age(patient_data): + assert patient_data["patients"].get("00008301433").get_age() == 79 + assert patient_data["patients"].get("00003067149").get_age() == 65 \ No newline at end of file diff --git a/spitalhygiene/vre/src/main/python/tests/test_antibiotic_exposure.py b/spitalhygiene/vre/src/main/python/tests/test_antibiotic_exposure.py new file mode 100644 index 0000000..43e345c --- /dev/null +++ b/spitalhygiene/vre/src/main/python/tests/test_antibiotic_exposure.py @@ -0,0 +1,36 @@ +import datetime + + +def test_antibiotic_exposure(patient_data): + assert ( + len(patient_data["patients"].get("00003067149").get_antibiotic_exposure()) == 2 + ) + assert ( + patient_data["patients"] + .get("00008301433") + .get_antibiotic_exposure() + .get("J01DC02") + ) + + +def test_antibiotic_exposure_dates(patient_data): + assert ( + len( + patient_data["patients"] + .get("00008301433") + .get_antibiotic_exposure() + .get("J01DC02") + ) + == 2 + ) + assert datetime.datetime(2018, 7, 7).date() in patient_data["patients"].get( + "00008301433" + ).get_antibiotic_exposure().get("J01DC02") + assert datetime.datetime(2018, 7, 5).date() not in patient_data["patients"].get( + "00008301433" + ).get_antibiotic_exposure().get("J01DC02") + + +def test_dispforms(patient_data): + assert len(patient_data["patients"].get("00008301433").get_dispform()) == 7 + assert "p.o." 
in patient_data["patients"].get("00008301433").get_dispform() diff --git a/spitalhygiene/vre/src/main/python/tests/test_appointments.py b/spitalhygiene/vre/src/main/python/tests/test_appointments.py new file mode 100644 index 0000000..4367c5f --- /dev/null +++ b/spitalhygiene/vre/src/main/python/tests/test_appointments.py @@ -0,0 +1,5 @@ +def test_appointments_load(patient_data): + assert len(patient_data["appointments"]) == 87 + +def test_appointments_to_case(patient_data): + assert len(patient_data["cases"].get("0006452545").appointments) == 7 \ No newline at end of file diff --git a/spitalhygiene/vre/src/main/python/tests/test_care.py b/spitalhygiene/vre/src/main/python/tests/test_care.py new file mode 100644 index 0000000..d36ebf6 --- /dev/null +++ b/spitalhygiene/vre/src/main/python/tests/test_care.py @@ -0,0 +1,10 @@ +def test_add_care_to_case(patient_data): + sum = 0 + for case in patient_data["cases"].values(): + sum += len(case.cares) + assert sum == 409 + +def test_care_in_employees(patient_data): + for case in patient_data["cases"].values(): + for care in case.cares: + assert care.employee.mitarbeiter_id in list(patient_data["employees"].keys()) diff --git a/spitalhygiene/vre/src/main/python/tests/test_case_timing.py b/spitalhygiene/vre/src/main/python/tests/test_case_timing.py new file mode 100644 index 0000000..deb9b79 --- /dev/null +++ b/spitalhygiene/vre/src/main/python/tests/test_case_timing.py @@ -0,0 +1,117 @@ +import datetime + + +def test_case_timing(patient_data): + # beg_dt and end_dt are not set in the source data + assert patient_data["cases"].get("0006314210").beg_dt is None + assert patient_data["cases"].get("0006314210").end_dt is None + + # moves_start and moves_end are set to the beginning of the first move and end of the last move respectively + assert patient_data["cases"].get("0006314210").moves_start == datetime.datetime( + 2018, 1, 5, 11, 0 + ) + assert patient_data["cases"].get("0006314210").moves_end == datetime.datetime( + 2018, 
1, 16, 13, 0 + ) + + # length of stay is the timedelta between first and last move + assert patient_data["cases"].get( + "0006314210" + ).get_length_of_stay() == datetime.timedelta(11, 7200) + + # length of stay for non-stationary cases must be None + assert patient_data["cases"].get("0006334066").moves_start == datetime.datetime( + 2018, 4, 9, 11, 7 + ) + assert patient_data["cases"].get("0006334066").moves_end == datetime.datetime( + 2018, 8, 17, 10, 56 + ) + assert patient_data["cases"].get("0006334066").get_length_of_stay() is None + + # case 0006594482 is almost 8 days + assert patient_data["cases"].get( + "0006594482" + ).get_length_of_stay() == datetime.timedelta(7, 84600) + + # case 0006594482 is relevant case of patient with risk date during the case. thus, length of relevant stay is shorter + assert patient_data["patients"].get( + "00008301433" + ).get_length_of_relevant_case() == datetime.timedelta(4, 50400) + + +def test_case_stationary(patient_data): + assert patient_data["cases"].get("0005976205").is_stationary() + assert not patient_data["cases"].get("0005965462").is_stationary() + + +def test_case_open_before(patient_data): + # 0005976205 starts at 2017-6-16 and ends at 2017-6-17 + assert ( + patient_data["cases"] + .get("0005976205") + .open_before_or_at_date(datetime.datetime(2017, 6, 16).date()) + ) + assert ( + patient_data["cases"] + .get("0005976205") + .open_before_or_at_date(datetime.datetime(2017, 7, 1).date()) + ) + assert ( + not patient_data["cases"] + .get("0005976205") + .open_before_or_at_date(datetime.datetime(2017, 6, 15).date()) + ) + + +def test_case_closed_after(patient_data): + assert ( + patient_data["cases"] + .get("0005976205") + .closed_after_or_at_date(datetime.datetime(2017, 6, 17).date()) + ) + assert ( + patient_data["cases"] + .get("0005976205") + .closed_after_or_at_date(datetime.datetime(2017, 5, 1).date()) + ) + assert ( + not patient_data["cases"] + .get("0005976205") + 
.closed_after_or_at_date(datetime.datetime(2017, 7, 1).date()) + ) + + +def test_relevant_date(patient_data): + # for a patient without risk factor, the relevant date is now + assert ( + patient_data["patients"].get("00004348346").get_relevant_date() + == datetime.datetime.now().date() + ) + + # for a patient with risk factor, the relevant date is the date of the risk factor + assert ( + patient_data["patients"].get("00008301433").get_relevant_date() + == datetime.datetime(2018, 7, 9).date() + ) + + +def test_relevant_case(patient_data): + # patient has no stationary case -> no relevant case + assert patient_data["patients"].get("00004348346").get_relevant_case() is None + + # case 0006314210: 5.1.2018 - 16.1.2018 + assert ( + patient_data["patients"].get("00003067149").get_relevant_case().case_id + == "0006314210" + ) + assert ( + patient_data["patients"] + .get("00003067149") + .get_relevant_case(since=datetime.datetime(2018, 2, 1).date()) + is None + ) + + assert ( + patient_data["patients"].get("00008301433").get_relevant_case().case_id + == "0006594482" + ) diff --git a/spitalhygiene/vre/src/main/python/tests/test_chop_codes.py b/spitalhygiene/vre/src/main/python/tests/test_chop_codes.py new file mode 100644 index 0000000..dd0a4a6 --- /dev/null +++ b/spitalhygiene/vre/src/main/python/tests/test_chop_codes.py @@ -0,0 +1,28 @@ +def test_chop_codes_load(patient_data): + assert len(patient_data["chops"]) == 250 + + +def test_chop_code_description(patient_data): + print(patient_data["chops"].get("Z39.64_18").get_detailed_chop()) + assert ( + patient_data["chops"].get("Z39.64_18").get_detailed_chop() + == "Vorläufiger Schrittmacher eingesetzt während und unmittelbar nach herzchirurgischem Eingriff" # Note weird encoding of german "Umlaut" strings + ) + + +def test_lowest_level_code(patient_data): + assert patient_data["chops"].get("Z39.64_18").get_lowest_level_code() == "Z39" + assert patient_data["chops"].get("Z89.07.24_16").get_lowest_level_code() == "Z89" + + 
+def test_surgeries_per_chop(patient_data): + assert len(patient_data["chops"].get("Z39.61.10_18").cases) == 1 + + +def test_relevant_chops(patient_data): + # 99.04.10 + assert len(patient_data["patients"].get("00008301433").get_chop_codes()) == 12 + +def test_has_surgery(patient_data): + assert patient_data["patients"].get("00008301433").has_surgery() + assert not patient_data["patients"].get("00004348346").has_surgery() \ No newline at end of file diff --git a/spitalhygiene/vre/src/main/python/tests/test_devices.py b/spitalhygiene/vre/src/main/python/tests/test_devices.py new file mode 100644 index 0000000..f439c60 --- /dev/null +++ b/spitalhygiene/vre/src/main/python/tests/test_devices.py @@ -0,0 +1,7 @@ +def test_device_load(patient_data): + assert len(patient_data["devices"]) == 10 + + +def test_relevant_devices(patient_data): + assert len(patient_data["patients"].get("00008301433").get_devices()) == 1 + assert "ECC" in patient_data["patients"].get("00008301433").get_devices() diff --git a/spitalhygiene/vre/src/main/python/tests/test_employees.py b/spitalhygiene/vre/src/main/python/tests/test_employees.py new file mode 100644 index 0000000..4bc9057 --- /dev/null +++ b/spitalhygiene/vre/src/main/python/tests/test_employees.py @@ -0,0 +1,7 @@ +def test_employee_load(patient_data): + assert len(patient_data["employees"]) == 182 + + +def test_relevant_employees(patient_data): + assert len(patient_data["patients"].get("00008301433").get_employees()) == 41 + assert "0030236" in patient_data["patients"].get("00008301433").get_employees() diff --git a/spitalhygiene/vre/src/main/python/tests/test_features.py b/spitalhygiene/vre/src/main/python/tests/test_features.py new file mode 100644 index 0000000..d94f84f --- /dev/null +++ b/spitalhygiene/vre/src/main/python/tests/test_features.py @@ -0,0 +1,64 @@ +from sklearn.feature_extraction import DictVectorizer + +def test_features(patient_data): + assert patient_data["patients"].get("00004348346").get_features() is None + + 
nr_not_none = 0 + for patient in patient_data["patients"].values(): + if patient.get_features() is not None: + nr_not_none += 1 + + features = patient.get_features() + + assert type(features.get("age", None)).__name__=="int" + assert 0 <= features.get("age") < 150 + + assert type(features.get("length_of_stay")).__name__=="int" + assert 0 <= features.get("length_of_stay") + + assert type(features.get("surgery")).__name__ == "bool" + + assert type(features.get("icu")).__name__ == "bool" + + assert type(features.get("plz")).__name__ == "str" + assert type(features.get("kanton")).__name__ == "str" + + assert type(features.get("language")).__name__ == "str" + + assert nr_not_none == 2 + +def test_features_antibiotic_exposure(patient_data): + features = patient_data["patients"].get("00008301433").get_features() + assert features["antibiotic=J01DC02"] == 2 + +def test_features_chop_codes(patient_data): + features = patient_data["patients"].get("00008301433").get_features() + assert features["chop=Z99"] + +def test_features_rooms(patient_data): + features = patient_data["patients"].get("00008301433").get_features() + assert features["room=BH N 125"] + assert features["room=KARR EKG"] + +def test_features_devices(patient_data): + features = patient_data["patients"].get("00008301433").get_features() + assert features["device=ECC"] + +def test_features_employees(patient_data): + features = patient_data["patients"].get("00008301433").get_features() + assert features["employee=0030236"] == 203 # from RAP + assert features["employee=0324009"] == 43 # from TACS + +def test_dict_vectorizer(patient_data): + risk_factors = [] + for patient in patient_data["patients"].values(): + p_risk_factors = patient.get_features() + if p_risk_factors is not None: + risk_factors.append(p_risk_factors) + v = DictVectorizer(sparse=False) + features = v.fit_transform(risk_factors) + + assert len(features) == 2 + assert len(features[0]) == 203 + + assert "device=ECC" in v.vocabulary_ diff --git 
a/spitalhygiene/vre/src/main/python/tests/test_icu_stay.py b/spitalhygiene/vre/src/main/python/tests/test_icu_stay.py new file mode 100644 index 0000000..68fb3b5 --- /dev/null +++ b/spitalhygiene/vre/src/main/python/tests/test_icu_stay.py @@ -0,0 +1,5 @@ + +def test_has_icu_stay(patient_data): + icu_orgs = ["N NORD"] + assert(patient_data["patients"].get("00008301433").has_icu_stay(icu_orgs)) + assert ( not patient_data["patients"].get("00003067149").has_icu_stay(icu_orgs)) \ No newline at end of file diff --git a/spitalhygiene/vre/src/main/python/tests/test_idc_codes.py b/spitalhygiene/vre/src/main/python/tests/test_idc_codes.py new file mode 100644 index 0000000..bb38d49 --- /dev/null +++ b/spitalhygiene/vre/src/main/python/tests/test_idc_codes.py @@ -0,0 +1,17 @@ + +def test_icd_code(patient_data): + """ + Test ICD codes for case "0006148746" + + :param patient_data: Dictionary containing all VRE-relevant information (see @pytest.fixture patient_data() for details) + """ + icd_list = [each_icd.icd_code for each_icd in patient_data['cases'].get('0006148746').icd_codes] + + # This patient should have 5 ICD codes: C22.0, B16.9, G47.31, J45.9 and Z00.6 + + assert 'C22.0' in icd_list + assert 'B16.9' in icd_list + assert 'G47.31' in icd_list + assert 'J45.9' in icd_list + assert 'Z00.6' in icd_list + diff --git a/spitalhygiene/vre/src/main/python/tests/test_label.py b/spitalhygiene/vre/src/main/python/tests/test_label.py new file mode 100644 index 0000000..6c45d9c --- /dev/null +++ b/spitalhygiene/vre/src/main/python/tests/test_label.py @@ -0,0 +1,7 @@ +def test_label_patient(patient_data): + # no relevant case + assert patient_data["patients"].get("00004348346").get_label() == -1 + # screening removed + assert patient_data["patients"].get("00003067149").get_label() == 2 + # screening inconclusive: 00008301433 + assert patient_data["patients"].get("00008301433").get_label() == 1 diff --git a/spitalhygiene/vre/src/main/python/tests/test_load_data.py
b/spitalhygiene/vre/src/main/python/tests/test_load_data.py new file mode 100644 index 0000000..34b38b2 --- /dev/null +++ b/spitalhygiene/vre/src/main/python/tests/test_load_data.py @@ -0,0 +1,56 @@ +import os +import sys +sys.path.append('../vre/model') +import pytest +import configparser + +from Patient import Patient +from Case import Case +from Move import Move +from Risk import Risk + +from conftest import get_hdfs_pipe + +# Load configuration file +config_reader = configparser.ConfigParser() +config_reader.read('../vre/BasicConfig.ini') + +base_path = config_reader['PATHS']['test_data_dir'] + +patients_path = os.path.join(base_path, "V_DH_DIM_PATIENT_CUR.csv") +cases_path = os.path.join(base_path, "V_LA_ISH_NFAL_NORM.csv") +moves_path = os.path.join(base_path, "LA_ISH_NBEW.csv") + + +def test_load_patients(): + pytest.patients = dict() + pytest.cases = dict() + pytest.rooms = dict() + pytest.wards = dict() + pytest.partners = dict() + + pytest.patients = Patient.create_patient_dict(get_hdfs_pipe(patients_path)) + assert len(pytest.patients) == 3 + + +def test_load_cases(): + pytest.cases = Case.create_case_map(get_hdfs_pipe(cases_path), pytest.patients) + assert len(pytest.cases) == 31 + assert pytest.cases.get("0006314210", None) is not None + assert pytest.patients.get("00003067149", None) is not None + + +def test_load_moves(): + Move.add_move_to_case( + get_hdfs_pipe(moves_path), + pytest.cases, + pytest.rooms, + pytest.wards, + pytest.partners, + ) + assert len(pytest.patients.get("00003067149").cases.get("0006314210").moves) == 12 + assert len(pytest.rooms) == 15 + assert len(pytest.wards) == 26 + assert pytest.rooms.get("BH H 116", None) is not None + assert pytest.wards.get("INEGE 2", None) is not None + assert len(pytest.rooms.get("BH H 116").moves) == 3 diff --git a/spitalhygiene/vre/src/main/python/tests/test_model_creator.py b/spitalhygiene/vre/src/main/python/tests/test_model_creator.py new file mode 100644 index 0000000..cc00769 --- /dev/null 
+++ b/spitalhygiene/vre/src/main/python/tests/test_model_creator.py @@ -0,0 +1,53 @@ +import sys +sys.path.append('../vre') + +from feature_extractor import feature_extractor + +import pandas as pd + + +def test_prepare_features_and_labels(patient_data): + mc = feature_extractor() + features, labels, dates, v = mc.prepare_features_and_labels( + patient_data["patients"] + ) + + assert len(features) == len(labels) == len(dates) == 2 + assert labels[1] == 2 + + +def test_export_data(patient_data, tmpdir_factory): + mc = feature_extractor() + features, labels, dates, v = mc.prepare_features_and_labels( patient_data["patients"] ) + + fn = tmpdir_factory.mktemp("data").join("risk_factors.csv") + + mc.export_csv(features, labels, dates, v, str(fn)) + + result_dataframe = pd.read_csv(str(fn)) + + assert not result_dataframe.empty + assert result_dataframe["age"].sum() == 144 + assert result_dataframe["device=ECC"].sum() == 1 + assert result_dataframe["label"].sum() == 3 + + +def test_export_gephi(patient_data, tmpdir_factory): + mc = feature_extractor() + features, labels, dates, v = mc.prepare_features_and_labels( patient_data["patients"] ) + + print(features) + print(v.feature_names_) + + file_dir = tmpdir_factory.mktemp("data") # creates a temporary directory only for this test session + + mc.export_gephi(features, labels, dates, v, file_dir) + + result_nodes = pd.read_csv(file_dir.join("node_list.csv")) + result_edges = pd.read_csv(file_dir.join("edge_list.csv")) + + assert result_nodes.shape == (2, 4) + assert result_edges.shape == (2, 5) + + assert result_nodes["Category"][0] == 1 + assert result_nodes["Category"][1] == 2 diff --git a/spitalhygiene/vre/src/main/python/tests/test_nr_cases.py b/spitalhygiene/vre/src/main/python/tests/test_nr_cases.py new file mode 100644 index 0000000..85ffc7c --- /dev/null +++ b/spitalhygiene/vre/src/main/python/tests/test_nr_cases.py @@ -0,0 +1,2 @@ +def test_nr_cases(patient_data): + 
assert(patient_data["patients"].get("00003067149").get_nr_cases() == 13) \ No newline at end of file diff --git a/spitalhygiene/vre/src/main/python/tests/test_partners.py b/spitalhygiene/vre/src/main/python/tests/test_partners.py new file mode 100644 index 0000000..9ed4b7d --- /dev/null +++ b/spitalhygiene/vre/src/main/python/tests/test_partners.py @@ -0,0 +1,7 @@ +def test_partner_dict(patient_data): + assert len(patient_data["partners"]) == 1 + +def test_partner_case(patient_data): + assert len(patient_data["patients"].get("00008301433").get_partner()) == 1 + assert len(patient_data["patients"].get("00003067149").get_partner()) == 0 + assert patient_data["patients"].get("00004348346").get_partner() is None \ No newline at end of file diff --git a/spitalhygiene/vre/src/main/python/tests/test_relevant_rooms.py b/spitalhygiene/vre/src/main/python/tests/test_relevant_rooms.py new file mode 100644 index 0000000..c9f7d07 --- /dev/null +++ b/spitalhygiene/vre/src/main/python/tests/test_relevant_rooms.py @@ -0,0 +1,21 @@ +import datetime + +def test_moves_before_dt(patient_data): + assert ( + len( + patient_data["cases"] + .get("0006594482") + .get_moves_before_dt(datetime.datetime(2018, 7, 6, 0, 0)) + ) + == 3 + ) + +def test_relevant_rooms(patient_data): + # From SAP IS-H: BH N 125, INE GE06 + # From RAP: KARR EKG, INO OP 08 + room_names = patient_data["patients"].get("00008301433").get_relevant_rooms() + assert(len(room_names) == 4) + assert("BH N 125" in room_names) + assert("INE GE06" in room_names) + assert("INO OP 08" in room_names) + assert("KARR EKG" in room_names) diff --git a/spitalhygiene/vre/src/main/python/tests/test_risk_data.py b/spitalhygiene/vre/src/main/python/tests/test_risk_data.py new file mode 100644 index 0000000..52d894d --- /dev/null +++ b/spitalhygiene/vre/src/main/python/tests/test_risk_data.py @@ -0,0 +1,33 @@ +import datetime + + +def test_risk_codes(patient_data): + nr_risk = 0 + for patient in patient_data["patients"].values(): + if 
patient.has_risk([(32, None), (42, None), (22, None)]): + nr_risk += 1 + assert nr_risk == 1 + + +def test_load_risk_deleted(patient_data): + assert patient_data["patients"].get("00003067149").has_risk([(142, None)]) + + +def test_risk_date(patient_data): + # screening date + assert patient_data["patients"].get("00008301433").get_risk_date( + [(42, None)] + ) == datetime.datetime(2018, 7, 9, 16, 32, 10) + + # screening date of deleted screening + assert patient_data["patients"].get("00003067149").get_risk_date( + [(142, None)] + ) == datetime.datetime(2018, 3, 16, 0, 0, 0) + + +def test_risk_case(patient_data): + # do we get the relevant case for patients with risk factors (32: positive, 42: screening, 142: screening deleted)? + assert patient_data["patients"].get("00003067149").get_relevant_case() is not None + + # patient with no relevant risk and only old cases + assert patient_data["patients"].get("00004348346").get_relevant_case() is None diff --git a/spitalhygiene/vre/src/main/python/vre/BasicConfig.ini b/spitalhygiene/vre/src/main/python/vre/BasicConfig.ini new file mode 100644 index 0000000..3d7f71c --- /dev/null +++ b/spitalhygiene/vre/src/main/python/vre/BasicConfig.ini @@ -0,0 +1,42 @@ +[PATHS] +# Path to the "vre_input" folder +input_dir = /home/i0308559/vre_input + +# Path to the "vre_output" folder +output_dir = /home/i0308559/vre_output + +# Path to directory containing test csv files +test_data_dir = /home/i0308559/vre_input/test_data + +# Path to directory containing complete csv data files for model +model_data_dir = /home/i0308559/vre_input/model_data + +# absolute file path for the exported feature vector CSV file +csv_export_path = /home/i0308559/vre_output/feature_vector/feature_vector.csv + +# path to directory in which edge_list.csv and node_list.csv for import into Gephi will be saved +gephi_export_dir = /home/i0308559/vre_output/gephi + +# path to Theus' JAR file +jar_file_path = 
/data1/sqooba/vre/jar_files/spitalhygiene-2.0-SNAPSHOT-jar-with-dependencies.jar + +# directory containing all log files +log_dir = /home/i0308559/vre_log + +# directory into which all Neo4J data will be exported +neo4j_dir = /home/i0308559/vre_output/neo4j + +# directory containing the odbc connection files (see README for structure) +odbc_file_path = /home/i0308559/connection_file/server_connection.txt + +[DELIMITERS] +# delimiter used for CSV files (default is ,) +csv_sep = , + + +[PARAMETERS] +# Indicator for data to use, one of 'test' (will load test patient data only) or 'all' (data for all patients) +data_basis = all + +# Number of patients and cases to be loaded, either None (load all data) or any positive integer +load_limit = 1000 diff --git a/spitalhygiene/vre/src/main/python/vre/HDFS_data_loader.py b/spitalhygiene/vre/src/main/python/vre/HDFS_data_loader.py new file mode 100644 index 0000000..0f1721c --- /dev/null +++ b/spitalhygiene/vre/src/main/python/vre/HDFS_data_loader.py @@ -0,0 +1,309 @@ +# -*- coding: utf-8 -*- +"""This script contains all functions for loading data from CSV or HDFS, and controls the creation of all +objects required for the VRE model. 
+ +----- +""" + +import subprocess +import csv +import os +import logging +import sys +import configparser + +# make sure to append the correct path regardless where script is called from +sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'model')) + +from Patient import Patient +from Risk import Risk +from Case import Case +from Room import Room +from Move import Move +from Medication import Medication +from Appointment import Appointment +from Device import Device +from Employee import Employee +from Chop import Chop +from Surgery import Surgery +from Partner import Partner +from Care import Care +from ICD import ICD + +############################################################################################################### + + +class HDFS_data_loader: + """Loads all the csv files from HDFS and creates the data model. + """ + + def __init__(self, hdfs_pipe=True): + # Load configuration file + config_reader = configparser.ConfigParser() + config_reader.read(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'BasicConfig.ini')) + + self.load_test_data = config_reader['PARAMETERS']['data_basis'] == 'test' + + self.base_path = config_reader['PATHS']['model_data_dir'] if self.load_test_data is False \ + else config_reader['PATHS']['test_data_dir'] + + logging.debug(f"base_path: {self.base_path}") + + self.devices_path = os.path.join(self.base_path, "V_DH_DIM_GERAET_CUR.csv") + self.patients_path = os.path.join(self.base_path, "V_DH_DIM_PATIENT_CUR.csv") + self.cases_path = os.path.join(self.base_path, "V_LA_ISH_NFAL_NORM.csv") + self.moves_path = os.path.join(self.base_path, "LA_ISH_NBEW.csv") + self.risks_path = os.path.join(self.base_path, "V_LA_ISH_NRSF_NORM.csv") + # self.deleted_risks_path = os.path.join(self.base_path, "deleted_screenings.csv") + self.appointments_path = os.path.join(self.base_path, "V_DH_DIM_TERMIN_CUR.csv") + self.device_appointment_path = os.path.join(self.base_path, "V_DH_FACT_TERMINGERAET.csv") + 
self.appointment_patient_path = os.path.join(self.base_path, "V_DH_FACT_TERMINPATIENT.csv") + self.rooms_path = os.path.join(self.base_path, "V_DH_DIM_RAUM_CUR.csv") + self.room_appointment_path = os.path.join(self.base_path, "V_DH_FACT_TERMINRAUM.csv") + self.appointment_employee_path = os.path.join(self.base_path, "V_DH_FACT_TERMINMITARBEITER.csv") + self.medication_path = os.path.join(self.base_path, "V_LA_IPD_DRUG_NORM.csv") + self.bwart_path = os.path.join(self.base_path, "BWTYP-BWART.csv") + self.partner_path = os.path.join(self.base_path, "LA_ISH_NGPA.csv") + self.partner_case_path = os.path.join(self.base_path, "LA_ISH_NFPZ.csv") + self.chop_path = os.path.join(self.base_path, "V_DH_REF_CHOP.csv") + self.surgery_path = os.path.join(self.base_path, "LA_ISH_NICP.csv") + self.tacs_path = os.path.join(self.base_path, "TACS_DATEN.csv") + self.icd_path = os.path.join(self.base_path, "LA_ISH_NDIA_NORM.csv") + self.VRE_screenings_path = os.path.join(self.base_path, "V_VRE_SCREENING_DATA.csv") + self.VRE_ward_screenings_path = os.path.join(self.base_path, "WARD_SCREENINGS.csv") + self.oe_pflege_map_path = os.path.join(self.base_path, "OE_PFLEGE_MAP.csv") + + self.hdfs_pipe = hdfs_pipe # binary attribute specifying whether to read data Hadoop (True) or CSV (False) + + self.file_delim = config_reader['DELIMITERS']['csv_sep'] # delimiter character for reading CSV files + + self.load_limit = None if config_reader['PARAMETERS']['load_limit'] == 'None' \ + else int(config_reader['PARAMETERS']['load_limit']) + + def get_hdfs_pipe(self, path): + """Loads a datafile from HDFS. + + Loads the datafile specified in path from the Hadoop file system, and returns the file **without header** as a + csv.reader() instance. This function is used in the method patient_data() if hdfs_pipe is ``True`` + (the default). + + Args: + path (str): full path to file in HDFS to be loaded. + + Returns: + ``csv.reader()`` instance **not** containing the header of the file. 
+ """ + logging.debug(f"get_hdfs_pipe: {path}") + encoding = "iso-8859-1" + cat = subprocess.Popen(["hadoop", "fs", "-cat", path], stdout=subprocess.PIPE) + output = cat.communicate()[0].decode(encoding) + lines = csv.reader(output.splitlines(), delimiter=self.file_delim) + next(lines, None) # skip header + return lines + + def get_csv_file(self, csv_path): + """Loads a datafile from CSV. + + Loads the datafile specified in csv_path, and returns the file **without header** as a csv.reader() instance. + ``csv_path`` must be an **absolute** filepath. This function is used in the method patient_data() if hdfs_pipe + is ``False``. + + Args: + csv_path (str): full path to csv file. + + Important: + Since the csv.reader() instance is returned by this functions via ``open(csv_path, ...)``, these files may + not be properly closed ! + + Returns: + ``csv.reader()`` instance **not** containing the header of the file. + """ + logging.debug(f"csv_path: {csv_path}") + encoding = "iso-8859-1" + output = csv.reader(open(csv_path, 'r', encoding=encoding), delimiter=self.file_delim) + # Note --> Test Data are ';'-delimited, but original data are ','-delimited ! + next(output, None) # ignore the header line + return output + + def patient_data(self, risk_only=False): + """Prepares patient data based on all results obtained from the SQL queries. + + If self.hdfs_pipe is ``True``, this will use the :meth:`get_hdfs_pipe()` method. Otherwise, the + :meth:`get_csv_file()` method is used. + + Args: + risk_only (bool): Whether or not to use only risk data (defaults to ``False``). + + Returns: + dict: Dictionary containing all VRE-relevant objects of the form + + { "rooms" :math:`\\longrightarrow` *Rooms*, + + "wards" :math:`\\longrightarrow` *Wards*, etc. } + + Please refer to the ``vre/src/main/python/vre/model`` folder documentation for more details on the + various objects. + """ + rooms = dict() # dictionary mapping room names to Room() objects --> {'BH N 123' : Room(), ... 
} + wards = dict() + + logging.info(f"Processing patient data (load_test_data is {self.load_test_data}, hdfs_pipe is {self.hdfs_pipe}," + f" base_path set to {self.base_path}).") + + # Load Patient data from table: V_DH_DIM_PATIENT_CUR + logging.info("loading patient data") + patients = Patient.create_patient_dict(self.get_hdfs_pipe(self.patients_path) if self.hdfs_pipe is True + else self.get_csv_file(self.patients_path), + load_limit=self.load_limit) + + # Load Case data from table: V_LA_ISH_NFAL_NORM + logging.info("loading case data") + cases = Case.create_case_map(self.get_hdfs_pipe(self.cases_path) if self.hdfs_pipe is True + else self.get_csv_file(self.cases_path), patients, + load_limit=self.load_limit) + + # Load Partner data from table: LA_ISH_NGPA + partners = Partner.create_partner_map(self.get_hdfs_pipe(self.partner_path) if self.hdfs_pipe is True + else self.get_csv_file(self.partner_path)) + Partner.add_partners_to_cases( # This will update partners from table: LA_ISH_NFPZ + self.get_hdfs_pipe(self.partner_case_path) if self.hdfs_pipe is True + else self.get_csv_file(self.partner_case_path), cases, partners) + + # Load Move data from table: LA_ISH_NBEW + logging.info("loading move data") + Move.add_move_to_case(self.get_hdfs_pipe(self.moves_path) if self.hdfs_pipe is True + else self.get_csv_file(self.moves_path), cases, rooms, wards, partners, + load_limit=self.load_limit) + # --> Note: Move() objects are not part of the returned dictionary, they are only used in + # Case() objects --> Case().moves = [1 : Move(), 2 : Move(), ...] + + # Generate ward screening overview map + screen_map = Risk.generate_screening_overview_map(self.get_hdfs_pipe(self.VRE_ward_screenings_path) + if self.hdfs_pipe is True + else self.get_csv_file(self.VRE_ward_screenings_path)) + # --> this yields a dictionary mapping dt.date() objects to tuples of (ward_name, screening_type) + # i.e. 
of the form {'2018-10-22' : ('O SUED', 'W'), '2018-09-15' : ('IB BLAU', 'E'), ...} + + # Generate OE_pflege_map + oe_pflege_map = Risk.generate_oe_pflege_map(self.get_hdfs_pipe(self.oe_pflege_map_path) + if self.hdfs_pipe is True + else self.get_csv_file(self.oe_pflege_map_path)) + # --> yields a dictionary mapping "inofficial" ward names to official ones found in the OE_pflege_abk column + # of the dbo.INSEL_MAP table in the Atelier_DataScience. This name allows linkage to Waveware ! + # i.e. of the form {'BEWA' : 'C WEST', 'E 121' : 'E 120-21', ...} + + # ---------------------------------------------------------------- + # Load Risk data --> ADJUST THIS SECTION ! + logging.info("loading risk data") + # ## --> OLD VERSION: from table: V_LA_ISH_NRSF_NORM + # Risk.add_risk_to_patient( self.get_hdfs_pipe(self.risks_path) if self.hdfs_pipe is True + # else self.get_csv_file(self.risks_path), patients ) + # Risk.add_deleted_risk_to_patient( # Update data from table: deleted_screenings + # self.get_hdfs_pipe(self.deleted_risks_path) if self.hdfs_pipe is True + # else self.get_csv_file(self.deleted_risks_path), patients + # ) + # ## --> NEW VERSION: from file VRE_Screenings_Final.csv + Risk.add_annotated_screening_data_to_patients(self.get_hdfs_pipe(self.VRE_screenings_path) + if self.hdfs_pipe is True + else self.get_csv_file(self.VRE_screenings_path), + patient_dict=patients) + + + if risk_only: + logging.info("keeping only risk patients") + patients_risk = dict() + for patient in patients.values(): + if patient.get_label() > 0: + patients_risk[patient.patient_id] = patient + patients = patients_risk + logging.info(f"{len(patients)} patients") + # ---------------------------------------------------------------- + + # Load Drug data from table: V_LA_IPD_DRUG_NORM + logging.info("loading drug data") + drugs = Medication.create_drug_map(self.get_hdfs_pipe(self.medication_path) if self.hdfs_pipe is True + else self.get_csv_file(self.medication_path)) + 
Medication.add_medication_to_case( # Update is based on the same table + self.get_hdfs_pipe(self.medication_path) if self.hdfs_pipe is True + else self.get_csv_file(self.medication_path), cases) + + # Load CHOP data from table: V_DH_REF_CHOP + logging.info("loading chop data") + chops = Chop.create_chop_dict(self.get_hdfs_pipe(self.chop_path) if self.hdfs_pipe is True + else self.get_csv_file(self.chop_path)) + + # Add Surgery data to cases from table: LA_ISH_NICP + Surgery.add_surgery_to_case(self.get_hdfs_pipe(self.surgery_path) if self.hdfs_pipe is True + else self.get_csv_file(self.surgery_path), cases, chops) + # Surgery() objects are not part of the returned dictionary + + # Load Appointment data from table: V_DH_DIM_TERMIN_CUR + logging.info("loading appointment data") + appointments = Appointment.create_termin_map(self.get_hdfs_pipe(self.appointments_path) + if self.hdfs_pipe is True + else self.get_csv_file(self.appointments_path)) + + # Add Appointments to cases from table: V_DH_FACT_TERMINPATIENT + logging.info('Adding appointments to cases') + Appointment.add_appointment_to_case(self.get_hdfs_pipe(self.appointment_patient_path) if self.hdfs_pipe is True + else self.get_csv_file(self.appointment_patient_path), + cases, appointments) + + # Load Device data from table: V_DH_DIM_GERAET_CUR + logging.info("loading device data") + devices = Device.create_device_map(self.get_hdfs_pipe(self.devices_path) if self.hdfs_pipe is True + else self.get_csv_file(self.devices_path)) + + # Add Device data to Appointments from table: V_DH_FACT_TERMINGERAET + Device.add_device_to_appointment(self.get_hdfs_pipe(self.device_appointment_path) if self.hdfs_pipe is True + else self.get_csv_file(self.device_appointment_path), + appointments, devices) + + # Load Employee data (RAP) from table: V_DH_FACT_TERMINMITARBEITER + logging.info("loading employee data from RAP") + employees = Employee.create_employee_map(self.get_hdfs_pipe(self.appointment_employee_path) + if 
self.hdfs_pipe is True + else self.get_csv_file(self.appointment_employee_path)) + + # Add Employees to Appointments using the same table + Employee.add_employee_to_appointment(self.get_hdfs_pipe(self.appointment_employee_path) + if self.hdfs_pipe is True + else self.get_csv_file(self.appointment_employee_path), + appointments, employees) + + # Add Care data to Cases from table: TACS_DATEN + logging.info("Adding Care data to Cases from TACS") + Care.add_care_to_case(self.get_hdfs_pipe(self.tacs_path) if self.hdfs_pipe is True + else self.get_csv_file(self.tacs_path), cases, employees) + # --> Note: Care() objects are not part of the returned dictionary, they are only used in + # Case() objects --> Case().cares = [Care(), Care(), ...] (list of all cares for each case) + + # Add Room data to Appointments from table: V_DH_FACT_TERMINRAUM + logging.info('Adding rooms to appointments') + Room.add_room_to_appointment(self.get_hdfs_pipe(self.room_appointment_path) if self.hdfs_pipe is True + else self.get_csv_file(self.room_appointment_path), appointments, rooms) + logging.info(f"Dataset contains in total {len(rooms)} Rooms") + + # Add ICD codes to cases from table: LA_ISH_NDIA_NORM + icd_codes = ICD.create_icd_dict(self.get_hdfs_pipe(self.icd_path) if self.hdfs_pipe is True + else self.get_csv_file(self.icd_path)) + ICD.add_icd_to_case(self.get_hdfs_pipe(self.icd_path) if self.hdfs_pipe is True + else self.get_csv_file(self.icd_path), cases) + + return dict( + { + "rooms": rooms, + "wards": wards, + "partners": partners, + "patients": patients, + "cases": cases, + "drugs": drugs, + "chops": chops, + "appointments": appointments, + "devices": devices, + "employees": employees, + # "room_id_map": room_id_map, # --> no longer used + 'icd_codes': icd_codes + } + ) + diff --git a/spitalhygiene/vre/src/main/python/vre/Neo4JExporter.py b/spitalhygiene/vre/src/main/python/vre/Neo4JExporter.py new file mode 100644 index 0000000..c9b1d82 --- /dev/null +++ 
b/spitalhygiene/vre/src/main/python/vre/Neo4JExporter.py @@ -0,0 +1,463 @@ +import configparser +import csv +import datetime +import os +import logging + +class Neo4JExporter: + """ + Class responsible for data export in Neo4J-compatible format. + """ + def __init__(self): + """ + Loads the configuration file and makes its contents available via the self.config attribute (e.g. self.config['PATHS']['some_dir'] + """ + self.config = configparser.ConfigParser() + self.config.read(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'BasicConfig.ini')) # makes configuration file entries available as self.config['PATHS']['XXX'] + + self.init_date = datetime.datetime.now().strftime('%Y%m%d%H%M%S') # timestamp at initiation - used as a suffix for all exported files + logging.basicConfig(format='%(asctime)s - %(levelname)s: %(message)s', level=logging.INFO, datefmt='%d.%m.%Y %H:%M:%S') + + def write_patient(self, patients): + """ + Export patient data for all patients with a relevant case. + + :param patients: Dictionary mapping PATIENTID to Patient() objects, i.e. 
{"00001383264" : Patient(), "00001383310" : Patient(), ...} + """ + written_count = 0 + total_count = 0 + with open(os.path.join(self.config['PATHS']['neo4j_dir'], 'patients_' + self.init_date + '.csv'), "w") as csvfile: + csvwriter = csv.writer( csvfile, delimiter=self.config['DELIMITERS']['csv_sep'], quotechar='"', quoting=csv.QUOTE_MINIMAL ) + csvwriter.writerow( + [ + "PATNR:ID", + "type:LABEL", + "risk:LABEL", + "risk_datetime:datetime{timezone:Europe/Bern}", + ] ) # Write file header + for key, patient in patients.items(): + total_count += 1 + l = "False" + d = None + if patient.has_risk(): + l = "True" + d = patient.get_risk_date() # returns either a dt.dt() object or None + if patient.get_relevant_case() is not None: # filter for patients with a relevant case + csvwriter.writerow( + [ + patient.patient_id, + "Patient", + l, + d.strftime("%Y-%m-%dT%H:%M") if d is not None else None, + ] ) + written_count += 1 + logging.info(f'Created {written_count} patients with a relevant case out of {total_count} patients.') + + + def write_patient_patient(self, contact_pats, patients): + """ + Export all contacts between patients. + + :param contact_pats: List containing tuples of length 6 of either the format: (source_pat_id, dest_pat_id, start_overlap_dt, end_overlap_dt, room_name, "kontakt_raum") + or the format: (source_pat_id, dest_pat_id, start_overlap_dt, end_overlap_dt, ward_name, "kontakt_org") + :param patients: Dictionary mapping PATIENTID to Patient() objects, i.e. 
{"00001383264" : Patient(), "00001383310" : Patient(), ...} + """ + contact_count = 0 + with open(os.path.join(self.config['PATHS']['neo4j_dir'], 'patient_patient_' + self.init_date + '.csv'), "w") as csvfile: + csvwriter = csv.writer( csvfile, delimiter=self.config['DELIMITERS']['csv_sep'], quotechar='"', quoting=csv.QUOTE_MINIMAL ) + csvwriter.writerow( + [ + ":START_ID", + ":END_ID", + "from:datetime{timezone:Europe/Bern}", + "to:datetime{timezone:Europe/Bern}", + "room", + ":TYPE", + ] ) + for contact in contact_pats: # list of tuples --> can be directly printed ! + if patients[contact[0]].get_relevant_case() is not None and patients[contact[1]].get_relevant_case() is not None: # make sure to include only patients with a relevant date ! + csvwriter.writerow(contact) + contact_count += 1 + logging.info(f'Created {contact_count} patient contacts.') + + + def write_room(self, rooms): + """ + Export all room data. + + :param rooms: Dictionary mapping room names to a Room() object --> {'BH N 125' : Room(), ... } + """ + room_count = 0 + with open(os.path.join(self.config['PATHS']['neo4j_dir'], 'zimmer_' + self.init_date + '.csv'), "w") as csvfile: + csvwriter = csv.writer( csvfile, delimiter=self.config['DELIMITERS']['csv_sep'], quotechar='"', quoting=csv.QUOTE_MINIMAL ) + csvwriter.writerow(["Name:ID", "type:LABEL"]) # header + for k, r in rooms.items(): + csvwriter.writerow([r.name, "Room"]) + room_count += 1 + logging.info(f'Created {room_count} rooms.') + + + def write_patient_room(self, patients): + """ + Export room data only for patients with a relevant case in the sense: patient_id --[in]--> room_name + + :param patients: Dictionary mapping PATIENTID to Patient() objects, i.e. 
{"00001383264" : Patient(), "00001383310" : Patient(), ...} + """ + relcase_count = 0 + patient_count = 0 + room_count = 0 + with open(os.path.join(self.config['PATHS']['neo4j_dir'], 'patient_zimmer_' + self.init_date + '.csv'), "w") as csvfile: + csvwriter = csv.writer( csvfile, delimiter=self.config['DELIMITERS']['csv_sep'], quotechar='"', quoting=csv.QUOTE_MINIMAL ) + csvwriter.writerow( + [ + ":START_ID", + ":END_ID", + "from:datetime{timezone:Europe/Bern}", + "to:datetime{timezone:Europe/Bern}", + ":TYPE", + ] ) # header + for patient in patients.values(): + patient_count += 1 + pat_rel_case = patient.get_relevant_case() + if pat_rel_case is not None: + relcase_count += 1 + for move in pat_rel_case.moves.values(): # the Case().moves attribute is a dictionary mapping the order of moves to Moves() objects + if move.room is not None: + csvwriter.writerow( + [ + patient.patient_id, + move.room.name, + move.bwi_dt.strftime("%Y-%m-%dT%H:%M"), + move.bwe_dt.strftime("%Y-%m-%dT%H:%M"), + "in", + ]) + room_count += 1 + logging.info(f'Wrote {room_count} rooms (based on moves) for {relcase_count} patients with relevant cases out of {patient_count} patients.') + + + def write_bed(self, rooms): + """ + Export all beds involved in all rooms. + + :param rooms: Dictionary mapping room names to a Room() object --> {'BH N 125' : Room(), ... } + """ + room_count = 0 + bed_count = 0 + with open(os.path.join(self.config['PATHS']['neo4j_dir'], 'bett_' + self.init_date + '.csv'), "w") as csvfile: + csvwriter = csv.writer( csvfile, delimiter=self.config['DELIMITERS']['csv_sep'], quotechar='"', quoting=csv.QUOTE_MINIMAL ) + csvwriter.writerow(["Name:ID", "type:LABEL"]) + for k, r in rooms.items(): + room_count += 1 + for b in r.beds: + bed_count += 1 + csvwriter.writerow([b, "Bed"]) + logging.info(f'Created {bed_count} beds from {room_count} rooms.') + + + def write_room_bed(self, rooms): + """ + Assign beds to rooms. 
+ :param rooms: Dictionary mapping room names to a Room() object --> {'BH N 125' : Room(), ... } + """ + room_count = 0 + bed_count = 0 + with open(os.path.join(self.config['PATHS']['neo4j_dir'], 'zimmer_bett_' + self.init_date + '.csv'), "w") as csvfile: + csvwriter = csv.writer( csvfile, delimiter=self.config['DELIMITERS']['csv_sep'], quotechar='"', quoting=csv.QUOTE_MINIMAL ) + csvwriter.writerow([":START_ID", ":END_ID", ":TYPE"]) + for k, r in rooms.items(): + room_count += 1 + for b in r.beds: + csvwriter.writerow([r.name, b, "in"]) + bed_count += 1 + logging.info(f'Assigned {bed_count} beds to {room_count} rooms.') + + + def write_device(self, devices): + """ + Exports all devices. + + :param rooms: Dictionary mapping room names to a Room() object --> {'BH N 125' : Room(), ... } + """ + device_count = 0 + with open(os.path.join(self.config['PATHS']['neo4j_dir'], 'geraet_' + self.init_date + '.csv'), "w") as csvfile: + csvwriter = csv.writer( csvfile, delimiter=self.config['DELIMITERS']['csv_sep'], quotechar='"', quoting=csv.QUOTE_MINIMAL ) + csvwriter.writerow([":ID", "type:LABEL", "name"]) + for k, g in devices.items(): + csvwriter.writerow([g.geraet_id, "Device", g.geraet_name]) + device_count += 1 + logging.info(f'Created {device_count} devices.') + + + def write_patient_device(self, patients): + """ + Exports all contacts between patients and a specific "geraet" if the contact happened during the patient's relevant case. + + :param patients: Dictionary mapping PATIENTID to Patient() objects, i.e. 
{"00001383264" : Patient(), "00001383310" : Patient(), ...} + """ + pat_count = 0 + relcase_count = 0 + device_count = 0 + with open(os.path.join(self.config['PATHS']['neo4j_dir'], 'patient_geraet_' + self.init_date + '.csv'), "w") as csvfile: + csvwriter = csv.writer( csvfile, delimiter=self.config['DELIMITERS']['csv_sep'], quotechar='"', quoting=csv.QUOTE_MINIMAL ) + csvwriter.writerow( [":START_ID", ":END_ID", ":TYPE", "from:datetime{timezone:Europe/Bern}"] ) + for k, patient in patients.items(): + pat_count += 1 + pat_rel_case = patient.get_relevant_case() + if pat_rel_case is not None: # consider only patients with a relevant case + relcase_count += 1 + for t in pat_rel_case.appointments: # consider only appointments in the relevant case + for d in t.devices: + csvwriter.writerow( + [ + patient.patient_id, + d.geraet_id, + "used", + t.termin_datum.strftime("%Y-%m-%dT%H:%M"), + ] ) + device_count += 1 + logging.info(f'Added {device_count} devices for {relcase_count} cases from {pat_count} patients.') + + def write_drug(self, drugs): + """ + Export all drugs. + + :param drugs: A dictionary mapping drug codes to their respective text description --> {'B02BA01' : 'NaCl Braun Inf Lös 0.9 % 500 ml (Natriumchlorid)', ... } + """ + drug_count = 0 + with open(os.path.join(self.config['PATHS']['neo4j_dir'], 'drugs_' + self.init_date + '.csv'), "w") as csvfile: + csvwriter = csv.writer( csvfile, delimiter=self.config['DELIMITERS']['csv_sep'], quotechar='"', quoting=csv.QUOTE_MINIMAL ) + csvwriter.writerow([":ID", "type:LABEL", "name"]) + for k, d in drugs.items(): + csvwriter.writerow([k, "Drug", d]) + drug_count += 1 + logging.info(f'Created {drug_count} drugs.') + + + def write_patient_medication(self, patients): + """ + Export exposure to drugs for all patients with a relevant case. + + :param patients: Dictionary mapping PATIENTID to Patient() objects, i.e. 
{"00001383264" : Patient(), "00001383310" : Patient(), ...} + """ + pat_count = 0 + exposure_count = 0 + with open(os.path.join(self.config['PATHS']['neo4j_dir'], 'patient_medication_' + self.init_date + '.csv'), "w") as csvfile: + csvwriter = csv.writer( csvfile, delimiter=self.config['DELIMITERS']['csv_sep'], quotechar='"', quoting=csv.QUOTE_MINIMAL ) + csvwriter.writerow( + [ + ":START_ID", + ":END_ID", + ":TYPE", + "from:datetime{timezone:Europe/Bern}", + "to:datetime{timezone:Europe/Bern}", + ] ) # header + for k, p in patients.items(): + pat_count += 1 + exposure = p.get_antibiotic_exposure() # this will return None if the patient has no relevant case + if exposure is None: + continue + for k, d in exposure.items(): # d is a set of datetime.date() objects + date_list = list(d) + min_date = date_list[date_list.index(min(date_list))] + max_date = date_list[date_list.index(max(date_list))] + csvwriter.writerow( + [ + p.patient_id, + k, + "administered", + min_date.strftime("%Y-%m-%dT%H:%M"), + max_date.strftime("%Y-%m-%dT%H:%M"), + ] ) + exposure_count += 1 + logging.info(f'Created {exposure_count} exposures from {pat_count} patients.') + + + def write_employees(self, employees): + """ + Export all employees. + + :param employees: Dictionary mapping employee_ids to Employee() objects --> {'0032719' : Employee(), ... } + """ + employee_count = 0 + with open(os.path.join(self.config['PATHS']['neo4j_dir'], 'employees_' + self.init_date + '.csv'), "w") as csvfile: + csvwriter = csv.writer( csvfile, delimiter=self.config['DELIMITERS']['csv_sep'], quotechar='"', quoting=csv.QUOTE_MINIMAL ) + csvwriter.writerow([":ID", "type:LABEL", "name"]) + for k, e in employees.items(): + csvwriter.writerow([k, "Employee", k]) + employee_count += 1 + logging.info(f'Created {employee_count} employees.') + + def write_patient_employee(self, patients): + """ + Export all contacts between an employee and a patient during the patient's relevant case. 
+ + :param patients: Dictionary mapping PATIENTID to Patient() objects, i.e. {"00001383264" : Patient(), "00001383310" : Patient(), ...} + """ + pat_count = 0 + relcase_count = 0 + contact_count = 0 + with open(os.path.join(self.config['PATHS']['neo4j_dir'], 'patient_employee_' + self.init_date + '.csv'), "w") as csvfile: + csvwriter = csv.writer( csvfile, delimiter=self.config['DELIMITERS']['csv_sep'], quotechar='"', quoting=csv.QUOTE_MINIMAL ) + csvwriter.writerow( [":START_ID", ":END_ID", ":TYPE", "from:datetime{timezone:Europe/Bern}"] ) + for k, p in patients.items(): + pat_rel_case = p.get_relevant_case() # returns None if patient has no relevant case + pat_count += 1 + if pat_rel_case is not None: + relcase_count += 1 + for t in pat_rel_case.appointments: + for e in t.employees: + if e.mitarbeiter_id != "-1": # indicates an unknown mitarbeiter - these cases are ignored + csvwriter.writerow( + [ + p.patient_id, + e.mitarbeiter_id, + "appointment_with", + t.termin_datum.strftime("%Y-%m-%dT%H:%M"), + ] ) + contact_count += 1 + logging.info(f'Created {contact_count} contacts in {relcase_count} relevant cases from {pat_count} patients.') + + + def write_referrer(self, partners): + """ + Export all referrers. + + :param partners: Dictionary mapping partners to Partner() objects --> {'1001503842' : Partner(), '1001503845' : Partner(), ... 
} + """ + refer_count = 0 + with open(os.path.join(self.config['PATHS']['neo4j_dir'], 'referrers_' + self.init_date + '.csv'), "w") as csvfile: + csvwriter = csv.writer( csvfile, delimiter=self.config['DELIMITERS']['csv_sep'], quotechar='"', quoting=csv.QUOTE_MINIMAL ) + csvwriter.writerow( + [ + ":ID", + "type:LABEL", + "name1", + "name2", + "name3", + "land", + "plz", + "ort", + "ort2", + ] ) + for partner in partners.values(): + csvwriter.writerow( + [ + partner.gp_art, + "Referrer", + partner.name1, + partner.name2, + partner.name3, + partner.land, + partner.pstlz, + partner.ort, + partner.ort2, + ] ) + refer_count += 1 + logging.info(f'Created {refer_count} referrers.') + + + def write_referrer_patient(self, patients): + """ + Export referrers which came into contact with patients during their relevant case. + + :param patients: Dictionary mapping PATIENTID to Patient() objects, i.e. {"00001383264" : Patient(), "00001383310" : Patient(), ...} + """ + patient_count = 0 + referrer_count = 0 + with open(os.path.join(self.config['PATHS']['neo4j_dir'], 'referrer_patient_' + self.init_date + '.csv'), "w") as csvfile: + csvwriter = csv.writer( csvfile, delimiter=self.config['DELIMITERS']['csv_sep'], quotechar='"', quoting=csv.QUOTE_MINIMAL ) + csvwriter.writerow( [":END_ID", ":START_ID", ":TYPE", "from:datetime{timezone:Europe/Bern}"] ) + for patient in patients.values(): + patient_count += 1 + patient_relcase = patient.get_relevant_case() + if patient_relcase is not None: + if patient_relcase.moves_start is not None: + for referrer in patient_relcase.referrers: + csvwriter.writerow( + [ + patient.patient_id, + referrer.gp_art, + "referring", + patient_relcase.moves_start.strftime("%Y-%m-%dT%H:%M"), + ] ) + referrer_count += 1 + logging.info(f'Created {referrer_count} referrals from {patient_count} patients.') + + + def write_chop_code(self, chops): + """ + Export all chop codes. 
+ + :param chops: Dictionary mapping the chopcode_katalogid entries to Chop() obects --> { 'Z39.61.10_11': Chop(), ... } + """ + chop_count = 0 + filtered_chops = 0 + with open(os.path.join(self.config['PATHS']['neo4j_dir'], 'chops_' + self.init_date + '.csv'), "w") as csvfile: + csvwriter = csv.writer( csvfile, delimiter=self.config['DELIMITERS']['csv_sep'], quotechar='"', quoting=csv.QUOTE_MINIMAL ) + csvwriter.writerow( + [ + ":ID", + "type:LABEL", + "level1", + "level2", + "level3", + "level4", + "level5", + "level6", + "latest_catalog", + ] ) + # Extract only the latest CHOP codes, given by the highest value in each chop.chop_sap_katalog_id() value + chop_dict = {} # maps chop codes (without the SAP katalog number) to tuples of (SAP_Katalog_ID, chop object) + for each_chop in chops.values(): + chop_count += 1 + if each_chop.chop_code not in chop_dict.keys(): + chop_dict[each_chop.chop_code] = (int(each_chop.chop_sap_katalog_id), each_chop) + else: # indicates that the CHOP code has already been appended + if int(each_chop.chop_sap_katalog_id) > chop_dict[each_chop.chop_code][0]: + chop_dict[each_chop.chop_code] = (int(each_chop.chop_sap_katalog_id), each_chop) # overwrite the previous entry with the "more current" katalog ID + # for key, value in cho + # final_chop_list = [] # holds lists of values to be appended + for chop_tuple in chop_dict.values(): + csvwriter.writerow( + [ + chop_tuple[1].chop_code, + "CHOP", + chop_tuple[1].chop_level1.replace('\n', ' '), # Note that some of the descriptions may contain newline characters + chop_tuple[1].chop_level2.replace('\n', ' '), + chop_tuple[1].chop_level3.replace('\n', ' '), + chop_tuple[1].chop_level4.replace('\n', ' '), + chop_tuple[1].chop_level5.replace('\n', ' '), + chop_tuple[1].chop_level6.replace('\n', ' '), + chop_tuple[1].chop_sap_katalog_id.replace('\n', ' ') + ] ) + filtered_chops += 1 + logging.info(f'Created {filtered_chops} latest CHOP codes out of {chop_count} total CHOP codes.') + + def 
write_chop_patient(self, patients): + """ + Export CHOP codes for patients during their relevant case. + + :param patients: Dictionary mapping PATIENTID to Patient() objects, i.e. {"00001383264" : Patient(), "00001383310" : Patient(), ...} + """ + pat_count = 0 + relcase_count = 0 + chop_count = 0 + with open(os.path.join(self.config['PATHS']['neo4j_dir'], 'chop_patient_' + self.init_date + '.csv'), "w") as csvfile: + csvwriter = csv.writer( csvfile, delimiter=self.config['DELIMITERS']['csv_sep'], quotechar='"', quoting=csv.QUOTE_MINIMAL ) + csvwriter.writerow( [":START_ID", ":END_ID", ":TYPE", "from:datetime{timezone:Europe/Bern}"] ) + for patient in patients.values(): + pat_count += 1 + pat_relcase = patient.get_relevant_case() + if pat_relcase is not None: + relcase_count += 1 + for surgery in pat_relcase.surgeries: + csvwriter.writerow( + [ + patient.patient_id, + surgery.chop.chop_code, + "surgery", + surgery.bgd_op.strftime("%Y-%m-%dT%H:%M"), + ] ) + chop_count += 1 + logging.info(f'Created {chop_count} surgeries (CHOP codes) for {relcase_count} relevant cases from {pat_count} patients.') + diff --git a/spitalhygiene/vre/src/main/python/vre/README.md b/spitalhygiene/vre/src/main/python/vre/README.md new file mode 100644 index 0000000..f891ca1 --- /dev/null +++ b/spitalhygiene/vre/src/main/python/vre/README.md @@ -0,0 +1,91 @@ +# Project Spitalhygiene - Details on Model Creation + +This file contains additional information on the procedures involved in the Spitalhygiene project, including + +1. Querying of data in the Data_Science_Atelier and its export to CSV +2. Import of CSV data into the model +3. Creation of the feature vectors for each patient +4. Export of feature vectors into CSV (serves as basis for Andrew Atkinson's univariate and multivariate analysis) +5. Export of data in Neo4J-compatible format +6. Import into the Neo4J database + +All the above steps are controlled and logged by the file `~/resources/Update_Model.sh`. 
+ +Note: `~` in this script refers to the `spitalhygiene` folder in the master branch of the insel mono-repo on Bitbucket. + +### 1. Querying of data in the Data_Science_Atelier and its export to CSV + +This step is controlled by the `~/resources/Query_Aterlier_Data.py`. This file controls the querying of data from the Aterlier_DataScience via the `pyodbc` module, and +requires that ODBC Driver 17 for SQL Server is installed. In addition, a file containing connection information is required and must be formatted as follows: + +``` +DRIVER={ODBC Driver 17 for SQL Server} +SERVER=PATH/TO/SERVER +DATABASE=YOUR_DATABASE +UID=YOUR_USERNAME +PWD=PASSWORD +``` + +This file is **not** part of this branch and must be available separately, so as not to hard-code SQL connection information in the repo. + +The `Query_Atelier_Data.py` script essentially performs an easy task: it connects to the Data_Science_Atelier, executes a series of SQL queries, +and saves the retrieved data in CSV format to a specified folder. All executed queries are found in the `~/sql` folder. Created CSV files are named identically to the SQL query +scripts, i.e. `LA_ISH_NICP.sql` → `LA_ISH_NICP.csv`. + +### 2. Import of CSV data into VRE model + +The CSV import and model creation is controlled by the `~/vre/src/main/python/vre/feature_extractor.py` script. Data are loaded in multiple steps: + +1. An HDFS_data_loader() instance is created, responsible for the extraction of data from the loaded CSV files +2. Patient data are extracted and loaded via a call to the HDFS_data_loader.patient_data() function +3. Features and labels are processed using an instance of the features_extractor() class +4. Features and labels are prepared via a call to features_extractor().prepare_features_and_labels() + +### 3. Creation of the feature vectors for each patient + +This step is also controlled by the `~/vre/src/main/python/vre/feature_extractor.py` script. + +1. 
Feature vectors are created based on the HDFS_data_loader.patient_data() object from step 2. +2. Features and labels are processed using an instance of the features_extractor() class and +3. prepared via a call to features_extractor().prepare_features_and_labels() + +### 4. Export of feature vectors into CSV + +Created feature vectors from step 3 are exported as CSV using a call to the features_extractor().export_csv() method. This file is formatted as follows: + +|Patient_ID|Label|age|antibiotic=J01AA02|chop=Z95|employee=0081218|device=Oxy 929992|…| +|---|---|---|---|---|---|---|---| +|ID01|32|68|0|0|0|1| +|ID02|42|50|0|1|0|0| +|ID03|32|65|0|0|30|0| +|ID04|142|61|1|0|0|1| + +Patient_IDs and labels are actually supplied as separate vectors to the model. Important to note is that all contacts are converted into a one-of-k kind of table, meaning that there is a +boolean feature for each "entity" in the data indicating whether or not a patient came into contact with it *at any point during the relevant case*. The only exception are continuous +variables (such as age) and contacts with employees, for which the table lists the *exposure time in minutes* between a particular patient and an employee *in the relevant case*. +Contacts are not restricted to patients, but can be much more diverse: + +- Patient-Bed +- Patient-Device +- Patient-Employee +- Patient-Antibiotic (i.e. if the patient received a particular medicine) +- Patient-CHOP Code (i.e. if a patient underwent a particular surgery) +- etc. + +This "spectrum" of contact possibilities is what makes this approach much more sophisticated than the current approach in the clinics, in which the VRE screening method is merely based +on Patient-Patient interactions over the last 7 days. + +This feature vector file, which is exported as a simple CSV file, also serves as the basis for Andrew Atkinson's univariate and multivariate analysis. + +### 5. 
Export of data in Neo4J-compatible format + +This part is not yet implemented. + +### 6. Import into the Neo4J database + +This part is not yet implemented. + +----- + +The file `BasicConfig.ini` is used by all modules and intended to simplify the adjustment of paths, settings, etc. when working on different file systems. + diff --git a/spitalhygiene/vre/src/main/python/vre/data_compiler.py b/spitalhygiene/vre/src/main/python/vre/data_compiler.py new file mode 100644 index 0000000..5a79035 --- /dev/null +++ b/spitalhygiene/vre/src/main/python/vre/data_compiler.py @@ -0,0 +1,260 @@ +# -*- coding: utf-8 -*- +"""This script contains all functions for compiling data from CSV or HDFS, and controls the creation of all +objects required for the VRE model. This process has multiple steps and is structured as follows: + +- Reading the ``BasicConfig.ini`` file in this directory +- Loading all VRE-relevant data using the ``HDFS_data_loader`` class +- Creation of the feature vector using the ``feature_extractor`` class +- Export of this feature vector to CSV +- Creation of the surface model using the ``surface_model`` class +- Export of various results from the surface model using its built-in functions + +**This script is called in the cronjob and triggers the build of the VRE model!** + +Please refer to the script code for details. 
+ +----- +""" + +from HDFS_data_loader import HDFS_data_loader +from feature_extractor import feature_extractor +from networkx_graph import surface_model, create_model_snapshots +import logging +import os +import datetime +import configparser +import calendar +import pathlib + + +if __name__ == "__main__": + ##################################### + # ### Load configuration file + this_filepath = pathlib.Path(os.path.realpath(__file__)).parent + + config_reader = configparser.ConfigParser() + config_reader.read(pathlib.Path(this_filepath, 'BasicConfig.ini')) + + logging.basicConfig(format='%(asctime)s - %(levelname)s: %(message)s', level=logging.INFO, + datefmt='%d.%m.%Y %H:%M:%S') + ##################################### + + ##################################### + # ### Initiate data loader + logging.info("Initiating HDFS_data_loader") + + # --> Load all data: + loader = HDFS_data_loader(hdfs_pipe=False) # hdfs_pipe = False --> files will be loaded directly from CSV + patient_data = loader.patient_data() + ##################################### + + ##################################### + # ### Create and export feature vector + logging.info("creating feature vector") + model_creator = feature_extractor() + (features, labels, dates, v) = model_creator.prepare_features_and_labels(patient_data["patients"]) + + # Export feature vector + logging.info("exporting feature vector") + model_creator.export_csv(features, labels, dates, v, config_reader['PATHS']['csv_export_path']) + ##################################### + + ##################################### + # ### Add contact patients to patient_data (CURRENTLY NOT USED, IMPLEMENTED IN NETWORK) + # logging.info('Retrieving patient contacts') + # patient_data['contact_pats'] = model_creator.get_contact_patients(patients=patient_data['patients']) + ##################################### + + ##################################### + # ### Create graph of the CURRENT model in networkX + surface_graph = 
surface_model(data_dir='/home/i0308559/vre_wd') + surface_graph.add_network_data(patient_dict=patient_data, subset='relevant_case') + surface_graph.remove_isolated_nodes() + surface_graph.add_edge_infection() + + # Extract positive patient nodes + positive_patient_nodes = [node for node, nodedata in surface_graph.S_GRAPH.nodes(data=True) + if nodedata['type'] == 'Patient' and nodedata['vre_status'] == 'pos'] + + # Write node files + surface_graph.write_node_files() + + # Export Patient Degree Ratio + surface_graph.export_patient_degree_ratio(export_path='/home/i0308559/vre_output') + + # Export Total Degree Ratio + surface_graph.export_total_degree_ratio(export_path='/home/i0308559/vre_output') + + # Export node betweenness + surface_graph.update_shortest_path_statistics() + surface_graph.export_node_betweenness(export_path='/home/i0308559/vre_output') + ##################################### + + # Create a monthly TIMESERIES of pdr values starting in January 2018 + # month_snapshots = [datetime.datetime(2018, i, calendar.monthrange(2018, i)[1]) for i in range(1, 13)] # calendar.monthrange() returns a tuple of length 2 of (day_of_week, day_of_month) + # month_snapshots += [datetime.datetime(2019, 1, 31)] # Add values from 2019 + # all_snapshot_models = create_model_snapshots(orig_model = surface_graph, snapshot_dt_list = month_snapshots) + # for model in all_snapshot_models: + # model.export_patient_degree_ratio(export_path='/home/i0308559/vre_output') + ##################################### + + logging.info("Data processed successfully !") + + ##################################### + # ### OBSOLETE SECTION + + + ### Export data in Neo4J-compatible format - REIMPLEMENT THIS PART + # logging.info('Exporting data for Neo4J') + # exporter = Neo4JExporter() + + # Export to csv files for Neo4J - REIMPLEMENT THIS PART + # exporter.write_patient(patient_data["patients"]) + # exporter.write_patient_patient(patient_data['contact_pats'], patient_data['patients']) # 
patient_data['contact_pats'] is really only a list of tuples of patient contacts + # exporter.write_room(patient_data["rooms"]) + # exporter.write_patient_room(patient_data["patients"]) + # exporter.write_bed(patient_data["rooms"]) + # exporter.write_room_bed(patient_data["rooms"]) + # exporter.write_device(patient_data["devices"]) + # exporter.write_patient_device(patient_data["patients"]) + # exporter.write_drug(patient_data["drugs"]) + # exporter.write_patient_medication(patient_data["patients"]) + # exporter.write_employees(patient_data["employees"]) + # exporter.write_patient_employee(patient_data["patients"]) + # exporter.write_referrer(patient_data["partners"]) + # exporter.write_referrer_patient(patient_data["patients"]) + # exporter.write_chop_code(patient_data["chops"]) + # exporter.write_chop_patient(patient_data["patients"]) + + ########################################################################## + # Export data using the data_purification.py script for manual inspection and purging + ########################################################################## + # relcase_count = 0 + # employee_list = [] + # room_list = [] + # geraet_list = [] + # + # for patient in patient_data['patients'].values(): + # rel_case = patient.get_relevant_case() + # patient_has_risk = patient.has_risk() + # if rel_case is not None: # This filter is important ! 
+ # # Write relevant case and associated patient ID + # dpf.write_patient_case(case = rel_case, has_risk = patient_has_risk) + # relcase_count += 1 + # + # # Note employees associated to the relevant case --> employee_list + # for care in rel_case.cares: + # employee_list.append(care.employee.mitarbeiter_id) + # + # # Note rooms associated to the relevant case --> room_list + # for move in rel_case.moves.values(): + # room_list.append(move.zimmr) + # + # # Note all geraete associated to relevant case --> geraet_list + # for appmnt in rel_case.appointments: + # for each_geraet in appmnt.devices: + # geraet_list.append((str(each_geraet.geraet_id), str(each_geraet.geraet_name))) + # print(f"Wrote {relcase_count} relevant cases and associated patients.") + # + # # Remove duplicate entries from the list and write them to file: + # unique_employees = list(set(employee_list)) + # for each_entry in unique_employees: + # dpf.write_employee(each_entry) + # print(f"Wrote {len(unique_employees)} employees.") + # + # unique_rooms = list(set(room_list)) + # for each_entry in unique_rooms: + # dpf.write_room(each_entry) + # print(f"Wrote {len(unique_rooms)} rooms.") + # + # unique_geraete = list(set(geraet_list)) + # for each_tuple in unique_geraete: + # dpf.write_geraet(each_tuple) + # print(f"Wrote {len(unique_geraete)} devices.") + ########################################################################## + + + ########################################################################## + ### For overview purposes (works only on test data) + ########################################################################## + + # Room object + # print('\nRoom object') + # for attribute in ['name', 'moves', 'appointments', 'beds']: + # print(getattr(patient_data['rooms']['BH N 125'], attribute), type(getattr(patient_data['rooms']['BH N 125'], attribute))) + # + # # Bed object + # print('\nBed object') + # for attribute in ['name', 'moves']: + # print(getattr(patient_data['rooms']['BH N 
125'].beds['BHN125F'], attribute), type(getattr(patient_data['rooms']['BH N 125'].beds['BHN125F'], attribute))) + # + # # Moves object + # print('\nMoves object') + # for attribute in ['fal_nr', 'lfd_nr','bew_ty','bw_art','bwi_dt','statu','bwe_dt','ldf_ref','kz_txt','org_fa','org_pf','org_au','zimmr','bett','storn','ext_kh', 'room', 'ward', 'case']: + # print(getattr(patient_data['rooms']['BH N 125'].moves[0], attribute), type(getattr(patient_data['rooms']['BH N 125'].moves[0], attribute))) + # + # # Ward object + # print('\nWard object') + # for attribute in ['name', 'moves', 'appointments']: + # print(getattr(patient_data['rooms']['BH N 125'].moves[0].ward, attribute), type(getattr(patient_data['rooms']['BH N 125'].moves[0].ward, attribute))) + # + # # Case object + # print('\nCase object') + # for attribute in ['patient_id','case_id','case_typ','case_status','fal_ar','beg_dt','end_dt','patient_typ','patient_status','appointments','cares','surgeries','moves','moves_start','moves_end','referrers','patient','medications']: + # print(getattr(patient_data['rooms']['BH N 125'].moves[0].case, attribute), type(getattr(patient_data['rooms']['BH N 125'].moves[0].case, attribute))) + # + # # Patient object + # print('\nPatient object') + # for attribute in ['patient_id','geschlecht','geburtsdatum','plz','wohnort','kanton','sprache','cases','risks']: + # print(getattr(patient_data['rooms']['BH N 125'].moves[0].case.patient, attribute), type(getattr(patient_data['rooms']['BH N 125'].moves[0].case.patient, attribute))) + # + # # Care object + # print('\nCare object') + # for attribute in ['patient_id','case_id','dt','duration_in_minutes','employee_nr','employee']: + # print(getattr(patient_data['rooms']['BH N 125'].moves[0].case.cares[0], attribute), type(getattr(patient_data['rooms']['BH N 125'].moves[0].case.cares[0], attribute))) + # + # # Employee object + # print('\nEmployee object') + # for attribute in ['mitarbeiter_id']: + # print(getattr(patient_data['rooms']['BH N 
125'].moves[0].case.cares[0].employee, attribute), type(getattr(patient_data['rooms']['BH N 125'].moves[0].case.cares[0].employee, attribute))) + # + # # Appointment object + # print('\nAppointment object') + # for attribute in ['termin_id','is_deleted','termin_bezeichnung','termin_art','termin_typ','termin_datum','dauer_in_min','devices','employees','rooms']: + # print(getattr(patient_data['rooms']['BH N 125'].moves[0].case.appointments[0], attribute), type(getattr(patient_data['rooms']['BH N 125'].moves[0].case.appointments[0], attribute))) + # + # # Surgery object + # print('\nSurgery object') + # for attribute in ['bgd_op','lfd_bew','icpmk','icpml','anzop','lslok','fall_nr','storn','org_pf','chop']: + # print(getattr(patient_data['rooms']['BH N 125'].moves[0].case.surgeries[0], attribute), type(getattr(patient_data['rooms']['BH N 125'].moves[0].case.surgeries[0], attribute))) + # + # # Chop object + # print('\nChop object') + # for attribute in ['chop_code','chop_verwendungsjahr','chop','chop_code_level1','chop_level1','chop_code_level2','chop_level2','chop_code_level3','chop_level3','chop_code_level4','chop_level4','chop_code_level5','chop_level5','chop_code_level6','chop_level6','chop_status','chop_sap_katalog_id','cases']: + # print(getattr(patient_data['rooms']['BH N 125'].moves[0].case.surgeries[0].chop, attribute), type(getattr(patient_data['rooms']['BH N 125'].moves[0].case.surgeries[0].chop, attribute))) + # + # # Medication object + # print('\nMedication object') + # for attribute in ['patient_id','case_id','drug_text','drug_atc','drug_quantity','drug_unit','drug_dispform','drug_submission']: + # print(getattr(patient_data['rooms']['BH N 125'].moves[0].case.medications[0], attribute), type(getattr(patient_data['rooms']['BH N 125'].moves[0].case.medications[0], attribute))) + # + # # Partner object - these are found in the 'referrers' set attribute, which is why the attribute is converted to a list() + # print('\nPartner object') + # for attribute in 
['gp_art','name1','name2','name3','land','pstlz','ort','ort2','stras','krkhs','referred_cases']: + # print(getattr(list(patient_data['rooms']['BH N 125'].moves[0].case.referrers)[0], attribute), type(getattr(list(patient_data['rooms']['BH N 125'].moves[0].case.referrers)[0], attribute))) + # + # # Device object + # print('\nDevice object') + # for attribute in ['geraet_id','geraet_name']: + # print(getattr(patient_data['devices']['64174'], attribute), type(getattr(patient_data['devices']['64174'], attribute))) + # + # # Risk object + # print('\nRisk object --> see class definition') + ########################################################################## + # [END OF FILE] + + + + + + diff --git a/spitalhygiene/vre/src/main/python/vre/feature_extractor.py b/spitalhygiene/vre/src/main/python/vre/feature_extractor.py new file mode 100644 index 0000000..47e6672 --- /dev/null +++ b/spitalhygiene/vre/src/main/python/vre/feature_extractor.py @@ -0,0 +1,287 @@ +# -*- coding: utf-8 -*- +"""This script contains the feature_extractor class, which controls the following aspects of VRE: + +- Preparation of the feature vector +- Extraction of patient-patient contacts +- Export to various sources (Gephi, CSV, Neo4J, etc.) + +----- +""" + +# from sklearn.feature_extraction import DictVectorizer +# import numpy as np +# import pandas as pd +import os +import datetime +from dateutil import relativedelta + + +class feature_extractor: + """Creates pandas dataframes with features, labels and relevant dates, and provides export functions to various + target systems. + """ + @staticmethod + def prepare_features_and_labels(patients): + """*Internal function used in various data exports.* + + Creates the feature ``np.array()`` and label ``np.array()``, along with relevant dates. 
class feature_extractor:
    """Builds feature/label matrices for VRE patients and exports them.

    Creates pandas/numpy structures with features, labels and relevant dates,
    extracts patient-patient contacts (same room / same ward) and provides
    export functions to various target systems (CSV, Gephi, ...).
    """

    @staticmethod
    def prepare_features_and_labels(patients):
        """Create the feature matrix, label vector and risk-date vector.

        Args:
            patients (dict): Dictionary mapping patient ids to Patient() objects of the form
                ``{"00001383264": Patient(), "00001383310": Patient(), ...}``

        Returns:
            tuple: *(features, labels, dates, v)* where *features* is a 2-D ``numpy.ndarray``
            (one row per patient, one-of-K encoded risk factors), *labels* and *dates* are
            1-D ``numpy.ndarray`` objects aligned with the feature rows, and *v* is the
            fitted ``DictVectorizer``.
        """
        # NOTE(review): these imports were commented out at module level in the original
        # file, which made this method fail with NameError at runtime. Imported locally
        # here so the rest of the module stays usable without sklearn installed.
        import numpy as np
        from sklearn.feature_extraction import DictVectorizer

        risk_factors = []
        labels = []
        dates = []
        for patient in patients.values():
            # get_features() returns a dict such as {"length_of_stay": 47, "nr_cases": 3, ...}
            patient_features = patient.get_features()
            if patient_features is not None:
                # Keep labels/dates appended together with the features so all three
                # stay row-aligned for the DictVectorizer output below.
                risk_factors.append(patient_features)
                labels.append(patient.get_label())      # integer between -1 and 3
                dates.append(patient.get_risk_date())   # datetime of the label risk date, or None

        # One-of-K encode the per-patient risk-factor dicts: for a categorical value with
        # levels "a", "b", "c" this yields three 0/1 columns (value=a, value=b, value=c).
        v = DictVectorizer(sparse=False)
        features = v.fit_transform(risk_factors)
        labels_np = np.asarray(labels)
        dates_np = np.asarray(dates)
        return features, labels_np, dates_np, v

    @staticmethod
    def export_csv(features, labels, dates, v, file_path):
        """Export features, labels and dates to a single CSV file.

        Args:
            features (numpy.ndarray): one row per patient, one-of-K encoded risk factors
            labels (numpy.ndarray): 1-D array of labels (integers between -1 and 3)
            dates (numpy.ndarray): 1-D array of risk dates per patient
            v: the ``DictVectorizer`` used to create *features*
            file_path (str): absolute path of the CSV file to write
        """
        # NOTE(review): pandas was commented out at module level; imported locally.
        import pandas as pd

        # v.vocabulary_ maps feature names to column indices; order names by index so
        # the header matches the column layout of the features matrix.
        sorted_cols = sorted(v.vocabulary_, key=v.vocabulary_.get)
        df = pd.DataFrame(data=features, columns=sorted_cols)
        df["label"] = labels
        df["diagnosis_date"] = dates
        # index=False prevents writing row names in a separate, unlabeled column
        df.to_csv(file_path, sep=",", encoding="utf-8", index=False)

    @staticmethod
    def export_gephi(features, labels, dates, v, dest_path='.', csv_sep=','):
        """Export node and edge lists in Gephi-compatible format for visualisation.

        Writes ``dest_path/edge_list.csv`` and ``dest_path/node_list.csv``. Edges connect
        screened patients (labels 1-3) and are weighted by the number of shared rooms,
        employees and devices; edges are directed from the older to the newer risk date.

        Args:
            features (numpy.ndarray): one row per patient, one-of-K encoded risk factors
            labels (numpy.ndarray): 1-D array of labels (integers between -1 and 3)
            dates (numpy.ndarray): 1-D array of risk dates per patient (datetime or None)
            v: the ``DictVectorizer`` used to create *features*
            dest_path (str): directory for the exported files (default: current directory)
            csv_sep (str): separator for the exported csv files (default ``,``)
        """
        import numpy as np

        def _cols_with_prefix(prefix):
            # Column indices of all one-of-K features whose name starts with prefix,
            # e.g. "room=INE GE06" -> 176. Used for array slicing below.
            return [idx for name, idx in v.vocabulary_.items() if name.startswith(prefix)]

        room_cols = _cols_with_prefix("room")
        employee_cols = _cols_with_prefix("employee")
        device_cols = _cols_with_prefix("device")

        nr_patients = len(features)

        # Context managers ensure the files are closed even if a write fails
        # (the original leaked the handles on exception).
        with open(os.path.join(dest_path, 'edge_list.csv'), "w") as edge_list:
            edge_list.write(csv_sep.join(['Source', 'Target', 'Weight', 'Type', 'Art\n']))
            for ind in range(nr_patients):
                if labels.item(ind) < 1:   # only include patients with screening (labels 1,2,3)
                    continue
                for j in range(ind + 1, nr_patients):
                    if labels.item(j) < 1:
                        continue
                    # Sources/targets are row indices 0..nr_patients-1, NOT patient ids.
                    # Edge goes from older to newer relevant date; flip if patient j is older
                    # (or either date is missing).
                    source_target_str = str(ind) + csv_sep + str(j)
                    if dates[ind] is None or dates[j] is None or dates[ind] > dates[j]:
                        source_target_str = str(j) + csv_sep + str(ind)
                    # logical_and() compares the two rows element-wise; the sum counts
                    # positions where both patients have a 1 (i.e. shared resources).
                    for cols, art in ((room_cols, "rooms"),
                                      (employee_cols, "employees"),
                                      (device_cols, "devices")):
                        weight = sum(np.logical_and(features[ind, cols], features[j, cols]))
                        if weight > 0:
                            # weight == 0 means the patients never shared this resource
                            edge_list.write(source_target_str + csv_sep + str(weight) + csv_sep
                                            + "directed" + csv_sep + art + "\n")

        with open(os.path.join(dest_path, 'node_list.csv'), "w") as node_list:
            node_list.write(csv_sep.join(['Id', 'Label', 'Start', 'Category\n']))
            for ind, dt in enumerate(dates):   # dates holds datetime objects or None
                infection = "" if dt is None else dt.strftime("%Y-%m-%d")
                node_list.write(str(ind) + csv_sep + infection + csv_sep + '0' + csv_sep
                                + str(labels[ind]) + "\n")

    @staticmethod
    def get_contact_patients_for_case(cases, contact_pats):
        """Extract contact patients for one case and append them to *contact_pats*.

        Args:
            cases (Case): a single Case() object (parameter name kept for backward
                compatibility although it holds one case, not a dict)
            contact_pats (list): list to which tuples of length 6 are appended, either
                ``(source_pat_id, dest_pat_id, start_overlap_dt, end_overlap_dt, room_name,
                "kontakt_raum")`` for a shared room, or ``(..., ward_name, "kontakt_org")``
                for a shared ward.
        """
        def _append_contact(move, overlap_move, location_name, kind):
            # The overlap window is the intersection of the two movement intervals.
            start_overlap = max(move.bwi_dt, overlap_move.bwi_dt)
            end_overlap = min(move.bwe_dt, overlap_move.bwe_dt)
            contact_pats.append((
                move.case.patient_id,
                overlap_move.case.patient_id,
                start_overlap.strftime("%Y-%m-%dT%H:%M"),
                end_overlap.strftime("%Y-%m-%dT%H:%M"),
                location_name,
                kind,
            ))

        for move in cases.moves.values():
            ####################################################
            # --> Extract contacts in the same room
            ####################################################
            if move.bwe_dt is not None and move.room is not None:
                # get_moves_during() returns all moves overlapping the bwi_dt-bwe_dt interval
                for overlap_move in move.room.get_moves_during(move.bwi_dt, move.bwe_dt):
                    if overlap_move.case is not None and move.case is not None \
                            and overlap_move.case.fal_ar == "1" \
                            and overlap_move.bew_ty in ["1", "2", "3"] \
                            and overlap_move.bwe_dt is not None:
                        _append_contact(move, overlap_move, move.room.name, "kontakt_raum")
            ####################################################
            # --> Extract contacts in the same ward (ORGPF)
            ####################################################
            if move.bwe_dt is not None and move.ward is not None:
                for overlap_move in move.ward.get_moves_during(move.bwi_dt, move.bwe_dt):
                    if overlap_move.case is not None and move.case is not None \
                            and overlap_move.case.fal_ar == "1" \
                            and overlap_move.bew_ty in ["1", "2", "3"] \
                            and overlap_move.bwe_dt is not None \
                            and (overlap_move.zimmr is None or move.zimmr is None
                                 or overlap_move.zimmr != move.zimmr):
                        # same-ward contact only counts when the rooms differ (or are unknown)
                        _append_contact(move, overlap_move, move.ward.name, "kontakt_org")

    def get_contact_patients(self, patients):
        """Extract all patient contacts (same room / same ward) during the last year.

        Args:
            patients (dict): Dictionary mapping patient ids to Patient() objects
                ``{"00001383264": Patient(), "00001383310": Patient(), ...}``

        Returns:
            list: tuples of length 6, either
            ``(source_pat_id, dest_pat_id, start_overlap_dt, end_overlap_dt, room_name, "kontakt_raum")``
            or
            ``(source_pat_id, dest_pat_id, start_overlap_dt, end_overlap_dt, ward_name, "kontakt_org")``
        """
        contact_pats = []
        for pat in patients.values():
            if pat.has_risk():
                for case in pat.cases.values():
                    # only cases whose moves ended within the last year are considered
                    if case.moves_end is not None and case.moves_end > \
                            datetime.datetime.now() - relativedelta.relativedelta(years=1):
                        self.get_contact_patients_for_case(case, contact_pats)
        return contact_pats
+ """ + + def __init__(self, termin_id, is_deleted, termin_bezeichnung, termin_art, termin_typ, termin_datum, dauer_in_min): + self.termin_id = termin_id + self.is_deleted = is_deleted + self.termin_bezeichnung = termin_bezeichnung + self.termin_art = termin_art + self.termin_typ = termin_typ + self.termin_datum = datetime.strptime(termin_datum, "%Y-%m-%d %H:%M:%S") + try: + self.dauer_in_min = int(float(dauer_in_min)) + except ValueError as e: + self.dauer_in_min = 0 + + self.devices = [] + self.employees = [] + self.rooms = [] + + def add_device(self, device): + """Adds a device to the self.devices() list of this appointment. + + Args: + device (Device() Object): Device() object to append to this appointment. + """ + self.devices.append(device) + + def add_room(self, room): + """Adds a room to the self.rooms() list of this appointment. + + Args: + room (Room() Object): Room() object to append to this appointment. + """ + self.rooms.append(room) + + def add_employee(self, employee): + """Adds an employee to the self.employees() list of this appointment. + + Args: + employee (Employee() Object): Employee() object to append to this appointment. + """ + self.employees.append(employee) + + @staticmethod + def create_termin_map(lines): + """Loads the appointments from a csv reader instance. + + This function will be called by the ``HDFS_data_loader.patient_data()`` function (lines is an iterator object). 
+ The underlying table in the Atelier_DataScience is named ``V_DH_DIM_TERMIN_CUR`` and structured as follows: + + ======== ========== ================= ================ =============== ======================== ========== + TERMINID IS_DELETED TERMINBEZEICHNUNG TERMINART TERMINTYP TERMINDATUM DAUERINMIN + ======== ========== ================= ================ =============== ======================== ========== + 957219 0 K90 HINF K90 HINF Patiententermin 2005-02-04 00:00:00.0000 90.00000 + 957224 0 Konsultation 15' Konsultation 15' Patiententermin 2005-02-03 00:00:00.0000 15.00000 + ======== ========== ================= ================ =============== ======================== ========== + + Args: + lines (iterator() object): csv iterator from which data will be read + + Returns: + dict: + Dictionary mapping appointment ids to Appointment() objects + + --> ``{ '36830543' : Appointment(), ... }`` + """ + logging.debug("create_termin_map") + nr_malformed = 0 + nr_ok = 0 + appointments = dict() + for line in lines: + if len(line) != 7: + nr_malformed += 1 + continue + else: + appointment = Appointment(*line) + appointments[appointment.termin_id] = appointment + nr_ok += 1 + logging.info(f"{nr_ok} ok, {nr_malformed} appointments malformed") + return appointments + + @staticmethod + def add_appointment_to_case(lines, cases, appointments): + """Adds Appointment() objects to the SAP cases based on lines read from a csv file. + + This function will be called by the ``HDFS_data_loader.patient_data()`` function (lines is an iterator object). 
+ The underlying table in the Atelier_DataScience is called ``V_DH_FACT_TERMINPATIENT`` and structured as follows: + + ======== =========== ========== + TERMINID PATIENTID FALLID + ======== =========== ========== + 35672314 00008210020 0005660334 + 17255155 00002042800 0004017880 + ======== =========== ========== + + Args: + lines (iterator() object): csv iterator from which data will be read + cases (dict): Dictionary mapping case ids to Case() objects + + --> ``{ "0003536421" : Case(), "0003473241" : Case(), ...}`` + + appointments (dict): Dictionary mapping appointment ids to Appointment() objects + + --> ``{ '36830543' : Appointment(), ... }`` + """ + logging.debug("add_appointment_to_case") + nr_appointment_not_found = 0 + nr_case_not_found = 0 + nr_ok = 0 + for line in lines: + appointment_id = line[0] + case_id = line[2] + if appointments.get(appointment_id, None) is None: + nr_appointment_not_found += 1 + continue + if cases.get(case_id, None) is None: + nr_case_not_found += 1 + continue + cases[case_id].add_appointment(appointments[appointment_id]) + nr_ok += 1 + logging.info(f"{nr_ok} ok, {nr_case_not_found} cases not found, " + f"{nr_appointment_not_found} appointments not found") diff --git a/spitalhygiene/vre/src/main/python/vre/model/Bed.py b/spitalhygiene/vre/src/main/python/vre/model/Bed.py new file mode 100644 index 0000000..bfd36fa --- /dev/null +++ b/spitalhygiene/vre/src/main/python/vre/model/Bed.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +"""This script contains the ``Bed`` class used in the VRE model. + +----- +""" + +from datetime import datetime + +class Bed: + """Models a Bed. + """ + def __init__(self, name): + self.name = name + self.moves = [] + + def add_move(self, move): + """Adds a move to the self.moves() list of this bed. + + Args: + move (Move() Object): Move() object to append. 
+ """ + self.moves.append(move) diff --git a/spitalhygiene/vre/src/main/python/vre/model/Care.py b/spitalhygiene/vre/src/main/python/vre/model/Care.py new file mode 100644 index 0000000..4d62a71 --- /dev/null +++ b/spitalhygiene/vre/src/main/python/vre/model/Care.py @@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- +"""This script contains the ``Care`` class used in the VRE model. + +----- +""" + +import logging +from datetime import datetime +from Employee import Employee + +class Care: + """Models an entry in TACS. + """ + + def __init__( + self, + patient_patientid, + patient_typ, + patient_status, + fall_nummer, + fall_typ, + fall_status, + datum_betreuung, + dauer_betreuung_in_min, + mitarbeiter_personalnummer, + mitarbeiter_anstellungsnummer, + mitarbeiter_login, + batch_run_id, + ): + self.patient_id = patient_patientid + self.case_id = fall_nummer + try: + self.dt = datetime.strptime(datum_betreuung, "%Y-%m-%d %H:%M:%S") + except ValueError as e: + self.dt = datetime.strptime(datum_betreuung, "%Y-%m-%d %H:%M:%S") + self.duration_in_minutes = int(dauer_betreuung_in_min) + self.employee_nr = mitarbeiter_personalnummer + + self.employee = None + + def add_employee(self, employee): + """Assigns an employee to the ``self.employee`` attribute. + + Note: + Only one employee can be assigned to Care() objects! + + Args: + employee (Employee() Object): Employee() object to assign. + """ + self.employee = employee + + @staticmethod + def add_care_to_case(lines, cases, employees): + """Adds the entries from TACS as instances of Care() objects to the respective Case(). + + This function will be called by the HDFS_data_loader.patient_data() function (lines is an iterator object). 
+ The underlying table in the Atelier_DataScience is called ``TACS_DATEN`` and structured as follows: + + ================= ================ ============== =========== ============= =========== ===================== ====================== ========================== ============================= ================= ============ + patient_patientid patient_typ patient_status fall_nummer fall_typ fall_status datum_betreuung dauer_betreuung_in_min mitarbeiter_personalnummer mitarbeiter_anstellungsnummer mitarbeiter_login BATCH_RUN_ID + ================= ================ ============== =========== ============= =========== ===================== ====================== ========================== ============================= ================= ============ + 00013768220 Standard Patient aktiv 0006422111 Standard Fall aktiv 2018-03-11 00:00:00.0 3 0301119 00026556 I0301119 870 + 00000552828 Standard Patient aktiv 0006454306 Standard Fall aktiv 2018-04-10 00:00:00.0 20 0025908 00014648 I0025908 870 + ================= ================ ============== =========== ============= =========== ===================== ====================== ========================== ============================= ================= ============ + + Args: + lines (iterator() object): csv iterator from which data will be read + cases (dict): Dictionary mapping case ids to Case() objects + + --> ``{"0003536421" : Case(), "0003473241" : Case(), ...}`` + employees (dict): Dictionary mapping employee_ids to Employee() objects + + --> ``{'0032719' : Employee(), ... 
class Case:
    """Models an SAP IS-H case (table NFAL) with its movements, appointments, cares, etc."""

    def __init__(
        self,
        patient_id,
        case_id,
        case_typ,
        case_status,
        fal_ar,
        beg_dt,
        end_dt,
        patient_typ,
        patient_status,
    ):
        self.patient_id = patient_id
        self.case_id = case_id
        self.case_typ = case_typ
        self.case_status = case_status
        self.fal_ar = fal_ar
        # Empty strings and the literal "NULL" both mean "no date" in the extract.
        self.beg_dt = None
        if beg_dt and beg_dt != "NULL":
            self.beg_dt = datetime.strptime(beg_dt, "%Y-%m-%d")
        self.end_dt = None
        if end_dt and end_dt != "NULL":
            self.end_dt = datetime.strptime(end_dt, "%Y-%m-%d")
        self.patient_typ = patient_typ
        self.patient_status = patient_status
        self.appointments = []
        self.cares = []
        self.surgeries = []
        self.moves = dict()       # maps move lfd_nr -> Move()
        self.moves_start = None   # earliest bwi_dt over all moves
        self.moves_end = None     # latest bwe_dt over all moves
        self.referrers = set()
        self.patient = None
        self.medications = []
        self.icd_codes = []

    def is_stationary(self):
        """Return True for stationary cases ("1" in FALAR of SAP table NFAL)."""
        return self.fal_ar == "1"

    def open_before_or_at_date(self, dt):
        """Return True if the moves of this case started before or at *dt* (a datetime.date)."""
        if self.moves_start is None:
            return False
        return self.moves_start.date() <= dt

    def closed_after_or_at_date(self, dt):
        """Return True if the moves of this case ended after or at *dt* (a datetime.date)."""
        if self.moves_end is None:
            return False
        return self.moves_end.date() >= dt

    def add_referrer(self, partner):
        """Add a business partner to the set of referrers for this case.

        Referrers can be found in SAP IS-H table NFPZ, or in NBEW.
        """
        self.referrers.add(partner)

    def add_surgery(self, surgery):
        """Add a surgery to this case."""
        self.surgeries.append(surgery)

    def add_medication(self, medication):
        """Add a Medication() to this case."""
        self.medications.append(medication)

    def add_appointment(self, appointment):
        """Add an Appointment() to this case."""
        self.appointments.append(appointment)

    def add_care(self, care):
        """Add a Care() entry to this case."""
        self.cares.append(care)

    def add_move(self, move):
        """Add a Move() to this case, keeping moves_start/moves_end up to date."""
        self.moves[move.lfd_nr] = move
        if move.bwe_dt is not None and ((self.moves_end is None) or (move.bwe_dt > self.moves_end)):
            self.moves_end = move.bwe_dt
        if move.bwi_dt is not None and ((self.moves_start is None) or (move.bwi_dt < self.moves_start)):
            self.moves_start = move.bwi_dt

    def correct_move_enddt(self):
        """Fix missing/untrustworthy movement end dates of this case.

        The end date of the movement data cannot be trusted, so each move's end is set to
        the start of the next move (ordered by lfd_nr); only the last move keeps its own
        end date. Call this only after ALL movement data is loaded!
        """
        sorted_keys = sorted(self.moves)
        for i, lfd_nr in enumerate(sorted_keys):
            if i < (len(sorted_keys) - 1):
                self.moves[lfd_nr].bwe_dt = self.moves[sorted_keys[i + 1]].bwi_dt

    def add_patient(self, p):
        """Link this case back to its Patient() object."""
        self.patient = p

    def add_icd_code(self, icd_code):
        """Adds an ICD() code object to the self.icd_codes list."""
        self.icd_codes.append(icd_code)

    def get_length_of_stay(self):
        """Duration between the first move's start and the last move's end.

        Returns:
            datetime.timedelta: length of stay, or None for non-stationary cases or
            cases without any loaded moves.
        """
        if not self.is_stationary():
            return None
        if self.moves_start is None:
            # guard: the original raised TypeError (now() - None) when no moves were loaded
            return None
        if self.moves_end is None:
            # case still open --> measure up to now
            return datetime.now() - self.moves_start
        return self.moves_end - self.moves_start

    def get_length_of_stay_until(self, dt):
        """Timedelta between moves_start and moves_end or *dt*, whichever comes first.

        Args:
            dt (datetime.datetime): cut-off datetime

        Returns:
            datetime.timedelta
        """
        if self.moves_end is None or (self.moves_end > dt):
            return dt - self.moves_start
        return self.moves_end - self.moves_start

    def get_moves_before_dt(self, dt):
        """Return the list of moves that start before *dt* (a datetime.datetime)."""
        return [move for move in self.moves.values() if move.bwi_dt < dt]

    @staticmethod
    def create_case_map(lines, patienten, load_limit=None):
        """Read the case csv (table V_LA_ISH_NFAL_NORM) and create Case objects from the rows.

        Builds ``{case_id: Case()}`` for all cases that are not "storniert", cross-linking
        Cases and Patients both ways. Called by ``HDFS_data_loader.patient_data()``; each
        row holds: PATIENTID, CASEID, CASETYP, CASESTATUS, FALAR, BEGDT, ENDDT,
        PATIENTTYP, PATIENTSTATUS.

        Args:
            lines (iterator() object): csv iterator from which data will be read
            patienten (dict): Dictionary mapping patient ids to Patient() objects
                --> ``{"00001383264" : Patient(), "00001383310" : Patient(), ...}``
            load_limit (int): optional cap on the number of successfully linked cases

        Returns:
            dict: Dictionary mapping case ids to Case() objects
            --> ``{"0003536421" : Case(), "0003473241" : Case(), ...}``
        """
        logging.debug("create_case_map")
        import_count = 0
        nr_not_found = 0
        nr_ok = 0
        nr_not_stationary = 0   # kept for the log line; filtering happens in the SQL query
        cases = dict()
        for line in lines:
            fall = Case(*line)
            if fall.case_status == "aktiv":   # exclude entries where CASESTATUS is "storniert"
                cases[fall.case_id] = fall
                if patienten.get(fall.patient_id, None) is not None:
                    patienten[fall.patient_id].add_case(fall)
                    fall.add_patient(patienten[fall.patient_id])
                    import_count += 1
                    if load_limit is not None and import_count > load_limit:
                        break
                else:
                    nr_not_found += 1
                    continue
            else:
                continue
            nr_ok += 1

        logging.info(f"{nr_ok} ok, {nr_not_found} patients not found, {nr_not_stationary} cases not stationary")
        return cases
class Chop:
    """Models a ``CHOP`` code (surgical procedure classification)."""

    def __init__(
        self,
        chop_code,
        chop_verwendungsjahr,
        chop,
        chop_code_level1,
        chop_level1,
        chop_code_level2,
        chop_level2,
        chop_code_level3,
        chop_level3,
        chop_code_level4,
        chop_level4,
        chop_code_level5,
        chop_level5,
        chop_code_level6,
        chop_level6,
        chop_status,
        chop_sap_katalog_id,
    ):
        self.chop_code = chop_code
        self.chop_verwendungsjahr = chop_verwendungsjahr
        self.chop = chop
        self.chop_code_level1 = chop_code_level1
        self.chop_level1 = chop_level1
        self.chop_code_level2 = chop_code_level2
        self.chop_level2 = chop_level2
        self.chop_code_level3 = chop_code_level3
        self.chop_level3 = chop_level3
        self.chop_code_level4 = chop_code_level4
        self.chop_level4 = chop_level4
        self.chop_code_level5 = chop_code_level5
        self.chop_level5 = chop_level5
        self.chop_code_level6 = chop_code_level6
        self.chop_level6 = chop_level6
        self.chop_status = chop_status
        self.chop_sap_katalog_id = chop_sap_katalog_id

        self.cases = []   # Keep track of all cases that have this chop code

    def add_case(self, case):
        """Adds a case to the self.cases list.

        Args:
            case (Case() Object): Case() object to append.
        """
        self.cases.append(case)

    def get_detailed_chop(self):
        """Returns description text from the highest available level for this CHOP.

        Returns:
            str: Highest available (non-empty, non-"NULL") level description, or None
            if no level carries a description.
        """
        for field in (
            self.chop_level6,
            self.chop_level5,
            self.chop_level4,
            self.chop_level3,
            self.chop_level2,
            self.chop_level1,
        ):
            if field is not None and len(field) > 0 and field != "NULL":
                return field
        return None   # made explicit; the original fell off the end implicitly

    def get_lowest_level_code(self):
        """Returns the lowest level of the CHOP code.

        The lowest level is level 2 and is represented as the *first number* of the code:
        ``Z89.07.24`` :math:`\\longrightarrow` ``Z89``

        Returns:
            str: Lowest available level for this CHOP code.
        """
        return self.chop_code_level2

    @staticmethod
    def create_chop_dict(lines):
        """Creates and returns a dict of all chop codes.

        The key of a chop code is ``<code>_<katalog_id>`` because different catalogs exist
        for different years. Called by ``HDFS_data_loader.patient_data()``; the underlying
        table is ``V_DH_REF_CHOP`` (CHOPCODE, CHOPVERWENDUNGSJAHR, CHOP, six pairs of
        CHOPCODELEVELn/CHOPLEVELn, CHOPSTATUS, CHOPSAPKATALOGID).

        Args:
            lines (iterator() object): csv iterator from which data will be read

        Returns:
            dict: Dictionary mapping the chopcode_katalogid entries to Chop() objects
            :math:`\\longrightarrow` ``{ 'Z39.61.10_11': Chop(), ... }``
        """
        logging.debug("create_chop_dict")
        chops = dict()
        for line in lines:
            chop = Chop(*line)
            # yields keys such as "Z62.99.30_16" or "Z62.99.99_10"
            chops[chop.chop_code + "_" + chop.chop_sap_katalog_id] = chop
        logging.info(f"{len(chops)} chops created")
        return chops

    @staticmethod
    def chop_code_stats(chops):
        """Print the case frequency of each chop code to the console.

        Made a @staticmethod since it never used ``self`` (instance calls still work).

        Args:
            chops (dict): Dictionary mapping the chopcode_katalogid entries to Chop() objects
                :math:`\\longrightarrow` ``{ 'Z39.61.10_11': Chop(), ... }``
        """
        for chop in chops.values():
            # f-string also tolerates get_detailed_chop() returning None (the original
            # string concatenation raised TypeError in that case)
            print(f"{len(chop.cases)}: {chop.get_detailed_chop()} {chop.chop_code}")
+ """ + def __init__(self, geraet_id, geraet_name): + self.geraet_id = geraet_id + self.geraet_name = geraet_name + + @staticmethod + def create_device_map(lines): + """Loads all devices into a dictionary based on lines in the csv file. + + This function will be called by the ``HDFS_data_loader.patient_data()`` function (lines is an iterator object). + The underlying table in the Atelier_DataScience is called ``V_DH_DIM_GERAET_CUR`` and structured as follows: + + ======== ========== + GERAETID GERAETNAME + ======== ========== + 82250 ANS-Fix + 162101 Waage + ======== ========== + + Args: + lines (iterator() object): csv iterator from which data will be read + + Returns: + dict: + Dictionary mapping device ids to Device() objects + + :math:`\\longrightarrow` ``{'64174' : Device(), ... }`` + """ + logging.debug("create_device_map") + devices = dict() + for line in lines: + device = Device(*line) + devices[device.geraet_id] = device + logging.info(f"{len(devices)} devices created") + return devices + + @staticmethod + def add_device_to_appointment(lines, appointments, devices): + """Adds the device in ``devices`` to the respective appointment in ``appointments``. + + This function will be called by the HDFS_data_loader.patient_data() function (lines is an iterator object). 
+ The underlying table in the Atelier_DataScience is called V_DH_FACT_TERMINGERAET and structured as follows: + + ======== ======== ======================== ======================== ========== + TERMINID GERAETID TERMINSTART_TS TERMINENDE_TS DAUERINMIN + ======== ======== ======================== ======================== ========== + 26266554 123223 2015-04-03 13:45:00.0000 2015-04-03 15:45:00.0000 120.000000 + 23678836 38006 2014-07-31 10:00:00.0000 2014-07-31 10:30:00.0000 30.000000 + ======== ======== ======================== ======================== ========== + + Args: + lines (iterator() object): csv iterator from which data will be read + appointments (dict): Dictionary mapping appointment ids to Appointment() objects + + :math:`\\longrightarrow` ``{ '36830543' : Appointment(), ... }`` + devices (dict): Dictionary mapping device_ids to Device() objects + + :math:`\\longrightarrow` ``{'64174' : Device(), ... }`` + """ + logging.debug("add_device_to_appointment") + nr_appointment_not_found = 0 + nr_device_not_found = 0 + nr_ok = 0 + for line in lines: + appointment_id = line[0] + if appointments.get(appointment_id, None) is None: + nr_appointment_not_found += 1 + continue + device_id = line[1] + if devices.get(device_id, None) is None: + nr_device_not_found += 1 + continue + appointments[appointment_id].add_device(devices[device_id]) + nr_ok += 1 + logging.info(f"{nr_ok} ok, {nr_appointment_not_found} appointments not found, " + f"{nr_device_not_found} devices not found") diff --git a/spitalhygiene/vre/src/main/python/vre/model/Employee.py b/spitalhygiene/vre/src/main/python/vre/model/Employee.py new file mode 100644 index 0000000..9594d55 --- /dev/null +++ b/spitalhygiene/vre/src/main/python/vre/model/Employee.py @@ -0,0 +1,82 @@ +# -*- coding: utf-8 -*- +"""This script contains the ``Employee`` class used in the VRE model. + +----- +""" + +import logging + + +class Employee: + """Models an employee (doctor, nurse, etc) from RAP. 
+ """ + + def __init__(self, mitarbeiter_id): + self.mitarbeiter_id = mitarbeiter_id + + @staticmethod + def create_employee_map(lines): + """Reads the appointment to employee file and creates an Employee(). + + + This function will be called by the HDFS_data_loader.patient_data() function (lines is an iterator object). + The underlying table in the Atelier_DataScience is called V_DH_FACT_TERMINMITARBEITER and structured as follows: + + ======== ============= ======================= ======================== ========== + TERMINID MITARBEITERID TERMINSTART_TS TERMINENDE_TS DAUERINMIN + ======== ============= ======================= ======================== ========== + 521664 0063239 2003-11-11 07:30:00.000 2003-11-11 08:00:00.0000 30.000000 + 521754 X33671 2003-11-10 09:15:00.000 2003-11-10 09:45:00.0000 30.000000 + ======== ============= ======================= ======================== ========== + + Args: + lines (iterator() object): csv iterator from which data will be read + + Returns: + dict: + Dictionary mapping employee_ids to Employee() objects + + :math:`\\longrightarrow` ``{'0032719' : Employee(), ... }`` + """ + logging.debug("create_employee_map") + employee_dict = dict() + for line in lines: + employee = line[1] + employee_dict[employee] = Employee(employee) + logging.info(f"{len(employee_dict)} employees created") + return employee_dict + + @staticmethod + def add_employee_to_appointment(lines, appointments, employees): + """Adds Employee() in employees to an Appointment(). + + This function will be called by the HDFS_data_loader.patient_data() function (lines is an iterator object). + The underlying table is V_DH_FACT_TERMINMITARBEITER, and is identical to the one defined in the + create_employee_map() method above. + + Args: + lines (iterator() object): csv iterator from which data will be read + appointments (dict): Dictionary mapping appointment ids to Appointment() objects + + :math:`\\longrightarrow` ``{ '36830543' : Appointment(), ... 
}`` + employees (dict): Dictionary mapping employee_ids to Employee() objects + + :math:`\\longrightarrow` ``{'0032719' : Employee(), ... }`` + """ + logging.debug("add_employee_to_appointment") + nr_employee_not_found = 0 + nr_appointment_not_found = 0 + nr_ok = 0 + for line in lines: + employee_id = line[1] + appointment_id = line[0] + if appointments.get(appointment_id, None) is None: + nr_appointment_not_found += 1 + continue + if employees.get(employee_id, None) is None: + nr_employee_not_found += 1 + continue + appointments[appointment_id].add_employee(employees[employee_id]) + nr_ok += 1 + logging.info(f"{nr_ok} ok, {nr_appointment_not_found} appointments not found, " + f"{nr_employee_not_found} employees not found") diff --git a/spitalhygiene/vre/src/main/python/vre/model/ICD.py b/spitalhygiene/vre/src/main/python/vre/model/ICD.py new file mode 100644 index 0000000..24f3718 --- /dev/null +++ b/spitalhygiene/vre/src/main/python/vre/model/ICD.py @@ -0,0 +1,99 @@ +# -*- coding: utf-8 -*- +"""This script contains the ``ICD`` class used in the VRE model. + +----- +""" + +import datetime +import logging + +class ICD: + """Models an ``ICD`` object. + """ + def __init__(self, falnr, dkey1, dkat, diadt, drg_cat): + self.fall_nummer = falnr + self.icd_code = dkey1 + self.catalogue_year = int(dkat) # catalogue year of the ICD code (2-digit integer) - may or may not be relevant + self.diagnosis_dt = datetime.datetime.strptime(diadt, '%Y-%m-%d') # date when ICD code was set + self.drg_category = drg_cat # single character specifying the DRG category + + self.cases = [] # List containing all cases with this particular ICD code (currently not used) + + def add_case(self, case): + """Adds a case to this ICD's ``self.cases`` attribute. + + Args: + case (Case() Object): Case() object to add. + """ + self.cases.append(case) + + @staticmethod + def create_icd_dict(lines): + """Creates and returns a dictionary of all icd codes. 
+ + This function will be called by the HDFS_data_loader.patient_data() function (lines is an iterator object). + The underlying table in the Atelier_DataScience is LA_ISH_NDIA and structured as follows: + + ============ ======= ======= ============ ============== + 'FALNR' 'DKEY1' 'DKAT1' 'DIADT' 'DRG_CATEGORY' + ============ ======= ======= ============ ============== + '0001832682' 'A41.9' '17' '2018-02-27' 'P' + '0001832682' 'R65.1' '17' '2018-02-27' 'S' + ============ ======= ======= ============ ============== + + Args: + lines (iterator() object): csv iterator from which data will be read + + Returns: + dict: + Dictionary mapping the icd_code entries to ICD() objects + + :math:`\\longrightarrow` ``{ 'Z12.8': ICD(), ... }`` + """ + logging.debug("Creating ICD dictionary") + icd_dict = {} + + for each_line in lines: + this_icd = ICD(*each_line) + icd_dict[this_icd.icd_code] = this_icd + # Write success to log and return dictionary + logging.info(f'Successfully created {len(icd_dict.values())} ICD entries') + return icd_dict + + @staticmethod + def add_icd_to_case(lines, cases): + """Adds ICD codes to cases based on the ICD.fall_nummer attribute. + + For details on how each line in the lines iterator object is formatted, please refer to the function + create_icd_dict() above. 
+ + Args: + lines (iterator() object): csv iterator from which data will be read + cases (dict): Dictionary mapping case ids to Case() objects + :math:`\\longrightarrow` ``{"0003536421" : Case(), "0003473241" : Case(), ...}`` + """ + logging.debug("Adding ICD codes to cases") + cases_not_found = 0 + cases_found = 0 + unique_case_ids = [] # list of unique case ids processed + + for each_line in lines: + this_icd = ICD(*each_line) + if cases.get(this_icd.fall_nummer) is not None: # default value for .get() is None + cases.get(this_icd.fall_nummer).add_icd_code(this_icd) + cases_found += 1 + unique_case_ids.append(cases.get(this_icd.fall_nummer)) + else: + cases_not_found += 1 + logging.info(f'Added {cases_found} ICD codes to {len(set(unique_case_ids))} relevant cases,' + f'{cases_not_found} cases not found') + + + + + + + + + + diff --git a/spitalhygiene/vre/src/main/python/vre/model/Medication.py b/spitalhygiene/vre/src/main/python/vre/model/Medication.py new file mode 100644 index 0000000..cc06918 --- /dev/null +++ b/spitalhygiene/vre/src/main/python/vre/model/Medication.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- +"""This script contains the ``Medication`` class used in the VRE model. + +----- +""" + +from datetime import datetime +import logging + + +class Medication: + """Models a ``Medication`` object. + """ + def __init__( + self, + patient_id, + case_id, + drug_text, + drug_atc, + drug_quantity, + drug_unit, + drug_dispform, + drug_submission, + ): + self.patient_id = patient_id + self.case_id = case_id + self.drug_text = drug_text + self.drug_atc = drug_atc + self.drug_quantity = drug_quantity + self.drug_unit = drug_unit + self.drug_dispform = drug_dispform + self.drug_submission = datetime.strptime(drug_submission, "%Y-%m-%d %H:%M:%S") + + def is_antibiotic(self): + """Returns the antibiotic status of a Medication. + + Antibiotics are identified via the prefix ``J01`` in the ``self.drug_atc`` attribute. 
+ + Returns: + bool: Whether or not the medication ``self.drug_atc`` attribute starts with ``J01``. + """ + return self.drug_atc.startswith("J01") + + @staticmethod + def create_drug_map(lines): + """Creates a dictionary of ATC codes to human readable drug names. + + This function will be called by the HDFS_data_loader.patient_data() function (lines is an iterator object). + The underlying table in the Atelier_DataScience is called V_LA_IPD_DRUG_NORM and structured as follows: + + =========== ========== ======================================== ======== =============== ========= =============== =========================== + PATIENTID CASEID DRUG_TEXT DRUG_ATC DRUG_QUANTITY DRUG_UNIT DRUG_DISPFORM DRUG_SUBMISSION + =========== ========== ======================================== ======== =============== ========= =============== =========================== + 00001711342 0006437617 Torem Tabl 10 mg (Torasemid) C03CA04 2.0000000000000 Stk p.o. 2018-03-24 09:52:28.0000000 + 00001711342 0006437617 Ecofenac Sandoz Lipogel 1 % (Diclofenac) M02AA15 1.0000000000000 Dos lokal / topisch 2018-03-24 09:52:28.0000000 + =========== ========== ======================================== ======== =============== ========= =============== =========================== + + Args: + lines (iterator() object): csv iterator from which data will be read + + Returns: + dict: + Dictionary mapping drug codes to their respective text description + + :math:`\\longrightarrow` ``{'B02BA01' : 'NaCl Braun Inf Lös 0.9 % 500 ml (Natriumchlorid)', ... }`` + """ + logging.debug("create_drug_map") + drugs = dict() + for line in lines: + if drugs.get(line[3], None) is None: + drugs[line[3]] = line[2] + logging.info(f"{len(drugs)} drugs created") + return drugs + + @staticmethod + def add_medication_to_case(lines, cases): + """Adds Medication() objects to Case() objects. 
+ + This function will be called by the HDFS_data_loader.patient_data() function (lines is an iterator object), and + will add all Medication() objects to their corresponding Case() object in cases. The underlying table is + identical to the one used in the ``create_drug_map`` function. + + Args: + lines (iterator() object): csv iterator from which data will be read + cases (dict): Dictionary mapping case ids to Case() + + :math:`\\longrightarrow` ``{'0005976205' : Case(), ... }`` + """ + logging.debug("add_medication_to_case") + nr_cases_not_found = 0 + nr_malformed = 0 + nr_ok = 0 + for line in lines: + if len(line) != 8: + nr_malformed += 1 + continue + medication = Medication(*line) + if cases.get(medication.case_id, None) is None: + nr_cases_not_found += 1 + continue + cases.get(medication.case_id).add_medication(medication) + nr_ok += 1 + logging.info(f"{nr_ok} ok, {nr_cases_not_found} cases not found, {nr_malformed} malformed") diff --git a/spitalhygiene/vre/src/main/python/vre/model/Move.py b/spitalhygiene/vre/src/main/python/vre/model/Move.py new file mode 100644 index 0000000..9bbfd8a --- /dev/null +++ b/spitalhygiene/vre/src/main/python/vre/model/Move.py @@ -0,0 +1,156 @@ +from Room import Room +from Ward import Ward +from datetime import datetime +import logging + + +class Move: + def __init__( + self, + fal_nr, + lfd_nr, + bew_ty, + bw_art, + bwi_dt, + bwi_zt, + statu, + bwe_dt, + bwe_zt, + lfd_ref, + kz_txt, + org_fa, + org_pf, + org_au, + zimmr, + bett, + storn, + ext_kh + ): + self.fal_nr = fal_nr + self.lfd_nr = int(lfd_nr) + self.bew_ty = bew_ty + self.bw_art = bw_art + self.bwi_dt = datetime.strptime( + bwi_dt + " " + bwi_zt, "%Y-%m-%d %H:%M:%S" + ) + self.statu = statu + self.bwe_dt = None + try: + if bwe_zt == '24:00:00.000000000': bwe_zt = "23:59:59.000000000" + self.bwe_dt = datetime.strptime( + bwe_dt + " " + bwe_zt, "%Y-%m-%d %H:%M:%S.000000000" + ) + except ValueError: + pass + self.ldf_ref = lfd_ref + self.kz_txt = kz_txt + self.org_fa = 
org_fa + self.org_pf = org_pf + self.org_au = org_au + self.zimmr = zimmr + self.bett = bett + self.storn = storn + self.ext_kh = ext_kh + self.room = None + self.ward = None + self.case = None + + def add_room(self, r): + self.room = r + + def add_ward(self, ward): + self.ward = ward + + def add_case(self, c): + self.case = c + + def get_duration(self): + end_dt = self.bwe_dt if self.bwe_dt is not None else datetime.now() + return end_dt - self.bwi_dt + + def create_bwart_map(lines): + bwart = dict() + for line in lines: + bwart[line[0]] = line[1] + return bwart + + def add_move_to_case(lines, faelle, rooms, wards, partners, load_limit=None): + """ + Reads the moves csv and performs the following: + --> creates a Move() object from the read-in line data + --> Adds the created Move() to the corresponding Case() + --> Extracts the Room() from each Move() and adds them "to each other" + --> Extracts the Ward() from each Move() and adds them "to each other" + --> Adds referring hospital to the Case() and vice versa + This function will be called by the HDFS_data_loader.patient_data() function (lines is an iterator object). The table from which entries are read is structured as follows: + >> TABLE NAME: LA_ISH_NBEW + ["FALNR", "LFDNR", "BEWTY", "BWART", "BWIDT", "BWIZT", "STATU", "BWEDT", "BWEZT", "LFDREF", "KZTXT", "ORGFA", "ORGPF", "ORGAU", "ZIMMR", "BETT", "STORN", "EXTKH"] + ["0004496041", "3", "4", "SB", "2014-01-17", "16:15:00", "30", "2014-01-17", "16:15:00.000", "0", "", "DIAA", "DIAA", "", "", "", "", ""] + ["0004496042", "1", "4", "BE", "2014-03-10", "08:15:00", "30", "2014-03-10", "08:15:00.000", "0", "ej/ nü CT um 10.30 h", "ENDA", "ENDA", "IICA", "", "", "", ""] + + :param faelle: Dictionary mapping case ids to Case() --> {'0005976205' : Case(), ... } + :param rooms: Dictionary mapping room names to Room() --> {'BH N 125' : Room(), ... } + :param room_ids: Dictionary mapping room IDs to Room() --> {'127803' : Room(), ... 
} + :param wards: Dictionary mapping ward names to Ward() --> {'N NORD' : Ward(), ... } + :param partners: Dictionary mapping partner ids to Partner() --> {'0010000990' : Partner(), ... } + """ + logging.debug("add_move_to_case") + nr_not_found = 0 + nr_not_formatted = 0 + nr_ok = 0 + nr_wards_updated = 0 + for counter, line in enumerate(lines): + if len(line) != 18: + nr_not_formatted += 1 + else: + move = Move(*line) + # don't consider cancelled movements + # if move.storn == "X": # NOW INCLUDED DIRECTLY IN THE SQL QUERY + # continue + + if faelle.get(move.fal_nr, None) is not None: + faelle[move.fal_nr].add_move(move) + move.add_case(faelle[move.fal_nr]) + else: + nr_not_found += 1 + continue + ward = None + ### Add ward to move and vice versa + if move.org_pf != "" and move.org_pf != "NULL": + if wards.get(move.org_pf, None) is None: + ward = Ward(move.org_pf) + wards[move.org_pf] = ward + wards[move.org_pf].add_move(move) + move.add_ward(wards[move.org_pf]) + ### Add move to room and vice versa (including an update of the Room().ward attribute) + if move.zimmr != "" and move.zimmr != "NULL": + if rooms.get(move.zimmr, None) is None: + # Note that the rooms are primarily identified through their name + # The names in this file come from SAP (without an associated ID), so they will NOT match the names already present in the rooms dictionary ! 
+ this_room = Room(move.zimmr) + rooms[move.zimmr] = this_room + + # In order to extract the Room ID, we need to 'backtrace' the key in room_ids for which room_ids[key] == move.zimmr (this will not be available for most Rooms) + # If a backtrace is not possible, the room object will be initiated without an ID + # correct_room_id = [(value_tuple[0], value_tuple[1].name) for value_tuple in room_ids.items() if value_tuple[1] == move.zimmr] + # if len(correct_room_id) == 1: + # logging.info(f"Found room ID for room name {move.zimmr} !") + # r = Room(move.zimmr, correct_room_id[0][0] ) # correct_room_id at this point will be a list containing one tuple --> [ ('123456', 'BH O 128') ] + # else: + # r = Room(move.zimmr) # Create the Room() object without providing an ID + # rooms[move.zimmr] = r + # Then add the ward to this room, and update moves with rooms and vice versa + rooms[move.zimmr].add_ward(ward) + rooms[move.zimmr].add_move(move) + move.add_room(rooms[move.zimmr]) + nr_wards_updated += 1 + ### Parse patients from external referrers + if move.ext_kh != "": + if move.case is not None and partners.get(move.ext_kh, None) is not None: + partners[move.ext_kh].add_case(move.case) + move.case.add_referrer(partners[move.ext_kh]) + nr_ok += 1 + if load_limit is not None and nr_ok > load_limit: + break + + logging.info(f"{nr_ok} ok, {nr_not_found} cases not found, {nr_not_formatted} malformed, {nr_wards_updated} wards updated") diff --git a/spitalhygiene/vre/src/main/python/vre/model/Partner.py b/spitalhygiene/vre/src/main/python/vre/model/Partner.py new file mode 100644 index 0000000..e629876 --- /dev/null +++ b/spitalhygiene/vre/src/main/python/vre/model/Partner.py @@ -0,0 +1,97 @@ +import logging + +class Partner: + ''' + Defines a Business partner as stored in SAP IS-H table NGPA. + This is used to track referring physicians for the patients' cases. + The cases that were referred by a physician are tracked in referred_cases. 
+ ''' + def __init__(self, gp_art, name1, name2, name3, land, pstlz, ort, ort2, stras, krkhs): + ''' + Constructor. Subset of fields from IS-H table NGPA. + :param gp_art: + :param name1: + :param name2: + :param name3: + :param land: + :param pstlz: + :param ort: + :param ort2: + :param stras: + :param krkhs: + ''' + self.gp_art = gp_art + self.name1 = name1 + self.name2 = name2 + self.name3 = name3 + self.land = land + self.pstlz = pstlz + self.ort = ort + self.ort2 = ort2 + self.stras = stras + self.krkhs = krkhs + + self.referred_cases = dict() + + def add_case(self, case): + ''' + Track the cases that a physician has referred. + :param case: + :return: + ''' + self.referred_cases[case.case_id] = case + + @staticmethod + def create_partner_map(lines): + ''' + Creates and returns a map of business partners from a csv reader. This function will be called by the HDFS_data_loader.patient_data() function. The lines argument corresponds to a + csv.reader() instance which supports the iterator protocol (see documentation for csv.reader in module "csv"). Each iteration over lines will contain a list of the + following values (EXCLUDING the header line): + >> TABLE NAME: LA_ISH_NGPA + [ "GPART", "NAME1", "NAME2", "NAME3", "LAND", "PSTLZ", "ORT", "ORT2", "STRAS", "KRKHS"] --> header line + [ "1001503842", "Fontanellaz", "Christian David", "", "CH", "3010", "Bern", "", "", ""] + [ "1001503845", "Dulcey", "Andrea Sara", "", "CH", "3010", "Bern", "", "", ""] + + Returns: Dictionary mapping partners to Partner() objects --> {'1001503842' : Partner(), '1001503845' : Partner(), ... 
} + ''' + nr_malformed = 0 + partners = dict() + for line in lines: + if len(line) != 10: + nr_malformed += 1 + continue + partner = Partner(*line) + partners[partner.gp_art] = partner + + logging.info(f"{len(partners)} created, {nr_malformed} partners malformed") + return partners + + @staticmethod + def add_partners_to_cases(lines, cases, partners): + ''' + Reads lines from csv reader originating from SAP IS-H table NFPZ, and updates the referring physician (Partner() object) from partners to the corresponding case, + and also adds the corresponding Case() to Partner() from cases. This function is called by the HDFS_data_loader.patient_data() method. + The table used for updating cases has the following structure: + >> TABLE NAME: LA_ISH_NFPZ + [ "EARZT", "FARZT", "FALNR", "LFDNR", "PERNR", "STORN"] + [ "H", "2", "0006451992" "1", "0010217016" ""] + [ "H", "2", "0006451992", "3", "0010217016", ""] + + Referring physicians (EARZT = 'U') are added only to cases which are NOT cancelled, i.e. STORN != 'X'. 
+ ''' + logging.debug("add_partners_to_cases") + nr_cases_not_found = 0 + nr_partners_not_found = 0 + nr_ok = 0 + for line in lines: + if line[0] == 'U' and line[5] != 'X': # line[5] corresponds to the "STORN" column ('X' --> storniert) + if cases.get(line[2], None) is None: + nr_cases_not_found += 1 + continue + if partners.get(line[4], None) is None: + nr_partners_not_found += 1 + continue + cases[line[2]].add_referrer(partners[line[4]]) + partners[line[4]].add_case(cases[line[2]]) + nr_ok += 1 + logging.info(f"{nr_ok} ok, {nr_cases_not_found} cases not found, {nr_partners_not_found} partners not found") diff --git a/spitalhygiene/vre/src/main/python/vre/model/Patient.py b/spitalhygiene/vre/src/main/python/vre/model/Patient.py new file mode 100644 index 0000000..c43506b --- /dev/null +++ b/spitalhygiene/vre/src/main/python/vre/model/Patient.py @@ -0,0 +1,498 @@ +import datetime +import logging +from dateutil.relativedelta import relativedelta + + +class Patient: + def __init__(self, patient_id, geschlecht, geburtsdatum, plz, wohnort, kanton, sprache): + self.patient_id = patient_id + self.geschlecht = geschlecht + self.geburtsdatum = None + if geburtsdatum: + self.geburtsdatum = datetime.datetime.strptime(geburtsdatum, "%Y-%m-%d") + self.plz = plz + self.wohnort = wohnort + self.kanton = kanton + self.sprache = sprache + self.cases = dict() + self.risks = dict() # dictionary mapping dt.dt() objects to Risk() objects, indicating at which datetime a particular VRE code has been entered in one of the Insel systems + + def get_relevant_case_and_date(self): + case = self.get_relevant_case() + if case is None: + return (None, None) + relevant_date = self.get_relevant_date() + dt = datetime.datetime.combine( + relevant_date, datetime.datetime.min.time() + ) # need datetime, not date + return (case, dt) + + def add_case(self, case): + self.cases[case.case_id] = case + + def add_risk(self, risk): + """ + Adds a new risk to the Patients risk dict. 
+ :param risk:
+ :return:
+ """
+ self.risks[risk.entry_date] = risk
+
+ def has_risk(self, code_text_list=[(32, None), (42, None), (142, None)]):
+ """
+ Returns True if at least one of the (risk_code, risk_text) tuples is found in the Patient's risk dict.
+ risk_text can be None if the text does not matter. Returns False if none of the risks is found.
+ :param code_text_list:
+ :return:
+ """
+ for code_text in code_text_list:
+ if self.risks.get(code_text[0], None) is not None:
+ if (
+ code_text[1] is None
+ or self.risks[code_text[0]].kz_txt == code_text[1]
+ ):
+ return True
+ return False
+
+ def get_risk_date(self, code_text_list=[(32, None), (42, None), (142, None)]):
+ """
+ Identify risk date for a patient.
+
+ :param code_text_list: currently a list of the form --> [(32, None), (42, None), (142, None)] (default value)
+
+ :return: Date of the first risk from the code_text_list that is found in the Patient's risk dict, in the form of a datetime.datetime() object
+ If none of the risks is found, None is returned instead.
+ """
+ for code_text in code_text_list:
+ if self.risks.get(code_text[0], None) is not None:
+ if code_text[1] is None or self.risks[code_text[0]].kz_txt == code_text[1]:
+ return self.risks[code_text[0]].er_dt
+ return None
+
+ def get_age(self):
+ """
+ Calculates age at relevant date, based on birth date.
+ None if no birth date is in the data or no relevant date
+ :return:
+ """
+ dt = self.get_relevant_date()
+ if dt is None:
+ return None
+ if self.geburtsdatum is None:
+ return None
+
+ return (
+ dt.year
+ - self.geburtsdatum.year
+ - ((dt.month, dt.day) < (self.geburtsdatum.month, self.geburtsdatum.day))
+ )
+
+ def get_relevant_date(self, dt=datetime.datetime.now().date()):
+ """
+ Definition of relevant date:
+ For patients with risk factor: The date attached to the risk factor.
+ For patients without risk factor: Date as provided in dt, default date = now. 
+ :return: + """ + risk_dt = self.get_risk_date() + if risk_dt is not None: + dt = risk_dt.date() + return dt + + def get_relevant_case(self, dt=datetime.datetime.now().date(), since=datetime.datetime(2017, 12, 31, 0, 0).date() ): + """ + Definition of relevant case: + The most recent stationary case, which was still open during relevant date or closed after "since" date. + :param dt: Relevant date for patients without risk factor. + :param since: Relevant case must still be open at "since" + :return: A Case() object in case there is a relevant case, or None otherwise + """ + relevant_dt = self.get_relevant_date(dt) + + # candidate relevant case must be + # 1. stationary + # 2. open before "relevant_dt" + # 3. closed after "since" + # from all candidates we take the one with highest closing date + + relevant_case = None + for case in self.cases.values(): + if ( + case.is_stationary() + and case.open_before_or_at_date(relevant_dt) + and case.closed_after_or_at_date(since) + ): + if relevant_case is None or case.closed_after_or_at_date( relevant_case.moves_end.date() ): + # Here we make sure to consider only the LATEST case, by comparing whether case() was closed after the closing date of the relevant case --> update relevant case ! + relevant_case = case + return relevant_case + + def get_referrers(self): + """ + Find referrers for relevant case. + :return: + """ + case = self.get_relevant_case() + return case.referrers + + def get_length_of_relevant_case(self): + """ + For the relevant case, length of stay is defined as the period between the moves_start and moves_end, + or between moves_start and relevant date if the case is still open at relevant date. + For non-stationary cases, length of stay is not defined (None). 
+ :param dt: datetime.datetime + :return: datetime.timedelta, None if no relevant case + """ + (case, dt) = self.get_relevant_case_and_date() + if case is None: + return None + return case.get_length_of_stay_until(dt) + + def get_relevant_rooms(self): + """ + Set of names of rooms visited during relevant case before relevant date. + Rooms visits can come from SAP IS-H or from RAP. + :return: set of room names, None if no relevant case + """ + (case, dt) = self.get_relevant_case_and_date() + if case is None: + return None + rooms = set() + # moves from SAP IS-H + moves = case.get_moves_before_dt(dt) + for move in moves: + if move.room is not None: + rooms.add(move.room.name) + # appointments from RAP + for appointment in case.appointments: + if appointment.termin_datum < dt: + for room in appointment.rooms: + rooms.add(room.name) + return rooms + + def has_icu_stay(self, orgs): + """ + The patient has a stay in ICU during relevant case before relevant date if there is a move to one of the organizational units provided in orgs. + :param orgs: list of ICU organizational unit names + :return: boolean, False if no relevant case + """ + (case, dt) = self.get_relevant_case_and_date() + if case is None: + return False + moves = case.get_moves_before_dt(dt) + for move in moves: + if move.org_pf in orgs: + return True + return False + + def get_nr_cases(self, delta=relativedelta(years=1)): + """ + How many SAP cases (stationary and ambulatory) did the patient have in one year (or delta provided) before the relevant date. 
+ (moves_start is before relevant_date and moves_end after relevant_date - delta + :return: int + """ + nr_cases = 0 + relevant_date = self.get_relevant_date() + dt = datetime.datetime.combine( + relevant_date, datetime.datetime.min.time() + ) # need datetime, not date + for case in self.cases.values(): + if case.moves_start is not None and case.moves_end is not None: + if case.moves_start <= dt and case.moves_end >= (dt - delta): + nr_cases += 1 + return nr_cases + + def get_antibiotic_exposure(self): + """ + Which antibiotics did the patient get, during which days in the relevant case, before the relevant date. + :return: dict from ATC code to set of date, None if no relevant case + """ + (case, dt) = self.get_relevant_case_and_date() + if case is None: + return None + relevant_medications = dict() + for medication in case.medications: + if medication.is_antibiotic() and ( + case.moves_start <= medication.drug_submission <= dt + ): + if relevant_medications.get(medication.drug_atc, None) is None: + date_set = set() + date_set.add(medication.drug_submission.date()) + relevant_medications[medication.drug_atc] = date_set + else: + relevant_medications[medication.drug_atc].add( + medication.drug_submission.date() + ) + + return relevant_medications + + def get_chop_codes(self): + """ + Which chop codes was the patient exposed to during the relevant case, before the relevant date. + :return: Set of Chops, None if no relevant case + """ + (case, dt) = self.get_relevant_case_and_date() + if case is None: + return None + relevant_chops = set() + for surgery in case.surgeries: + if surgery.bgd_op <= dt: + relevant_chops.add(surgery.chop) + return relevant_chops + + def has_surgery(self): + """ + Does the patient have a chop code during relevant case before relevant date? 
+ :return: boolean + """ + relevant_chops = self.get_chop_codes() + return False if relevant_chops is None else len(relevant_chops) > 0 + + def get_dispform(self): + """ + What kind of forms of drug administration happened during the relevant case before the relevant date? + :return: set of names of administration forms, None if no relevant case + """ + (case, dt) = self.get_relevant_case_and_date() + if case is None: + return None + dispforms = set() + for medication in case.medications: + if case.moves_start <= medication.drug_submission <= dt: + dispforms.add(medication.drug_dispform) + return dispforms + + def get_employees(self): + """ + employee IDS and duration of care or appointment of employees that were involved in appointments or care + during the relevant case, before the relevant date. + :return: set of employee_id + """ + (case, dt) = self.get_relevant_case_and_date() + if case is None: + return None + employees = dict() + for appointment in case.appointments: + if appointment.termin_datum < dt: + for employee in appointment.employees: + employees[employee.mitarbeiter_id] = ( + appointment.dauer_in_min + if employees.get(employee.mitarbeiter_id, None) is None + else employees[employee.mitarbeiter_id] + appointment.dauer_in_min + ) + for care in case.cares: + if care.dt < dt: + employees[care.employee.mitarbeiter_id] = ( + care.duration_in_minutes + if employees.get(care.employee.mitarbeiter_id, None) is None + else employees[care.employee.mitarbeiter_id] + care.duration_in_minutes + ) + return employees + + def get_devices(self): + """ + Names of devices that were used during the relevant case, before the relevant date. 
+ :return: set of device names, None if no relevant case
+ """
+ (case, dt) = self.get_relevant_case_and_date()
+ if case is None:
+ return None
+ device_names = set()
+ for appointment in case.appointments:
+ if appointment.termin_datum < dt:
+ for device in appointment.devices:
+ device_names.add(device.geraet_name)
+ return device_names
+
+ def get_partner(self):
+ """
+ Returns the referring partner(s) for the relevant case.
+ :return: set of Partner
+ """
+ (case, dt) = self.get_relevant_case_and_date()
+ if case is None:
+ return None
+ return case.referrers
+
+ def get_label(self):
+ """
+ All patients are categorized with the following labels:
+ -1: no relevant case
+ 0: unknown
+ 1: screening with inconclusive results
+ 2: screening negative
+ 3: screening positive
+ Higher number labels take precedence over lower ones.
+ :return: int
+ """
+ label = -1
+ if self.get_relevant_case() is not None:
+ label = 0
+ if self.has_risk([(42, None)]):
+ label = 1
+ if self.has_risk([(142, None)]):
+ label = 2
+ if self.has_risk([(32, None)]):
+ label = 3
+ return label
+
+ def get_features(self):
+ """
+ Creates the sparse feature vector for this patient.
+
+ :return: Dictionary mapping feature names to the values for this patient --> {"length_of_stay" : 47, "nr_cases" : 3, ... }
+ If the patient has no relevant case, None is returned instead. 
+ """ + features = dict() + if self.get_relevant_case() is None: + return None + + length_of_stay = self.get_length_of_relevant_case() + features["length_of_stay"] = length_of_stay.days + + features["nr_cases"] = self.get_nr_cases() + + features["age"] = self.get_age() + features["gender"] = self.geschlecht + features["language"] = self.sprache + features["plz"] = self.plz + features["kanton"] = self.kanton + + features["surgery"] = self.has_surgery() + + features["icu"] = self.has_icu_stay( # True if the patient has spent time in one of the ICU units, False otherwise + [ + "IN E GR", + "INEBL 1", + "INEBL 2", + "INEGE 1", + "INEGE 2", + "E108-09", + "E116", + "E113-15", + "E120-21", + ] + ) + + antibiotic_exposure = self.get_antibiotic_exposure() + for antibiotic_atc, days_administred in antibiotic_exposure.items(): + features["antibiotic=" + antibiotic_atc] = len(days_administred) + + dispforms = self.get_dispform() + if dispforms is not None: + for dispform in dispforms: + features["dispform=" + dispform] = True + + chops = self.get_chop_codes() + if chops is not None: + for chop in chops: + features["chop=" + chop.get_lowest_level_code()] = True + + stat_rooms = self.get_relevant_rooms() + for room in stat_rooms: + features["room=" + room] = True + + stat_employees = self.get_employees() + for employee_id, employee_duration in stat_employees.items(): + features["employee=" + employee_id] = employee_duration + + stat_devices = self.get_devices() + for device in stat_devices: + features["device=" + device] = True + + # Extract all ICD codes from the relevant case ONLY + relevant_case = self.get_relevant_case() # returns a Case() object corresponding to the relevant case for this patient + for icd_code in relevant_case.icd_codes: # Case.idc_codes is a list of ICD() objects for this particular case + features['icd=' + icd_code.icd_code] = True + + return features + + def get_location_info(self, focus_date, comparison_type='exact'): + """Returns ward, room and bed 
information for a patient at a specific date.
+
+ This function will go through all Move() objects of each Cases() object of this patient, and return a tuple of
+ Move() objects for which
+
+ Move().bwi_dt <= focus_date <= Move().bwe_dt
+
+ The exact location of a patient at focus_date can then be extracted from the ``Move().org_fa``,
+ ``Move().org_pf``, ``Move().org_au``, ``Move().zimmr`` and ``Move().bett`` attributes.
+
+ Args:
+ focus_date (datetime.date()): Date for which all moves are to be extracted from a patient
+ comparison_type (str): Type of comparison between Move() objects and focus_date. If set to
+ ``exact`` (the default), only Move() objects with non-None Move().bwi_dt
+ `and` Move().bwe_dt attributes will be considered.
+
+ Returns:
+ tuple: tuple of Move() objects for which Move().bwi_dt < focus_date < Move().bwe_dt
+ """
+ location_moves = []
+
+ for each_case in self.cases.values():
+ for each_move in each_case.moves.values():
+ if comparison_type == 'exact' and (each_move.bwi_dt is None or each_move.bwe_dt is None):
+ continue
+ # bwi_dt_date = datetime.date(each_move.bwi_dt.day, each_move.bwi_dt.month, each_move.bwi_dt.day)
+ # bwe_dt_date = datetime.date(each_move.bwe_dt.day, each_move.bwe_dt.month, each_move.bwe_dt.day)
+ if each_move.bwi_dt.date() <= focus_date <= each_move.bwe_dt.date():
+ location_moves.append(each_move)
+
+ return tuple(location_moves)
+
+ def get_moves_at_date(self, target_date):
+ """Extracts all moves from cases for this patient at ``target_date``.
+
+ This function returns a list of all moves for this patient that took place at ``target_date``. This should
+ ideally be only a single entry, since a patient cannot be stationed in two or more places simultaneously.
+ However, a situation may arise when a patient is transferred between wards, in which case there will be two
+ moves matching the criteria.
+
+ Args:
+ target_date (datetime.date()): Date for which moves will be extracted from this patient's cases. 
+ + Returns: + list: List of Move() objects which have taken place for this patient at ``target_date``. + """ + candidate_moves = [] + for each_case in self.cases.values(): + # print(each_case.moves) + for each_move in each_case.moves.values(): + # print(each_move.bwe_dt) + # each_move is a dictionary of the form {1: Move(), 2: Move(),...} + move_start = each_move.bwi_dt.date() if each_move.bwi_dt is not None else None + move_end = each_move.bwe_dt.date() if each_move.bwe_dt is not None else None + if move_start is None or move_end is None: + continue + else: + if move_start <= target_date <= move_end: + candidate_moves.append(each_move) + + return candidate_moves + + @staticmethod + def create_patient_dict(lines, load_limit=None): + """ + Read the patient csv and create Patient objects from the rows. + Populate a dict (patient_id -> patient). This function will be called by the HDFS_data_loader.patient_data() function. The lines argument corresponds to a csv.reader() instance + which supports the iterator protocol (see documentation for csv.reader in module "csv"). Each iteration over lines will contain a list of the following values + (EXCLUDING the header line): + >> TABLE NAME: V_DH_DIM_PATIENT_CUR + [ "PATIENTID" , "GESCHLECHT" , "GEBURTSDATUM" , "PLZ" , "WOHNORT" , "KANTON" , "SPRACHE"] --> header line (for documentation purposes only) + [ "00001383264" , "weiblich" , "1965-03-15" , "3072" , "Ostermundigen" , "BE" , "Deutsch"] + [ "00001383310" , "weiblich" , "1949-02-11" , "3006" , "Bern" , "BE" , "Russisch"] + + Returns: Dictionary mapping PATIENTID to Patient() objects, i.e. 
{"00001383264" : Patient(), "00001383310" : Patient(), ...} + """ + logging.debug("create_patient_dict") + import_count = 0 + patients = dict() + for line in lines: + patient = Patient(*line) + patients[patient.patient_id] = patient + import_count += 1 + if load_limit is not None and import_count > load_limit: + break + + logging.info(f"{len(patients)} patients created") + return patients diff --git a/spitalhygiene/vre/src/main/python/vre/model/Risk.py b/spitalhygiene/vre/src/main/python/vre/model/Risk.py new file mode 100644 index 0000000..e7d6af8 --- /dev/null +++ b/spitalhygiene/vre/src/main/python/vre/model/Risk.py @@ -0,0 +1,269 @@ +import datetime +import logging + + +class Risk: + """Models a ``Risk`` (i.e. Screening) object. + """ + def __init__(self, auftrag_nr, erfassung, entnahme, vorname, nachname, gbdat, pat_nr, pruefziffer, PID, + auftraggeber, kostenstelle, material_type, transport, resultat, analyse_methode, screening_context): + """Initiates a Risk (i.e. Screening) object. + """ + self.auftrag_nbr = auftrag_nr + self.erfassung = datetime.datetime.strptime(erfassung, '%Y-%m-%d').date() if erfassung != '' else None + self.entnahme = datetime.datetime.strptime(entnahme, '%Y-%m-%d').date() if entnahme != '' else None + self.vorname = vorname + self.nachname = nachname + self.geburtsdatum = gbdat + self.patient_nbr = pat_nr + self.pruefziffer = pruefziffer + self.pid = PID + self.auftraggeber = auftraggeber + self.kostenstelle = kostenstelle + self.material_type = material_type + self.transport = transport + self.result = resultat + self.analysis_method = analyse_methode + self.screening_context = screening_context + + def NOLONGERUSED_add_risk_to_patient(lines, patients): + """Reads the risk csv, create Risk objects from the rows and adds these to the respective patients. + This function will be called by the HDFS_data_loader.patient_data() function (lines is an iterator object). 
The underlying table is structured as follows: + >> TABLE NAME: V_LA_ISH_NRSF_NORM + ["PATIENTID", "RSFNR", "KZTXT", "ERDAT", "ERTIM"] + ["00004887743", "000042", "Screening auf VRE. Spezielle L", "2018-08-23", "10:25:30"] + ["00004963016", "000042", "Screening auf VRE. Spezielle L", "2018-05-09", "15:48:48"] + + :param patients: Dictionary mapping patient ids to Patient() --> {'00008301433' : Patient(), ... } + """ + logging.debug("add_risk_to_patient") + nr_not_found = 0 + nr_ok = 0 + for line in lines: + risk = Risk(*line) + if patients.get(risk.patient_id, None) is not None: + patients[risk.patient_id].add_risk(risk) + else: + nr_not_found += 1 + continue + nr_ok += 1 + logging.info(f"{nr_ok} risks added, {nr_not_found} patients not found for risk") + + def NOLONGERUSED_add_deleted_risk_to_patient(lines, patients): + ''' + Read the deleted risk csv and creates Risk objects with code '000142' for deleted VRE screening from the rows. + This function will be called by the HDFS_data_loader.patient_data() function (lines is an iterator object). The underlying table is structured as follows: + + >> TABLE NAME: deleted_screenings + ["VNAME", "NNAME", "PATIENTID", "GBDAT", "ScreeningDate"] + ["Margarete", "Bucher", "00014616742", "1963-11-11", "2018-03-12"] + ["Edouard", "Kurth", "00014533820", "1954-02-15", "2018-02-16"] + + Discard entries which are missing the date of screening. + + :param patients: Dictionary mapping patient ids to Patient() --> {'00008301433' : Patient(), ... 
} + ''' + logging.debug("add_deleted_risk_to_patient") + nr_not_found = 0 + nr_malformed = 0 + nr_ok = 0 + for line in lines: + if len(line[4]) > 0 and line[4] != "NULL": + deleted_risk = Risk(*[line[2], '000142', 'Deleted VRE Screening', line[4], '00:00:00']) + else: + nr_malformed += 1 + continue + if patients.get(deleted_risk.patient_id, None) is not None: + patients[deleted_risk.patient_id].add_risk(deleted_risk) + else: + nr_not_found += 1 + continue + nr_ok += 1 + logging.info(f"{nr_ok} risks added, {nr_not_found} patients not found for deleted risk, {nr_malformed} malformed.") + + @staticmethod + def generate_screening_overview_map(lines): + """Generates the ward screening overview dictionary. + + This function will generate a dictionary containing an overview of which screenings were active at any + particular day. This information is extracted from an iterator object (``lines``), which provides data from + the Atelier_DataScience in the table ``dbo.WARD_SCREENINGS``. The table is formatted as follows: + + ============= ======= ============== + screening_day org_pf screening_type + ============= ======= ============== + 2018-01-01 O MITTE W + 2018-01-01 O SUED E + ============= ======= ============== + + Args: + lines (iterator() object): csv iterator from which data will be read + + Returns: + dict: + Dictionary mapping ``datetime.date()`` objects to tuples of length 2 of the form + ``(ward_name, screening_type)`` + + :math:`\\longrightarrow` ``{'2018-10-22' : ('O SUED', 'W'), '2018-09-15' : ('IB BLAU', 'E'), ...}`` + """ + screening_dict = {} + for each_line in lines: + extracted_date = datetime.datetime.strptime(each_line[0], '%Y-%m-%d').date() + if extracted_date not in screening_dict: + screening_dict[extracted_date] = [(each_line[1], each_line[2])] + else: + screening_dict[extracted_date].append( (each_line[1], each_line[2]) ) + + return screening_dict + + @staticmethod + def generate_oe_pflege_map(lines): + """Generates the oe_pflege_map dictionary. 
+ + This function will generate a dictionary mapping "inofficial" ward names to the names of official *Pflegerische + OEs*. This information is extracted from an iterator object (lines), which provides data from the + Atelier_DataScience in the table ``dbo.OE_PFLEGE_MAP``. The table is formatted as follows: + + ========= ============= + oe_pflege oe_pflege_map + ========= ============= + E 108 E120-21 + BEWA C WEST + ========= ============= + + Args: + lines (iterator() object): csv iterator from which data will be read + + Returns: + dict: + Dictionary mapping inofficial ward names to official names of *pflegerische OEs* + + :math:`\\longrightarrow` ``{'BEWA' : 'C WEST', 'E 121' : 'E 120-21', ...} `` + """ + oe_pflege_dict = {} + for each_line in lines: + oe_pflege_dict[each_line[0]] = each_line[1] + + return oe_pflege_dict + + @staticmethod + def add_annotated_screening_data_to_patients(lines, patient_dict): + """Annotates and adds screening data to all patients in the model. + + This function is the core piece for adding VRE screening data to the model. 
It will read all screenings exported + from the Atelier_DataScience ``V_VRE_SCREENING_DATA`` view, which is structured as follows: + + ========== ========== ========== =============== ============= ============ ========== =========== =========== ============ ============ ============ ========= ======== =============== ================= + auftrag_nr erfassung entnahme vorname nachname geburtsdatum patient_nr pruefziffer patient_id auftraggeber kostenstelle material_typ transport resultat analyse_methode screening_context + ========== ========== ========== =============== ============= ============ ========== =========== =========== ============ ============ ============ ========= ======== =============== ================= + 1234567 2018-33-33 2018-33-33 Mister X 2999-33-33 0000000000 8 00000000000 S099 sepi are tmpo nn PCR NULL + 1234567 2018-33-33 2018-33-33 Mister X 2999-33-33 0000000000 8 00000000000 sepi sepi are tmpo nn PCR NULL + 1234567 2018-33-33 2018-33-33 Misses Y 2999-33-33 0000000000 9 00000000000 sepi sepi are tmpo nn PCR Klinisch + ========== ========== ========== =============== ============= ============ ========== =========== =========== ============ ============ ============ ========= ======== =============== ================= + + A Risk() object will be created from each line received. These risk objects have the following arguments: + + - auftrag_nr + - erfassung + - entnahme + - patient_nr + - pruefziffer + - patient_id + - auftraggeber + - kostenstelle + - material_typ + - transport + - resultat + - analyse_methode + - screening_context + - ward_name + - room_name + - pflege_oe_name + + The ``screening_context`` attribute will either be set to *Klinisch* or ``NULL``. If set to *Klinisch*, this + indicates that the screening was performed as a non-ordinary VRE screening such as testing a urine sample for + the presence of VRE bacteria. These screenings will be added to the VRE model without further processing. 
The
+ same is true for VRE screening entries set to *Ausland*, which refer to patients having been transported from
+ abroad to Switzerland and being screened here upon hospital entry.
+
+ For most screenings however, the ``screening_context`` will be set to NULL. In this case, the context must be
+ **extracted** from the data available in the model. This process is tedious and involves the following steps:
+
+ 1) Extract Move() (in german: *Aufenthalt*) objects for screening which were available to the patient at the
+ time of ``erfassung``. This **must** be a single move, as a patient cannot be stationed in two different
+ wards simultaneously.
+ 2) Extract the Ward() for the Move() extracted in step 1, and add the ward name to the ``self.ward_name``
+ attribute.
+ 3) Extract the exact Room() name where the patient was located from the Move(), and add it to the
+ ``self.room_name`` attribute. This room name is sometimes indicated for Moves(), but will not be available
+ for the vast majority of data. In that case, room_name will be set to ``None`` instead.
+ 4) Map the extracted Ward() name to an official *Pflegerische OE* using the ``oe_pflege_map`` dictionary. This
+ dictionary maps "inofficial" ward names to an official name of a *pflegerische OE*. This step is very
+ important, as the official name of the *pflegerische OE* allows the creation of a link to Waveware and
+ thereby the correct floor, building, and room (in some situations) in which the screening has taken place.
+ The officially extracted pflegerische OE will be appended to the ``self.pflege_oe_name`` attribute.
+ 5) Check which type of screening was active in the *pflegerische OE* extracted in step 4. This information is
+ found in the ``ward_screening_overview`` dictionary, which indicates the various types of screenings that
+ were active at specific dates in different pflegerische OEs. 
Note that in this step, if no screening + context can be found for a particular VRE screen at its date of ``erfassung``, the dates in + ``ward_screening_overview`` will be matched with a tolerance of **+/- 1 day** due to the fact that VRE + screenings are occasionally performed one day sooner or later than planned. + + Args: + lines (iterator): iterator object of the to-be-read file `not` containing the header line + patient_dict (dict): Dictionary mapping patient ids to Patient() --> {'00008301433' : Patient(), ... } + """ + load_count = 0 + for each_line in lines: + this_risk = Risk(*each_line) + + # Check if this patient's PID exists in patient_dict + if this_risk.pid in patient_dict: + load_count += 1 + + potential_moves = patient_dict[this_risk.pid].get_moves_at_date(this_risk.erfassung) + print(potential_moves) + # if load_count > 5: + # break + + + + # + # + # + # move_wards = [] + # screening_wards = [] + # logging.debug("adding_all_screenings_to_patients") + # nr_pat_not_found = 0 + # nr_ok = 0 + # for line in lines: + # this_risk = Risk(*line) + # if patient_dict.get(this_risk.pid) is None: # Check whether or not PID exists + # nr_pat_not_found += 1 + # continue + # potential_moves = patient_dict.get(this_risk.pid).get_location_info(focus_date=this_risk.erfassung) + # if len(potential_moves) > 0: # indicates at least one potential match + # move_wards.append('+'.join([each_move.org_fa for each_move in potential_moves])) + # screening_wards.append(this_risk.options) + # nr_ok += 1 + # # print results to file + # with open('match_results.txt', 'w') as writefile: + # for i in range(len(move_wards)): + # writefile.write(f"{move_wards[i]}; {screening_wards[i]}\n") + # + # + # + # + # + # # patient_dict[this_risk.patient_id].add_risk(this_risk) + # # nr_ok += 1 + # # else: + # # nr_pat_not_found += 1 + # # continue + # logging.info(f"{nr_ok} screenings added, {nr_pat_not_found} patients from VRE screening data not found.") + + + + + + diff --git 
a/spitalhygiene/vre/src/main/python/vre/model/Room.py b/spitalhygiene/vre/src/main/python/vre/model/Room.py new file mode 100644 index 0000000..119571d --- /dev/null +++ b/spitalhygiene/vre/src/main/python/vre/model/Room.py @@ -0,0 +1,153 @@ +from datetime import datetime +from Bed import Bed +import logging
+
+
+class Room:
+ '''
+ Models a room in the hospital and contains lists of moves and appointments that happened in this room.
+ '''
+ def __init__(self, name):
+ """
+ Note that most rooms will not have an ID ! (for some reason yet to be discovered)
+
+ :param name: Room name
+ """
+ self.name = name
+ self.ids = []
+ self.ward = None
+ self.moves = []
+ self.appointments = []
+ self.beds = dict()
+
+ def add_move(self, move):
+ '''
+ Add a Move from SAP IS-H to this room.
+ In case the Move has information about the bed, the Bed is added to the room (if not yet exists).
+ :param move: Move
+ :return:
+ '''
+ self.moves.append(move)
+ if move.bett is not None and move.bett != "":
+ if self.beds.get(move.bett, None) is None:
+ b = Bed(move.bett)
+ self.beds[move.bett] = b
+ self.beds[move.bett].add_move(move)
+
+ def add_id(self, id, system):
+ """
+ Adds an (id, system) tuple to the Room().ids attribute list if id has not already been added - this prevents duplicate entries.
+ This list should be useful in determining whether or not there are overlapping room names among different hospital systems.
+
+ :param id: id string
+ :param system: system string
+ """
+ if id not in [id_tuple[0] for id_tuple in self.ids]:
+ self.ids.append( (str(id), str(system)) )
+
+ def add_appointment(self, appointment):
+ '''
+ Add an appointment from RAP to this room.
+ :param appointment: Appointment
+ :return:
+ '''
+ self.appointments.append(appointment)
+
+ def add_ward(self, ward):
+ """
+ Updates the self.ward attribute.
+
+ :param ward: ward object to be set as the new attribute. 
+ """ + self.ward = ward + + def get_ids(self): + """ + Returns all ids in the self.ids attribute. + :return: a @-delimited list of [room_id]_[system] for all (id, system) tuples in the self.ids attribute list, or None if the list is empty. + """ + if len(self.ids) == 0: + return None + else: + return '@'.join([each_tuple[0] + '_' + each_tuple[1] for each_tuple in self.ids]) + + def get_moves_during(self, start_dt, end_dt): + ''' + List of moves that overlap with the start_dt, end_dt time interval. + :param start_dt: datetime.datetime + :param end_dt: datetime.datetime + :return: List of Move + ''' + overlapping_moves = [] + for move in self.moves: + e_dt = move.bwe_dt if move.bwe_dt is not None else datetime.now() + if e_dt >= start_dt and move.bwi_dt <= end_dt: + overlapping_moves.append(move) + return overlapping_moves + + def create_room_id_map(lines, rooms): + ''' + Initializes the dictionary mapping room ids to Room() objects based on the provided csv file. + This function will be called by the HDFS_data_loader.patient_data() function (lines is an iterator object). The underlying table is structured as follows: + >> TABLE NAME: V_DH_DIM_RAUM_CUR + ["RAUMID", "RAUMNAME"] + ["128307", "Kollmann Zahraa [00025783]"] + ["80872"," Audiometrie"] + + The function will also update the provided rooms dictionary. + + :param rooms: Dictionary mapping room names to Room() objects --> {'BH N 129' : Room(), ... } + :return: Dictionary mapping room ids to Room() objects --> {'127803' : Room(), ... 
} + ''' + logging.debug("create_room_id_map") + room_id_map = dict() + for line in lines: + room_obj = Room(line[1]) + room_obj.add_id(id = line[0], system = 'Polypoint') + # Update the room_id_map dictionary + room_id_map[line[0]] = Room(line[1]) + # Update the rooms dictionary + if line[1] not in rooms.keys(): + rooms[line[1]] = room_obj + else: + rooms[line[1]].add_id(id = line[0], system = 'Polypoint') + + logging.info(f"{len(room_id_map)} rooms created") + return room_id_map + + def add_room_to_appointment(lines, appointments, rooms): + """ + Reads room data from the csv file and adds the created Room() to an Appointment() in appointments and vice versa. + This function will be called by the HDFS_data_loader.patient_data() function (lines is an iterator object). The underlying table is structured as follows: + >> TABLE NAME: V_DH_FACT_TERMINRAUM --> LEFT JOIN V_DH_DIM_RAUM --> ON [V_DH_FACT_TERMINRAUM].RAUMID = V_DH_DIM_RAUM_CUR.RAUMID + ["TERMINID", "RAUMID", "RAUMNAME", "TERMINSTART_TS", "TERMINENDE_TS", "DAUERINMIN"] + ["2295658", "11190", "P2 A534", "2007-12-10 08:45:00.0000", "2007-12-10 10:15:00.0000", "90.000000"] + ["2410965", "61994", "C 316", "2008-02-21 14:00:00.0000", "2008-02-21 15:00:00.0000", "60.000000"] + + :param appointments: Dictionary mapping appointment ids to Appointment() objects --> { '36830543' : Appointment(), ... } + :param room_id_map: Dictionary mapping room ids to Room() object --> {'127803' : Room(), ... } + :param rooms: Dictionary mapping room names to a Room() object --> {'BH N 125' : Room(), ... 
} + """ + logging.debug("add_room_to_appointment") + nr_appointments_not_found = 0 + nr_rooms_not_found = 0 + nr_ok = 0 + for line in lines: + appointment_id = line[0] + room_id = line[1] + room_name = line[2] + if appointments.get(appointment_id, None) is None: + nr_appointments_not_found += 1 + continue + # if room_id_map.get(room_id, None) is None: + # nr_rooms_not_found += 1 + # continue + # name = room_id_map[room_id] + if rooms.get(room_name, None) is None: + new_room = Room(room_name) + new_room.add_id(id = room_id, system = 'Polypoint' ) + rooms[room_name] = new_room + rooms[room_name].add_appointment(appointments[appointment_id]) + appointments[appointment_id].add_room(rooms[room_name]) + nr_ok += 1 + logging.info(f"{nr_ok} rooms added to appointments, {nr_appointments_not_found} appointments not found, {nr_rooms_not_found} rooms not found") diff --git a/spitalhygiene/vre/src/main/python/vre/model/Surgery.py b/spitalhygiene/vre/src/main/python/vre/model/Surgery.py new file mode 100644 index 0000000..1c01e00 --- /dev/null +++ b/spitalhygiene/vre/src/main/python/vre/model/Surgery.py @@ -0,0 +1,62 @@ +from datetime import datetime +import logging + +class Surgery: + ''' + A surgery maps a case to a chop code. + Table schema: + [LFDBEW],[ICPMK],[ICPML],[ANZOP],[BGDOP],[LSLOK],[STORN],[FALNR],[ORGPF] + ''' + def __init__(self, LFDBEW,ICPMK,ICPML,ANZOP,BGDOP,LSLOK,STORN,FALNR,ORGPF): + self.bgd_op = datetime.strptime(BGDOP, "%Y-%m-%d") # date of surgery + self.lfd_bew = LFDBEW + self.icpmk = ICPMK # catalog ID + self.icpml = "Z" + ICPML # chop code (comes without leading Z!) + self.anzop = ANZOP # ?? + self.lslok = LSLOK # ?? + self.fall_nr = FALNR # case ID in SAP! + self.storn = STORN # cancelled ("" or "X")? 
+ self.org_pf = ORGPF # ward + + self.chop = None + + def add_surgery_to_case(lines, cases, chops): + ''' + Creates Surgery() objects from a csv reader, and performs the following: + --> Finds the CHOP code in the chops dict (given the chop code and catalog id, which is read from the csv) + --> Finds the case with the correct case id and adds the surgery to the case. + This function will be called by the HDFS_data_loader.patient_data() function (lines is an iterator object). The underlying table is structured as follows: + >> TABLE NAME: LA_ISH_NICP + ["LFDBEW", "ICPMK", "ICPML", "ANZOP", "BGDOP", "LSLOK", "STORN", "FALNR", "ORGPF"] + ["0", "10", "96.08", "0", "2011-12-23", "", "", "0003807449", "Q MITTE"] + ["0", "10", "87.41.00", "0", "2011-12-27", "", "", "0003807449", "Q MITTE"] + + :param cases: Dictionary mapping case ids to Case() objects --> {"0003536421" : Case(), "0003473241" : Case(), ...} + :param chops: Dictionary mapping the chopcode_katalogid entries to Chop() objects --> { 'Z39.61.10_11': Chop(), ... 
} + ''' + logging.debug("add_surgery_to_case") + nr_chop_not_found = 0 + nr_case_not_found = 0 + nr_surgery_storniert = 0 + nr_ok = 0 + for line in lines: + surgery = Surgery(*line) + if surgery.storn == 'X': # ignore 'stornierte' surgeries + nr_surgery_storniert += 1 + continue + chop = chops.get(surgery.icpml + "_" + surgery.icpmk, None) + case = cases.get(surgery.fall_nr, None) + if chop is not None: + surgery.chop = chop + else: + nr_chop_not_found += 1 + continue + if case is not None: + case.add_surgery(surgery) + chop.add_case(case) + else: + nr_case_not_found += 1 + continue + nr_ok += 1 + logging.info(f"{nr_ok} ok, {nr_case_not_found} cases not found, {nr_chop_not_found} chop codes not found, {nr_surgery_storniert} surgeries storniert") + diff --git a/spitalhygiene/vre/src/main/python/vre/model/Ward.py b/spitalhygiene/vre/src/main/python/vre/model/Ward.py new file mode 100644 index 0000000..463c331 --- /dev/null +++ b/spitalhygiene/vre/src/main/python/vre/model/Ward.py @@ -0,0 +1,19 @@ +from datetime import datetime + +class Ward: + def __init__(self, name): + self.name = name + self.moves = [] + self.appointments = [] + + def add_move(self, move): + self.moves.append(move) + + def get_moves_during(r, start_dt, end_dt): + overlapping_moves = [] + for m in r.moves: + e_dt = m.bwe_dt if m.bwe_dt is not None else datetime.now() + if e_dt >= start_dt and m.bwi_dt <= end_dt: + overlapping_moves.append(m) + return overlapping_moves + diff --git a/spitalhygiene/vre/src/main/python/vre/model/__init__.py b/spitalhygiene/vre/src/main/python/vre/model/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/spitalhygiene/vre/src/main/python/vre/networkx_graph.py b/spitalhygiene/vre/src/main/python/vre/networkx_graph.py new file mode 100644 index 0000000..687bdfd --- /dev/null +++ b/spitalhygiene/vre/src/main/python/vre/networkx_graph.py @@ -0,0 +1,1257 @@ +# -*- coding: utf-8 -*- +"""This script contains the code to run statistics on various types of 
network models that will be used as proposed by +Theus (see README.md for more details). + +**Surface Model**: This model assumes that VRE is transmitted on "surfaces", and contains all potential transmitting +surfaces as nodes. These currently include: + +- Patients +- Employees +- Rooms +- Devices + +Edges are based on contact between two nodes if the contact (e.g. a patient being in a particular room) has occurred +during a patient's relevant stay. + +**Handshake Model**: This model assumes that VRE is transmitted via "handshakes", and contains only Patients as nodes. +In contrast to the Surface model, edges in this model correspond to the transmission vectors, and represent common rooms +or employees via which two patients may have been (indirectly) in contact with each other. + +----- +""" + +import copy +import os +import networkx as nx +import logging +import datetime +import itertools +import json +import random +from collections import Counter + + +def create_model_snapshots(orig_model, snapshot_dt_list): + """Creates model snapshots based on the datetime.datetime() values provided in snapshot_dt_list. + + Note: + For obvious reasons, all of the values provided must be smaller (earlier than) orig_model.snapshot_dt (i.e. + the snapshot date of the model used as a basis for the other models). + + Args: + orig_model (surface_model): Original surface_model() object that will be used as starting point for all + subsequent snapshots + snapshot_dt_list (list): List of dt.dt() objects, all smaller than self.snapshot_dt + + Returns: + list: List of independent surface_model() objects corresponding to the various model snapshots in increasing + order (i.e. the oldest snapshot is first in the list). The last entry in the returned list contains orig_model, + meaning the list has length `len(snapshot_dt_list) + 1`. If no snapshot creation is possible, ``None`` is + returned instead. 
+ """ + if orig_model.snapshot_dt is None: + logging.error('Please add data to the model before taking snapshots !') + return None + if True in [dt_value > orig_model.snapshot_dt for dt_value in snapshot_dt_list]: + logging.error('All snapshot values must be smaller than the snapshot time of the current model !') + return None + sorted_snapshots = sorted(snapshot_dt_list, reverse=True) + model_list = [copy.deepcopy(orig_model)] + for dt_value in sorted_snapshots: + logging.info(f"Creating snapshot for {dt_value.strftime('%d.%m.%Y %H:%M:%S')}...") + temp_model = copy.deepcopy(orig_model) + temp_model.trim_model(snapshot_dt=dt_value) + temp_model.remove_isolated_nodes(silent = True) + model_list.append(temp_model) + logging.info(f'--> Success ! Snapshot contains {len(temp_model.S_GRAPH.nodes())} nodes ' + f'and {len(temp_model.S_GRAPH.edges())} edges') + # Reverse order of models (oldest snapshot should be the first entry) and return the list of snapshot models + model_list.reverse() + return model_list + + +class surface_model: + """Represents the `Surface Model` graph in networkx. 

    Nodes can be one of:

    - Patients :math:`\\longrightarrow` added with attribute dict ``{'type' : "Patient" }``
    - Employees :math:`\\longrightarrow` added with attribute dict ``{'type' : "Employee" }``
    - Rooms :math:`\\longrightarrow` added with attribute dict ``{'type' : "Room" }``
    - Devices :math:`\\longrightarrow` added with attribute dict ``{'type' : "Device" }``

    A few details on how the model graph is set up:

    - All node objects will be represented in the form of string objects, where unique identifiers are as follows:

        - Patients :math:`\\longrightarrow` ``patient ID``
        - Rooms: :math:`\\longrightarrow` ``room name``
        - Employees: :math:`\\longrightarrow` ``employee ID``
        - Devices: :math:`\\longrightarrow` ``Device ID``

    - All node objects will have at least two attributes:

        - ``type`` (see docstring above)
        - ``cluster`` (for visual representation with Alchemy) --> added via the function `add_node_clustering()`

    - All edge objects will have at least the following attributes:

        - ``from`` :math:`\\longrightarrow` indicates start of the interaction (a dt.dt() object)
        - ``to`` :math:`\\longrightarrow` indicates end of the interaction (a dt.dt() object)
        - ``type`` :math:`\\longrightarrow` indicates what node types are linked by this edge, e.g. "Patient-Room",
          "Patient-Employee", etc.
        - ``origin`` :math:`\\longrightarrow` indicates the source that was used to add this edge (e.g. "Move",
          'Appointment", etc.)

    - Edge descriptions always contain the nodes in alphabetical order, i.e. Device-Room (NOT Room-Device),
      Employee-Patient (NOT Patient-Employee), etc.
    - Each node is only present once
    - Edges from a node to itself are not supported
    - Multiple parallel edges between two nodes are supported (:math:`\\longrightarrow` MultiGraph)
    - Edges do not have associated weights, but have at least one attribute "type" (e.g.
      Patient-Room, Employee-Patient)
    - The edge attribute "type" is **not** sorted alphabetically, and instead features the following 6 variations:

        - Patient-Room
        - Device-Patient
        - Employee-Patient
        - Device-Room
        - Employee-Room
        - Device-Employee

    - The graph is undirected
    - The python built-in ``None``-type object should not be used for attributes according to the networkx docs.
      Instead, unknown values will be set to "NULL" or ""

    """

    def __init__(self, data_dir='.', edge_types=None):
        """Initiates the graph in networkx (see class docstring for details).

        Args:
            data_dir (str): Data directory in which to store output files from various data extraction functions.
            edge_types (tuple): Tuple containing all edge types to include in the model (can be any combination of the 4
                                node types). This value defaults to None, resulting in the inclusion of all edge types.
                                See class docstring for details.
        """
        # MultiGraph (not Graph): multiple parallel edges between the same two nodes must be supported
        self.S_GRAPH = nx.MultiGraph()
        # Flag indicating whether or not the self.add_edge_infection() function has been called on the graph
        # -> introduces the "infected" attribute for edges
        self.edges_infected = False
        self.Nodes = {
            'Patient': set(),
            'Room': set(),
            'Device': set(),
            'Employee': set(),
        }  # Dictionary mapping to sets of respective node string identifiers
        # Initiate various counters:
        self.edge_add_warnings = 0  # Number of warnings encountered during the addition of edges to the network
        self.room_add_warnings = 0  # Number of warnings encountered during the addition of room nodes
        self.patient_add_warnings = 0  # Number of warnings encountered during the addition of patient nodes
        self.employee_add_warnings = 0  # Number of warnings encountered during the addition of employee nodes
        self.device_add_warnings = 0  # Number of warnings encountered during the addition of device nodes

        self.betweenness_centrality = None
        # Changed to a dictionary mapping nodes to betweenness centrality scores once the
        #
self.export_node_betweenness() function is called + + self.data_dir = data_dir + + self.shortest_path_stats = False + # indicates whether shortest path statistics have been added to nodes via update_shortest_path_statistics() + + self.node_files_written = False # indicates whether node files (in JSON format) are present in self.data_dir + + self.snapshot_dt = None # time at which "snapshot" of the model is taken (important for "sub-snapshots") + + self.edge_types = ('Patient-Room', 'Device-Patient', 'Employee-Patient', 'Device-Room', 'Employee-Room', + 'Device-Employee') if edge_types is None else edge_types + # tuple containing types of edges to include in the network (or None --> includes edge types) + + ########################################################################## + # Class-specific Exceptions + ########################################################################## + class NodeBetweennessException(Exception): + """ Class-specific exception used for betweenness calculation functions. + """ + pass + + ########################################################################## + # Base Functions + ########################################################################## + @staticmethod + def parse_filename(filename, replace_char='@'): + """Parses filename and replaces problematic characters with replace_char. + + Args: + filename (str): Name of file to parse. + replace_char (str): Replacement problematic characters. + + Returns: + str: parsed (unproblematic) filename + """ + return filename.replace('/', replace_char) + + @staticmethod + def save_to_json(path_to_file, saved_object): + """Saves object to `path_to_file` in JSON format. + + Args: + path_to_file (str): Path to file to be saved (must include ``.json`` suffix) + saved_object: Object to be saved to file in JSON format. + """ + json.dump(saved_object, open(path_to_file, 'w')) + + @staticmethod + def load_from_json(path_to_file): + """Loads the .json file specified in `path_to_file`. 
+ + Args: + path_to_file (str): Path to `.json` file to be loaded. + + Returns: + The object loaded from `path_to_file`. + """ + loaded_object = json.load(open(path_to_file)) # default mode for open(...) is 'r' + return loaded_object + + def identify_id(self, string_id): + """Checks whether a node with string_id exists in the network. + + Returns the type (e.g. 'Patient', 'Employee', etc.) of `string_id` in the network. If string_id does not exist + in the network, ``None`` is returned instead. + + Args: + string_id (str): string identifier of the node to be identified. + + Returns: + str or None: The type of node of string_id, or ``None`` if string_id is not found in the network. + """ + for key in self.Nodes.keys(): + if string_id in self.Nodes[key]: # self.Nodes[key] will be a set --> in operator performs very well + return key + return None + + def identify_node(self, node_id, node_type): + """Checks whether node_id is found in self.Nodes[node_type]. + + This function is more performant than identify_id(), since it already assumes that the node type of the string + to be identified is known. + + Args: + node_id (str): String identifier of the node to be identified. + node_type (str): Type of node to be identified (e.g. 'Patient') + + Returns: + bool: `True` if node_id is found in self.Nodes[node_type], `False` otherwise. + """ + if node_id in self.Nodes[node_type]: + return True + return False + + ########################################################################## + # Functions for expanding or reducing the graph + ########################################################################## + def new_generic_node(self, string_id, attribute_dict): + """Adds a new generic node to the graph. + + String_id will be used as the unique identifier, and all key-value pairs in attribute_dict as additional + information to add to the node. If a node already exists, only new entries in attribute_dict will be added to + it, but it will otherwise be left unchanged. 
+ + Args: + string_id (str): string identifier for node + attribute_dict (dict): dictionary of key-value pairs containing additional information + """ + self.S_GRAPH.add_node(str(string_id), **attribute_dict) + + def new_patient_node(self, string_id, risk_dict, warn_log=False): + """Add a patient node to the network. + + Automatically sets the 'type' attribute to "Patient". Also adds risk_dict to the 'risk' attribute as defined + for patient nodes. It will also add an attribute 'vre_status' ('pos' or 'neg') depending on whether or not code + 32 is found in risk_dict. Note that if string_id is empty (''), no node will be added to the network and a + warning will be logged if warn_log is `True`. + + Args: + string_id (str): string identifier of patient to be added. + risk_dict (dict): dictionary mapping dt.dt() to Risk() objects corresponding to a patient's VRE screening + history. + warn_log (bool): flag indicating whether or not to log warning messages. + """ + if string_id == '': + if warn_log == True: + logging.warning('Empty patient identifier - node is skipped') + self.patient_add_warnings += 1 + return + risk_codes = [each_risk.screen_code for each_risk in risk_dict.values()] + self.S_GRAPH.add_node(str(string_id), type='Patient', risk=risk_dict, vre_status='pos' + if 32 in risk_codes else 'neg') + self.Nodes['Patient'].add(string_id) + + def new_room_node(self, string_id, ward=None, room_id=None, warn_log=False): + """Add a room node to the network. + + Automatically sets the 'type' attribute to "Room" and ward to the "ward" attribute, and sets room_id to either + the specified value or "NULL". Note that if string_id is empty (''), no node will be added to the network and a + warning will be logged if warn_log is `True`. + + Args: + string_id (str): string identifier of room to be added. + ward (str): name of ward of this room + room_id (str): room id (in string form) of this room + warn_log (bool): flag indicating whether or not to log warning messages. 
+ """ + if string_id == '': + if warn_log == True: + logging.warning('Empty room identifier - node is skipped') + self.room_add_warnings += 1 + return + attribute_dict = {'ward': 'NULL' if ward is None else str(ward), 'room_id': 'NULL' + if room_id is None else str(room_id), 'type': 'Room'} + self.S_GRAPH.add_node(str(string_id), **attribute_dict) + self.Nodes['Room'].add(string_id) + + def new_device_node(self, string_id, name, warn_log=False): + """Add a device node to the network. + + Automatically sets the 'type' attribute to "Device". Note that if string_id is empty (''), no node will be + added to the network and a warning will be logged if warn_log is `True`. + + Args: + string_id (str): string identifier of device to be added. + name (str): name of device. + warn_log (bool): flag indicating whether or not to log warning messages. + """ + if string_id == '': + if warn_log == True: + logging.warning('Empty device identifier - node is skipped') + self.device_add_warnings += 1 + return + self.S_GRAPH.add_node(str(string_id), type='Device', name = name) + self.Nodes['Device'].add(string_id) + + def new_employee_node(self, string_id, warn_log=False): + """Add an employee node to the network. + + Automatically sets the 'type' attribute to "employee". Note that if string_id is empty (''), no node will be + added to the network and a warning will be logged if warn_log is `True`. + + Args: + string_id (str): string identifier of employee to be added. + warn_log (bool): flag indicating whether or not to log warning messages. + """ + if string_id == '': + if warn_log == True: + logging.warning('Empty employee identifier - node is skipped') + self.employee_add_warnings += 1 + return + self.S_GRAPH.add_node(str(string_id), type='Employee') + self.Nodes['Employee'].add(string_id) + + def new_edge(self, source_id, source_type, target_id, target_type, att_dict, log_warning=False): + """Adds a new edge to the network. 
+ + The added edge will link source_id of source_type to target_id of target_type. Note that the edge will ONLY be + added if both source_id and target_id are found in the self.Nodes attribute dictionary. In addition, all + key-value pairs in att_dict will be added to the newly created edge. + + Args: + source_id (str): String identifying the source node + source_type (str): source_id type, which must be one of ['Patient', 'Room', 'Device', 'Employee'] + target_id (str): String identifying the target node + target_type (str): target_id type, which must be one of ['Patient', 'Room', 'Device', 'Employee'] + att_dict (dict): dictionary containing attribute key-value pairs for the new edge. + log_warning (bool): flag indicating whether or not to log a warning each time a faulty edge is encountered + """ + if self.identify_id(source_id) is None: + if log_warning == True: + logging.warning(f'Did not find node {source_id} of type {source_type} - no edge added') + self.edge_add_warnings += 1 + return + if self.identify_id(target_id) is None: + if log_warning == True: + logging.warning(f'Did not find node {target_id} of type {target_type} - no edge added') + self.edge_add_warnings += 1 + return + self.S_GRAPH.add_edge(source_id, target_id, **att_dict) + + def remove_isolated_nodes(self, silent=False): + """Removes all isolated nodes from the network. + + Isolated nodes are identified as having degree 0. 
+ + Args: + silent (bool): Flag indicating whether or not to log progress (defaults to ``False``) + """ + node_degrees = [self.S_GRAPH.degree(each_node) for each_node in self.S_GRAPH.nodes] + if silent == False: + logging.info(f"##################################################################################") + logging.info('Removing isolated nodes:') + logging.info(f'--> Before processing, network contains {len(node_degrees)} total nodes, out of which ' + f'{node_degrees.count(0)} are isolated.') + remove_count = 0 + delete_list = [] + for each_node in self.S_GRAPH.nodes: + if self.S_GRAPH.degree(each_node) == 0: # degree of 0 indicates an isolated node + delete_list.append(each_node) + for node in delete_list: + self.S_GRAPH.remove_node(node) + remove_count += 1 + node_degrees_after = [self.S_GRAPH.degree(each_node) for each_node in self.S_GRAPH.nodes] + if silent == False: + logging.info(f'--> After processing, network contains {len(node_degrees_after)} total nodes, out of which ' + f'{node_degrees_after.count(0)} are isolated.') + + def trim_model(self, snapshot_dt): + """Trims the current model. + + Removes all edges for which the ``to`` attribute is larger than snapshot_dt, and updates the self.snapshot_dt + attribute. However, this function does NOT remove isolated nodes. 

        Args:
            snapshot_dt (dt.dt()): dt.dt() object specifying to which timepoint the model should be trimmed
        """
        # Collect all edges whose interaction ends after the snapshot timepoint, then remove them in bulk
        deleted_edges = [edge_tuple for edge_tuple in self.S_GRAPH.edges(data=True, keys=True)
                         if edge_tuple[3]['to'] > snapshot_dt]
        # S_GRAPH.edges() returns a list of tuples of length 4 --> ('source_id', 'target_id', key, attr_dict)
        self.S_GRAPH.remove_edges_from(deleted_edges)
        self.snapshot_dt = snapshot_dt

    ################################################################################################################
    # Functions for updating attributes
    ################################################################################################################
    def update_edge_attributes(self, edge_tuple, attribute_dict):
        """Updates the edge identified in edge_tuple.

        Add all key-value pairs in attribute_dict. Existing attributes will be overwritten.

        Args:
            edge_tuple (tuple): Tuple of length 3 identifying the edge :math:`\\longrightarrow` (source_id,
                                target_id, key) (`key` is required to uniquely identify MultiGraph() edges)
            attribute_dict (dict): dictionary of key-value pairs with which to update the edge
        """
        attrs = {edge_tuple : attribute_dict}
        # to update a specific edge, the dictionary passed to set_edge_attributes() must be formatted
        # as --> { ('bla', 'doodel', 0) : {'newattr' : 'somevalue'} }
        nx.set_edge_attributes(self.S_GRAPH, attrs)

    def update_node_attributes(self, node_id, attribute_dict):
        """Updates the node identified in node_id.

        The node will be updated with all key-value pairs in attribute_dict. Note that existing attributes will be
        overwritten with the values in attribute_dict.

        Args:
            node_id (str): string identifier for the node
            attribute_dict (dict): dictionary of key-value pairs with which the node will be updated
        """
        attrs = {node_id : attribute_dict}
        # to update a specific node, the dictionary passed to set_node_attributes() must be formatted
        # as --> { 'node_id' : {'newattr' : 'somevalue'} }
        nx.set_node_attributes(self.S_GRAPH, attrs)

    def add_edge_infection(self):
        """Sets "infected" attribute to all edges.

        This function will iterate over all edges in the network and set an additional attribute ``infected``, which
        will be set to ``True`` if it connects to a patient node for which the ``vre_status`` attribute is set to
        ``pos``. For all other edges, this attribute will be set to ``False``.
        """
        logging.info(f"##################################################################################")
        logging.info('Adding infection data to network edges...')
        neg_infect_count = 0
        pos_infect_count = 0
        error_count = 0
        for each_edge in self.S_GRAPH.edges(data=True, keys=True):
            # will yield a list of tuples of length 4 --> (source_id, target_id, key, attribute_dict)
            try:
                edge_types = [self.identify_id(each_edge[i]) for i in range(0, 2)]
                if None in edge_types:
                    # will yield a list of length 2 --> [source_id_type, target_id_type] (e.g.
['Patient', 'Room'] ) + logging.warning(f"Encountered an edge for which at least one node could not be identified") + self.update_edge_attributes(edge_tuple=(each_edge[0], each_edge[1], each_edge[2]), + attribute_dict={'infected': False}) + continue + if 'Patient' not in edge_types: # indicates an edge between two non-patient nodes + self.update_edge_attributes(edge_tuple=(each_edge[0], each_edge[1], each_edge[2]), + attribute_dict={'infected' : False}) + neg_infect_count += 1 + else: + # indicates an edge of one of the following types: + # Patient-Room, Patient-Device or Patient-Employee (patient id is always in each_edge[0]) + pat_index = edge_types.index('Patient') + # returns the index (0 or 1) containing the patient node (can either be source_id or target_id) + if self.S_GRAPH.node[each_edge[pat_index]]['vre_status'] == 'neg': + self.update_edge_attributes(edge_tuple=(each_edge[0], each_edge[1], each_edge[2]), + attribute_dict={'infected': False}) + neg_infect_count += 1 + else: + self.update_edge_attributes(edge_tuple=(each_edge[0], each_edge[1], each_edge[2]), + attribute_dict={'infected': True}) + pos_infect_count += 1 + except Exception as e: # e is currently not used + self.update_edge_attributes(edge_tuple=(each_edge[0], each_edge[1], each_edge[2]), + attribute_dict={'infected': False}) + error_count += 1 + logging.warning(f'Encountered {error_count} errors during the identification of infected patient nodes') + logging.info(f"Successfully added 'infected' status to network edges ({pos_infect_count} infected, " + f"{neg_infect_count} uninfected edges)") + self.edges_infected = True + + def update_shortest_path_statistics(self, focus_nodes=None, approximate=False, max_path_length=None): + """Prerequisite function for calculating betweenness centrality. 
+ + Adds new attributes to all nodes in focus_nodes, where each attribute is a pair of nodes (sorted alphabetically) + with a ``SP-`` prefix to tuples of length 2 containing (shortest_paths_through_this_node, total_shortest_paths) + For example: + + ``{ 'SP-Node1-Node2': (2, 5) , 'SP-Node1-Node3': (1, 8), ... } }`` + + Note: + This may result in a lot of additional attributes for nodes which are integral to the network. This ap- + proach is chosen because the networkx module does not allow updates to a dict-of-a-dict type + of attributes - i.e. if these attributes were to be combined in a 1st-level key 'shortest-paths', the entire + content would have to be copied every time a new node-pair attribute is added, which would make the function + extremely inefficient. + + This is an important prerequisite function for the calculation of betweenness centrality. + + Args: + focus_nodes (list): list of node IDs. If set to ``None`` (the default), all nodes in the network will be + considered. **WARNING: this may be extremely ressource-intensive !** + approximate (bool): Flag indicating whether to consider all shortest paths in the network + (``False``, default) or approximate the betweenness statistic using the + `max_path_length` argument. Note that if this is set to `False`, attributes of all + nodes will be written **to file** so as to avoid memory overflows. This requires a + preceding call to `self.write_node_files()`. + max_path_length (int): Maximum path length to consider for pairs of nodes when `approximate` == ``True``. + If set to ``None`` (default), all possible shortest paths will be considered. 
+ """ + target_nodes = focus_nodes if focus_nodes is not None else self.S_GRAPH.nodes + node_combinations = list(itertools.combinations(target_nodes, 2)) + # Returns a list of tuples containing all unique pairs of nodes in considered_nodes + + logging.info(f'--> Adding shortest path statistics considering {len(target_nodes)} nodes yielding ' + f'{len(node_combinations)} combinations.') + logging.info(f"Approximate set to {approximate}, maximum path length set to {max_path_length}") + for count, combo_tuple in enumerate(node_combinations): + if count % 100 == 0: + logging.info(f" <> Processed {count} combinations") + if nx.has_path(self.S_GRAPH, combo_tuple[0], combo_tuple[1]) is False: + continue # indicates a node pair in disconnected network parts + # measure = datetime.datetime.now() + if approximate is False: # This will write required prerequisites to the node files in self.data_dir + if self.node_files_written is False: + raise self.NodeBetweennessException('Missing a required call to self.write_node_files() !') + all_shortest_paths = list(nx.all_shortest_paths(self.S_GRAPH, source=combo_tuple[0], + target=combo_tuple[1])) + # Remove the first and last node (i.e. 
source and target) of all shortest paths + trim_short_paths = [path_list[1:(len(path_list)-1)] for path_list in all_shortest_paths] + involved_nodes = [node for sublist in trim_short_paths for node in sublist] + node_counts = Counter(involved_nodes) + for each_key in node_counts: + json_filepath = os.path.join(self.data_dir, self.parse_filename(each_key) + '.json') + node_dict = self.load_from_json(path_to_file=json_filepath) # Returns type dictionary + if 'BW-Stats' not in node_dict.keys(): + node_dict['BW-Stats'] = [(node_counts[each_key], len(all_shortest_paths))] # -> list of tuples + else: + node_dict['BW-Stats'].append((node_counts[each_key], len(all_shortest_paths))) + # Save node_dict back to file + self.save_to_json(path_to_file=json_filepath, saved_object=node_dict) + else: + shortest_pair_path = nx.shortest_path(self.S_GRAPH, source=combo_tuple[0], target=combo_tuple[1]) + if max_path_length is not None and len(shortest_pair_path) > max_path_length: + continue # indicates a path too long to be considered relevant for transmission + # If shortest paths are "short enough", calculate the exact measure + all_shortest_paths = list(nx.all_shortest_paths(self.S_GRAPH, source=combo_tuple[0], + target=combo_tuple[1])) + # Remove the first and last node (i.e. 
source and target) of all shortest paths + trim_short_paths = [path_list[1:(len(path_list)-1)] for path_list in all_shortest_paths] + involved_nodes = [node for sublist in trim_short_paths for node in sublist] + node_counts = Counter(involved_nodes) + sorted_pair = sorted([combo_tuple[0], combo_tuple[1]]) + update_attr_dict = {each_key: {'SP-' + sorted_pair[0] + '-' + sorted_pair[1]: (node_counts[each_key], + len(all_shortest_paths))} + for each_key in node_counts if each_key not in sorted_pair} + # Then update node attributes + nx.set_node_attributes(self.S_GRAPH, update_attr_dict) + # Write it all to log + logging.info(f"Successfully added betweenness statistics to the network !") + # Adjust the self.shortest_path_stats + self.shortest_path_stats = True + + ########################################################################## + # Customized Network Functions + ########################################################################## + def inspect_network(self): + """Important inspect function for the graph. + + An important function that will inspect all properties of the network and return diagnostic measures on the + "quality". This includes: + + - Total number of nodes in the network + - Number of isolated nodes in the network + - Number of nodes in the network of type: + + - Patient + - Device + - Employee + - Room + + - Total number of edges in the network + - Number of edges in the network of type: + + - Patient-Device + - Patient-Room + - Patient-Employee + - Employee-Device + - Employee-Room + - Device-Room + + - Number of improperly formatted edges. These include: + + - Edges for which at least one node is empty, i.e. "" + - Edges for which any one of the ``from``, ``to``, ``type``, ``origin``, and ``infected`` + (if self.edge_infected == ``True``) attributes are not present + + All result statistics are printed to log. 
+ """ + logging.info(f"###############################################################") + logging.info(f"Running network statistics...") + + all_nodes = self.S_GRAPH.nodes(data=True) # list of tuples of ('source_id', key, {attr_dict } ) + node_degrees = [self.S_GRAPH.degree(each_node) for each_node in self.S_GRAPH.nodes] + all_edges = self.S_GRAPH.edges(data=True, keys=True) # tuple list -> ('source', 'target', key, {attr_dict } ) + + # Overall network statistics + logging.info(f'--> Model Snapshot date: {self.snapshot_dt.strftime("%d.%m.%Y %H:%M:%S")}') + logging.info(f"--> Total {len(all_nodes)} nodes, out of which {node_degrees.count(0)} are isolated") + logging.info(f"--> Total {len(all_edges)} edges") + logging.info('------------------------------') + + # Extract specific node statistics + nbr_pat_nodes = len(['_' for node_data_tuple in all_nodes if node_data_tuple[1]['type'] == 'Patient']) + nbr_dev_nodes = len(['_' for node_data_tuple in all_nodes if node_data_tuple[1]['type'] == 'Device']) + nbr_emp_nodes = len(['_' for node_data_tuple in all_nodes if node_data_tuple[1]['type'] == 'Employee']) + nbr_room_nodes = len(['_' for node_data_tuple in all_nodes if node_data_tuple[1]['type'] == 'Room']) + accounted_for = nbr_pat_nodes + nbr_dev_nodes + nbr_emp_nodes + nbr_room_nodes + logging.info('Node overview:') + logging.info(f"--> {nbr_pat_nodes} Patient nodes") + logging.info(f"--> {nbr_dev_nodes} Device nodes") + logging.info(f"--> {nbr_emp_nodes} Employee nodes") + logging.info(f"--> {nbr_room_nodes} Room nodes") + logging.info(f"--> TOTAL: {accounted_for} nodes ({len(all_nodes) - accounted_for} out of " + f"{len(all_nodes)} nodes not accounted for)") + logging.info('------------------------------') + + # Extract specific edge statistics + type_count_dict = {} + + faulty_sourceordest_id = 0 # Counts edges for which the source or target id are wrongly formatted + nbr_missing_attr = 0 # Counts edges which are missing at least one of the attributes + nbr_ok = 
0 # Counts edges passing all tests + + test_attribute_keys = ['type', 'from', 'to', 'origin'] if self.edges_infected == False \ + else ['type', 'from', 'to', 'origin', 'infected'] + + for each_edge in all_edges: + # Check source and target id + if each_edge[0] == '' or each_edge[1] == '': + faulty_sourceordest_id += 1 + continue + attr_keys = each_edge[3].keys() + if len([entry for entry in test_attribute_keys if entry not in attr_keys]) > 0: + # indicates at least one missing attribute + nbr_missing_attr += 1 + continue + # If everything is ok, update type_count_dict + nbr_ok += 1 + if each_edge[3]['type'] not in type_count_dict: + type_count_dict[each_edge[3]['type']] = 1 + else: + type_count_dict[each_edge[3]['type']] += 1 + type_count_keys = sorted(list(type_count_dict.keys())) + # Write all remaining results to log + logging.info(f"--> {faulty_sourceordest_id} edges with a faulty source or target id") + logging.info(f"--> {nbr_missing_attr} edges missing at least one attribute") + logging.info(f"--> {nbr_ok} edges ok:") + for each_key in type_count_keys: + logging.info(f" >> {type_count_dict[each_key]} edges of type {each_key}") + accounted_for = sum([value for value in type_count_dict.values()]) + logging.info(f"--> TOTAL: {accounted_for} edges ({len(all_edges) - accounted_for} out of {len(all_edges)} " + f"edges not accounted for)") + logging.info('------------------------------') + + # Number of positive patients in the network + nbr_pos_pat = len(['_' for node_data_tuple in all_nodes if node_data_tuple[1]['type'] == 'Patient' and + node_data_tuple[1]['vre_status'] == 'pos']) + logging.info(f"--> {nbr_pos_pat} VRE-positive Patients in the network") + logging.info('------------------------------') + + # Graph connectivity + logging.info(f"--> Graph onnected: {nx.is_connected(self.S_GRAPH)}") + logging.info(f"###############################################################") + + def add_network_data(self, patient_dict, subset='relevant_case', 
snapshot=datetime.datetime.now()): + """Adds nodes and edges data to the network. + + Nodes and edges are added based on the data in patient_dict according to the subset specified (see description + of parameters below). + + Args: + patient_dict (dict): Dictionary containing all data required to build the graph. Please refer to + "Patient_Data_Overview.dov" for details on this argument. + subset (str): Subset of data to be used, can be one of: + + - ``relevant_case`` :math:`\\longrightarrow` includes patients with a relevant case + (regardless of involvement in VRE screenings) and the data of relevant cases + - ``risk`` :math:`\\longrightarrow` includes patients with an associated risk (i.e. + at least one VRE screening) and data of relevant cases + + snapshot (dt.dt()): datetime.datetime() object specifying to which point in time data are to be + imported. Defaulting to the time of execution, this parameter can be used to create + a "snapshot" of the model, and will *ignore* (i.e. not add) edges in patient_dict + for which the 'to' attribute is larger than this parameter. Note that all nodes from + patient_dict will be added, but most "new" nodes will be created in isolation. And + since a call to this function is usually followed by a call to + *remove_isolated_nodes()*, these isolated nodes will then be stripped from the + network. + """ + logging.info(f"Filter set to: {subset}") + logging.info(f"Snapshot created at: {snapshot.strftime('%d.%m.%Y %H:%M:%S')}") + self.snapshot_dt = snapshot + ############################################################# + # --> Measures for the created network + ############################################################# + # ### General measures + nbr_pat_no_rel_case = 0 # Counts patients without... + nbr_pat_rel_case = 0 # ... and with a relevant case + nbr_room_no_id = 0 # Counts number of unidentifyable... + nbr_room_id = 0 # ... 
and identifyable rooms + nbr_app = 0 # number of appointments parsed + + # ### Edge measures + nbr_pat_emp = 0 # number of Patient-Employee edges + nbr_pat_room = 0 # number of Patient-Room edges + nbr_pat_device = 0 # number of Patient-Device edges + nbr_room_device = 0 # number of Room-Device edges + nbr_device_emp = 0 # number of Device-Employee edges + nbr_emp_room = 0 # number of Employee-Room edges + + for each_pat in patient_dict['patients'].values(): + # Apply subset filter here --> relevant_case + if subset == 'relevant_case': + pat_rel_case = each_pat.get_relevant_case() # Returns a Case() object or None + if pat_rel_case is None: + nbr_pat_no_rel_case += 1 + continue + nbr_pat_rel_case += 1 + this_pat_id = pat_rel_case.patient.patient_id + if this_pat_id == '': + logging.warning('Encountered empty patient ID !') + continue + # Add patient node + self.new_patient_node(str(this_pat_id), risk_dict=each_pat.risks) + ######################################### + # --> Step 1: Add rooms based on Move() objects to the network + ######################################### + for each_move in pat_rel_case.moves.values(): # iterate over all moves in a Patient's relevant case + zimmer = each_move.zimmr # will either be the room's name or None + this_ward = each_move.ward.name # will either be the ward's name or None + if zimmer is None: # --> If room is not identified, add it to the 'generic' Room node "Room_Unknown" + if "Room_Unknown" not in self.S_GRAPH.nodes: + self.new_room_node('Room_Unknown') + this_room = 'Room_Unknown' + nbr_room_no_id += 1 + else: # --> room is identified + this_room = each_move.zimmr + # Add room node - this will only overwrite attributes if node is already present + # --> does not matter since room_id and ward are the same + self.new_room_node(each_move.zimmr, ward = this_ward, room_id=each_move.room.get_ids() + if each_move.room is not None else None) + # .get_ids() will return a '@'-delimited list of [room_id]_[system] entries, or None + 
nbr_room_id += 1 + # Add Patient-Room edge if it's within scope of the current snapshot + edge_dict = {'from': each_move.bwi_dt, 'to': each_move.bwe_dt, + 'type': 'Patient-Room', 'origin': 'Move'} + if edge_dict['to'] < snapshot: + self.new_edge(str(this_pat_id), 'Patient', this_room, 'Room', att_dict=edge_dict) + ######################################### + # --> Step 2: Add Rooms, devices and employees based on the relevant Case().appointments + # This will add various edges in the network, since all Appointments contain information on the + # patient, employees, devices and rooms: + # --> Device-Patient + # --> Patient-Room + # --> Employee-Patient + # --> Device-Employee + # --> Employee-Room + # --> Device-Room + # (Remember: nodes in edge specifications are sorted alphabetically) + ######################################### + for each_app in pat_rel_case.appointments: + nbr_app += 1 + duration_from = each_app.termin_datum + duration_to = each_app.termin_datum + datetime.timedelta(hours=each_app.dauer_in_min / 60) + edge_attributes = {'from': duration_from, 'to': duration_to, 'origin': 'Appointment'} + # 'type' key will be added during the creation of edges, see below + device_list = [] + employee_list = [] + room_list = [] + #################################### + # --> ADD NODES + #################################### + # --> Add device nodes + for each_device in each_app.devices: + self.new_device_node(str(each_device.geraet_id), name=str(each_device.geraet_name)) + device_list.append(str(each_device.geraet_id)) + # --> Add employee nodes + for each_emp in each_app.employees: + self.new_employee_node(str(each_emp.mitarbeiter_id)) + employee_list.append(str(each_emp.mitarbeiter_id)) + # --> Add Room nodes + for each_room in each_app.rooms: + self.new_room_node(string_id=each_room.name, ward=each_room.ward.name if each_room.ward is not None else None, room_id=each_room.get_ids()) + room_list.append(each_room.name) + #################################### + # --> 
ADD EDGES based on specifications in self.edge_types + #################################### + # --> Device-Patient + if 'Device-Patient' in self.edge_types: + edge_attributes['type'] = 'Device-Patient' + for device in device_list: + if edge_attributes['to'] < snapshot: + self.new_edge(this_pat_id, 'Patient', device, 'Device', att_dict=edge_attributes) + nbr_pat_device += 1 + # --> Patient-Room + if 'Patient-Room' in self.edge_types: + edge_attributes['type'] = 'Patient-Room' + for room in room_list: + if edge_attributes['to'] < snapshot: + self.new_edge(this_pat_id, 'Patient', room, 'Room', att_dict=edge_attributes) + nbr_pat_room += 1 + # --> Employee-Patient + if 'Employee-Patient' in self.edge_types: + edge_attributes['type'] = 'Employee-Patient' + for emp in employee_list: + if edge_attributes['to'] < snapshot: + self.new_edge(this_pat_id, 'Patient', emp, 'Employee', att_dict=edge_attributes) + nbr_pat_emp += 1 + # --> Device-Employee + if 'Device-Employee' in self.edge_types: + edge_attributes['type'] = 'Device-Employee' + for emp in employee_list: + for device in device_list: + if edge_attributes['to'] < snapshot: + self.new_edge(emp, 'Employee', device, 'Device', att_dict=edge_attributes) + nbr_device_emp += 1 + # --> Employee-Room + if 'Employee-Room' in self.edge_types: + edge_attributes['type'] = 'Employee-Room' + for emp in employee_list: + for room in room_list: + if edge_attributes['to'] < snapshot: + self.new_edge(emp, 'Employee', room, 'Room', att_dict=edge_attributes) + nbr_emp_room += 1 + # --> Device-Room + if 'Device-Room' in self.edge_types: + edge_attributes['type'] = 'Device-Room' + for device in device_list: + for room in room_list: + if edge_attributes['to'] < snapshot: + self.new_edge(device, 'Device', room, 'Room', att_dict=edge_attributes) + nbr_room_device += 1 + ######################################### + logging.info(f"##################################################################################") + logging.info(f"Encountered 
{nbr_room_no_id} moves without associated room, {nbr_room_id} rooms identified.") + logging.info('------------------------------------------------------------------') + total_warnings = self.room_add_warnings + self.employee_add_warnings + self.device_add_warnings + \ + self.patient_add_warnings + if total_warnings > 0: + logging.warning(f'Encountered the following errors during the addition of nodes to the network:') + logging.warning(f'--> {self.room_add_warnings} errors during the addition of room nodes to the network') + logging.warning(f'--> {self.employee_add_warnings} errors during the addition of employee nodes to the ' + f'network') + logging.warning(f'--> {self.patient_add_warnings} errors during the addition of patient nodes to the ' + f'network') + logging.warning(f'--> {self.device_add_warnings} errors during the addition of device nodes to the network') + if self.edge_add_warnings > 0: + logging.info('------------------------------------------------------------------') + logging.warning(f'Encountered {self.edge_add_warnings} warnings while adding edges to the network.') + logging.info(f"##################################################################################") + # Log "global" statistics + self.inspect_network() + + ################################################################################################################ + # Data Export Functions + ################################################################################################################ + def export_patient_degree_ratio(self, csv_sep=';', export_path=None): + """Calculates and exports patient degree ratio for all nodes in the network. + + The patient degree ratio is defined for a single node_x as: + + number of infected edges between node_x and patients / number of total edges between node_x and patients + + Args: + csv_sep (str): Separator for created csv file. + export_path (str): Path to which export file will be written. 
If set to `None` (the default), the + exported file will be written to `self.data_dir`. + + The result file will be written to the path [self.data_dir]/[self.snapshot_dt]_pdr.txt, and contains the + following columns: + - Node ID + - Node type + - Degree ratio + - Number of infected edges (always patient-related) + - Total number of patient-related edges + - Total number of edges (i.e. degree of node_x) + """ + if self.edges_infected == False: + logging.error('This operation requires infection data on edges !') + return None + exact_path = self.data_dir if export_path is None else export_path + logging.info('Calculating patient degree ratio...') + write_count = 0 + with open(os.path.join(exact_path, self.snapshot_dt.strftime('%Y_%m_%d') + '_pdr.txt'), 'w') as outfile: + outfile.write(f"Node ID{csv_sep}Node Type{csv_sep}Degree Ratio{csv_sep}Number of Infected Edges{csv_sep}" + f"Total Patient Edges{csv_sep}Total Edges\n") + for each_node in self.S_GRAPH.nodes(data=True): + # each_node will be a tuple of length 2 --> ( 'node_id', {'att_1' : 'att_value1', ... } ) + this_node_edges = self.S_GRAPH.edges(each_node[0], data=True, keys=True) + # will return a list of tuples + # --> [ ( 'node_id', 'target_id1', key, {attr_dict} ), ('node_id', 'target_id2', key, {attr_dict} ),...] 
+ pat_edges = [edge for edge in this_node_edges if 'Patient' in edge[3]['type']] + # indicates a type of "Patient-XXX" node + infected_pat_edges = [True if edge[3]['infected'] is True else False for edge in pat_edges] + + # Write to output file: + outfile.write(f"{each_node[0]}{csv_sep}{each_node[1]['type']}{csv_sep}" + f"{sum(infected_pat_edges)/len(infected_pat_edges)}{csv_sep}" + f"{len([entry for entry in infected_pat_edges if entry == True])}{csv_sep}" + f"{len(pat_edges)}{csv_sep}{len(this_node_edges)}\n") + write_count += 1 + # Write to log + logging.info(f"Successfully wrote patient degree ratios for {write_count} nodes to " + f"{os.path.join(exact_path, self.snapshot_dt.strftime('%Y_%m_%d') + '_pdr.txt')}") + + def export_total_degree_ratio(self, csv_sep=';', export_path=None): + """Exports total degree ratio (TDR) for all nodes in the network. + + Will calculate and export the total degree ratio for all nodes in the network, which is defined for a + single *node_x* as: + + :math:`TDR = \\frac{Number~of~infected~edges~between~node + \\_x~and~patients}{Total~number~of~edges~leading~to~node\\_x}` + + The result file will be written to a file [self.data_dir]/[self.snapshot_dt]_tdr.txt, and contains the + following columns: + + - Node ID + - Node type + - Degree ratio + - Number of infected edges (always patient-related) + - Total number of edges for node_x (also includes non-patient-related edges) + + Args: + csv_sep (str): Separator for created csv file. + export_path (str): Path to which node files will be written. If set to `None` (the default), the + exported file will be written to `self.data_dir`. 
+ """ + exact_path = self.data_dir if export_path is None else export_path + logging.info('Calculating total degree ratio...') + write_count = 0 + with open(os.path.join(exact_path, self.snapshot_dt.strftime('%Y_%m_%d') + '_tdr.txt'), 'w') as outfile: + outfile.write(f"Node ID{csv_sep}Node Type{csv_sep}Degree Ratio{csv_sep}" + f"Number of Infected Edges{csv_sep}Total Edges\n") + for each_node in self.S_GRAPH.nodes(data=True): + # each_node will be a tuple of length 2 --> ( 'node_id', {'att_1' : 'att_value1', ... } ) + this_node_edges = self.S_GRAPH.edges(each_node[0], data=True, keys=True) + # will return a list of tuples + # --> [ ( 'node_id', 'target_id1', key, {attr_dict} ), ('node_id', 'target_id2', key, {attr_dict} ),...] + infected_edges = [True if edge[3]['infected'] == True else False for edge in this_node_edges] + # Write to output file: + outfile.write(f"{each_node[0]}{csv_sep}{each_node[1]['type']}{csv_sep}" + f"{sum(infected_edges)/len(this_node_edges)}{csv_sep}" + f"{len([entry for entry in infected_edges if entry == True])}{csv_sep}" + f"{len(this_node_edges)}\n") + write_count += 1 + # Write to log + logging.info(f"Successfully wrote total degree ratios for {write_count} nodes to " + f"{os.path.join(exact_path, self.snapshot_dt.strftime('%Y_%m_%d') + '_tdr.txt')}") + + def export_shortest_path_length_overview(self, focus_nodes=None, csv_sep=';', export_path=None): + """Exports an overview of shortest path lengths in the network to self.data_dir. + + Exports an overview of shortest path lengths of all nodes in the network (if *focus_nodes* is `None`, + default). If a list of node identifiers is provided in *focus_nodes* instead, only these nodes will be + considered for the data export. Data are exported to the self.data_dir directory. + + Note: + This overview only considers *one* possible shortest path between any given pair of nodes, not all paths. + + Args: + focus_nodes (list or None): List of nodes considered for distribution. 
+ csv_sep (str): Separator used in export file. + export_path (str): Path to which node files will be written. If set to `None` (the default), the + exported file will be written to `self.data_dir`. + + """ + exact_path = self.data_dir if export_path is None else export_path + target_nodes = focus_nodes if focus_nodes is not None else self.S_GRAPH.nodes + node_combinations = list(itertools.combinations(target_nodes, 2)) + logging.info(f'Exporting shortest path length overview for {len(node_combinations)} node combinations...') + shortest_path_lengths = [] # list holding all encountered shortest path lengths + for count, combo_tuple in enumerate(node_combinations): + if count % 1000 == 0: + logging.info(f" <> Processed {count} combinations") + if nx.has_path(self.S_GRAPH, combo_tuple[0], combo_tuple[1]) is False: + continue # indicates a node pair in disconnected parts of network + shortest_path_lengths.append(len(nx.shortest_path(self.S_GRAPH, source=combo_tuple[0], + target=combo_tuple[1]))) + # Then export results to file + node_counts = Counter(shortest_path_lengths) + with open(os.path.join(exact_path, self.snapshot_dt.strftime('%Y_%m_%d') + '_plov.txt'), 'w') as outfile: + outfile.write(f"Path Length{csv_sep}Count\n") + for each_key in sorted(node_counts.keys()): + outfile.write(f"{each_key}{csv_sep}{node_counts[each_key]}\n") + # Write to log and exit + logging.info(f'Successfully exported shortest path length overview to' + f"{os.path.join(exact_path, self.snapshot_dt.strftime('%Y_%m_%d') + '_plov.txt')}") + + def export_node_betweenness(self, csv_sep=';', export_path=None): + """Exports node betweenness. + + This function will export node betweenness for all nodes from the network. This is done by exporting the sum + of all fractions of shortest paths a particular node is in, which is found in the attribute keys starting with + the "SP-" prefix (e.g. "SP-Node1-Node4"). 
These tuples contain 2 entries, the first being the fraction of + shortest path this node is found in (between the given pair of nodes), and the second one being the total + number of shortest paths. This function can only be executed once the function update_shortest_path_statistics() + has been called. Nodes without an attribute key starting with the "SP-" prefix will have node betweenness of 0. + + The exported file is written into self.data_dir and contains 3 columns: + + - Node ID + - Node Type + - Betweenness Score + + Args: + csv_sep (str): separator to be used in export file. + export_path (str): Path to which node files will be written. If set to `None` (the default), the + exported file will be written to `self.data_dir`. + """ + if self.shortest_path_stats is False: + logging.error(f"This function requires specific node attributes which can be added with the function " + f"update_shortest_path_statitics()") + raise self.NodeBetweennessException('Missing a required call to update_shortest_path_statitics()') + exact_path = self.data_dir if export_path is None else export_path + with open(os.path.join(exact_path, self.snapshot_dt.strftime('%Y_%m_%d') + '_nbc.txt'), 'w') as outfile: + outfile.write(csv_sep.join(['Node ID', 'Node Type', 'Betweenness_Score']) + '\n') + write_counter = 0 + for node_tuple in self.S_GRAPH.nodes(data=True): + # Returns tuples of length 2 --> (node_id, attribute_dict) + write_string = [node_tuple[0], self.identify_id(node_tuple[0])] + target_keys = [each_key for each_key in node_tuple[1].keys() if each_key.startswith('SP-')] + betweenness_score = sum([node_tuple[1][each_key][0] / node_tuple[1][each_key][1] + for each_key in target_keys]) + write_string.append(str(betweenness_score)) + # Then write to outfile + outfile.write(csv_sep.join(write_string) + '\n') + write_counter += 1 + # Log progress + logging.info(f"Successfully wrote betweenness scores for {write_counter} nodes to " + f"{os.path.join(exact_path, 
self.snapshot_dt.strftime('%Y_%m_%d') + '_nbc.txt')}") + + def write_node_files(self, attribute_subset=None, export_path=None): + """Writes a JSON representation of all nodes in the network to file. + + For each node in the network, this function will write a JSON representation containing a dictionary of + node_data_dict[keys], where keys are all entries in node_attributes if that key is actually found in the node + data dictionary. The file is named [node_id].json and written to the self.data_dir directory. + + This function is mainly used to "hard-store" values for highly ressource intensive calculations such as + betweenness centrality and avoid memory overflows. + + Note: + This function will also set the self.node_files_written flag to ``True``. + + Args: + attribute_subset (list): List of keys in a node's attribute_dict to be included in the written JSON + representation of the node. If set to `None` (the default), all attributes will + be included. + export_path (str): Path to which node files will be written. If set to `None` (the default), all + files will be written to `self.data_dir`. 
+ """ + exact_path = self.data_dir if export_path is None else export_path + logging.info(f"Writing node files to {os.path.abspath(exact_path)}...") + write_count = 0 + for node_tuple in self.S_GRAPH.nodes(data=True): + # Returns tuples of length 2 --> (node_id, attribute_dict) + if write_count % 5000 == 0: + logging.info(f"Wrote {write_count} nodes to file.") + if attribute_subset is None: + write_keys = [key for key in node_tuple[1]] + else: + write_keys = [key for key in node_tuple[1] if key in attribute_subset] + write_dict = {node_attr: node_tuple[1][node_attr] for node_attr in write_keys} + json.dump(write_dict, open(os.path.join(exact_path, self.parse_filename(node_tuple[0]) + '.json'), 'w')) + write_count += 1 + # Write to log and exit + logging.info(f"Successfully wrote all nodes to file!") + self.node_files_written = True + + + + + + + + + + + # logging.info(f"##################################################################################") + # logging.info(f'Calculating betweenness centrality (based on {"all" if considered_nodes is None else "specific"} nodes)...') + # if considered_nodes is None: + # bw_cent = nx.betweenness_centrality(self.S_GRAPH, normalized=normalize) + # logging.info('Successfully calculated betweenness centrality for all nodes.') + # return bw_cent + # else: + # node_combinations = list(itertools.combinations(considered_nodes, 2)) # Returns a list of tuples containing all unique pairs of nodes in considered_nodes + # logging.info(f'--> Calculating betweenness centrality considering {len(considered_nodes)} nodes yielding {len(node_combinations)} combinations') + # node_dict = {each_node[0] : {'scores' : [], 'attributes' : each_node[1] } # dictionary mapping node identifiers to lists of tuples of the format (shortest_paths_through_node, number_of_shortest_paths) for each combination of considered_nodes + # for each_node in self.S_GRAPH.nodes(data=True) } # S_GRAPH.nodes(data=True) returns tuples of length 2 of ('node_id', 
attribute_dict) + # for index, combo_tuple in enumerate(node_combinations): + # if index % 1 == 0: + # logging.info(f" <> Processed {index} combinations") + # all_shortest_paths = list(nx.all_shortest_paths(self.S_GRAPH, source=combo_tuple[0], target=combo_tuple[1])) # Yields a list of list of shortest paths for that combination of nodes + # all_shortest_paths = [path_list[1:len(path_list) - 1] for path_list in all_shortest_paths] # Remove the source and target node in each list of shortest path, as these should not be counted + # involved_nodes = set([node for sublist in all_shortest_paths for node in sublist]) # includes all involved nodes + # for inv_node in involved_nodes: + # involved_list = [True if inv_node in sub_list else False for sub_list in all_shortest_paths] # list containing True if inv_node is involved in a particular list of all_shortest_paths, and False otherwise + # node_dict[inv_node]['scores'].append((sum(involved_list), len(involved_list))) + # ### Then combine all entries in node_dict to true scores + # score_dict = {each_key : sum([each_tuple[0]/each_tuple[1] for each_tuple in node_dict[each_key]['scores']]) for each_key in node_dict.keys()} + # if normalize == True: + # total_nodes = len(self.S_GRAPH.nodes) + # score_dict = {each_key : score_dict[each_key] * ( 2 / ((total_nodes-1) * (total_nodes-2)) ) for each_key in score_dict.keys()} + # logging.info('Successfully calculated betweenness centrality.') + # self.betweenness_centrality = score_dict + # ### Write results to file + # with open(os.path.join(self.data_dir, self.snapshot_dt.strftime('%Y_%m_%d') + '_node_bwn.txt'), 'w') as outfile: + # outfile.write(f'Node ID{csv_sep}Node Type{csv_sep}Betweenness_Score\n') + # for key in sorted(score_dict.keys()): + # outfile.write(f"{key}{csv_sep}{node_dict[key]['attributes']['type']}{csv_sep}{score_dict[key]}\n") + # logging.info(f"Successfully wrote betweenness scores for {len(score_dict.keys())} nodes to {os.path.join(self.data_dir, 
self.snapshot_dt.strftime('%Y_%m_%d') + '_node_bwn.txt')}") + + + + # def add_patient_contacts(self, patient_contacts, patient_dict): + # """ + # Adds patient-patient interactions in cases where 2 patients came into contact with each other during their relevant case. + # + # :param patient_contacts: List containing tuples of length 6 of either the format: (source_pat_id, dest_pat_id, start_overlap_dt, end_overlap_dt, room_name, "kontakt_raum") + # or the format: (source_pat_id, dest_pat_id, start_overlap_dt, end_overlap_dt, ward_name, "kontakt_org") + # :param patient_dict: Dictionary mapping PATIENTID to Patient() objects, i.e. {"00001383264" : Patient(), "00001383310" : Patient(), ...} + # """ + # contact_count = 0 + # for contact in patient_contacts: # list of tuples --> can be directly added to the network + # if patient_dict[contact[0]].get_relevant_case() is not None and patient_dict[contact[1]].get_relevant_case() is not None: # make sure to include only patients with a relevant date ! + # self.S_GRAPH.add_node(str(contact[0]), type = 'Patient') + # self.S_GRAPH.add_node(str(contact[1]), type = 'Patient') + # self.S_GRAPH.add_edge(str(contact[0]), str(contact[1])) + # contact_count += 1 + # # Update log file + # logging.info(f'GRAPH: Created {contact_count} patient-patient contacts.') + + # def add_patient_room(self, patient_dict): + # """ + # Adds patient-room interactions, i.e. rooms occupied by a particular patient during his or her relevant case. + # + # :param patient_dict: Dictionary mapping PATIENTID to Patient() objects, i.e. 
{"00001383264" : Patient(), "00001383310" : Patient(), ...} + # """ + # relcase_count = 0 + # patient_count = 0 + # room_count = 0 + # + # for patient in patient_dict.values(): + # patient_count += 1 + # pat_rel_case = patient.get_relevant_case() + # if pat_rel_case is not None: + # relcase_count += 1 + # for move in pat_rel_case.moves.values(): # the Case().moves attribute is a dictionary mapping the order of moves to Moves() objects + # if move.room is not None: + # self.S_GRAPH.add_node(str(patient.patient_id), type = 'Patient') + # self.S_GRAPH.add_node(str(move.room.name), type='Room') + # self.S_GRAPH.add_edge(str(patient.patient_id), str(move.room.name)) + # room_count += 1 + # # update logfile + # logging.info(f'GRAPH: Wrote {room_count} rooms (based on moves) for {relcase_count} patients with relevant cases out of {patient_count} patients.') + # + # def add_patient_employee(self, patient_dict): + # """ + # Add all contacts between an employee and a patient during the patient's relevant case. + # + # :param patient_dict: Dictionary mapping PATIENTID to Patient() objects, i.e. 
{"00001383264" : Patient(), "00001383310" : Patient(), ...} + # """ + # pat_count = 0 + # relcase_count = 0 + # contact_count = 0 + # + # for k, p in patient_dict.items(): + # pat_rel_case = p.get_relevant_case() # returns None if patient has no relevant case + # pat_count += 1 + # if pat_rel_case is not None: + # relcase_count += 1 + # for t in pat_rel_case.appointments: + # for e in t.employees: + # if e.mitarbeiter_id != "-1": # indicates an unknown mitarbeiter - these cases are ignored + # self.S_GRAPH.add_node(str(p.patient_id), type = 'Patient') + # self.S_GRAPH.add_node(str(e.mitarbeiter_id), type = 'Employee') + # self.S_GRAPH.add_edge(str(p.patient_id), str(e.mitarbeiter_id)) + # contact_count += 1 + # # Update log file + # logging.info(f'GRAPH: Created {contact_count} contacts in {relcase_count} relevant cases from {pat_count} patients.') + + ################################################################################################################ + ### Functions for retrieving graph statistics + ################################################################################################################ + + # def get_node_count(self, type_filter = None): + # """ + # Return the number of nodes in the graph, possibly also applying a filter to the type of nodes to be returned. + # + # :param type_filter: Filter for the "type" attribute of nodes. If None, all nodes in the graph will be returned. + # :return: Number of nodes matching the specified filter criteria. + # """ + # if filter is None: + # return self.S_GRAPH.number_of_nodes() + # else: + # return len([data for node, data in self.S_GRAPH.nodes(data=True) if data['type'] == type_filter]) # S_GRAPH.nodes() returns a list of dictionaries of all node attributes + # + # def get_edge_count(self, type_filter = None): + # """ + # Return the number of edges in the graph, possibly also applying a filter to the type of edge to be returned. 
+ # + # :param type_filter: Filter for the "type" attribute of edges. If None, all edges in the graph will be returned. + # :return: Number of edges matching the specified filter criteria. + # """ + # if type is None: + # return self.S_GRAPH.number_of_edges() + # else: + # return len([data_tuple for data_tuple in self.S_GRAPH.edges(data=True) if data_tuple[2]['type'] == type_filter]) # S_GRAPH.edges() returns a list of tuples of length 3 in the format (source_node, dest_node, {'attribute1' : value1, ...}) + # + # def get_node_degree_centrality(self): + # """ + # Calculate the node degree centrality for each node of the graph. + # + # :return: List of tuples of length 2 --> (node_name, centrality) + # """ + # return nx.degree_centrality(self.S_GRAPH) + # + # def get_node_betweenness_centrality(self, normalized = False): + # """ + # Calculate the node betweenness centrality (raw or normalized) for each node of the graph. + # + # :param normalized: boolean indicating whether or not to return raw (False) or normalized (True) values + # :return: List of tuples of length 2 --> (node_name, centrality) + # """ + # return nx.betweenness_centrality(self.S_GRAPH, normalized=normalized) + # [END OF FILE} diff --git a/spitalhygiene/vre/src/main/python/vre/quality_control/__init__.py b/spitalhygiene/vre/src/main/python/vre/quality_control/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/spitalhygiene/vre/src/main/python/vre/quality_control/data_purification.py b/spitalhygiene/vre/src/main/python/vre/quality_control/data_purification.py new file mode 100644 index 0000000..1485081 --- /dev/null +++ b/spitalhygiene/vre/src/main/python/vre/quality_control/data_purification.py @@ -0,0 +1,64 @@ +import os + +################################################################################################################ +### Contains important functions for writing data used in the VRE model to file, so that the underlying data +### can be inspected and potentially 
purified manually +################################################################################################################ +### define functions for writing to various files + +this_filepath = os.path.dirname(os.path.realpath(__file__)) +csv_sep = ';' + +def write_geraet(geraet_tuple, filepath = os.path.join(this_filepath, 'Geraet.csv'), csv_sep = csv_sep): + """ + Will write the two entries in geraet_tuple (tuple of length 2) to filepath using csv_sep. + :param geraet: Tuple of length 2 + :param filepath: Path of the CSV file to write to in "a" mode + :param csv_sep: CSV separator to use in the file + """ + try: + with open(filepath, 'a') as writefile: + writefile.write(f"{geraet_tuple[0]}{csv_sep}{geraet_tuple[1]}\n") + except Exception as e: + print(f"Error: {e}") + +def write_patient_case(case, has_risk, filepath = os.path.join(this_filepath, 'Pat_Case.csv'), csv_sep = csv_sep): + """ + Will write the relevant case and associated patient ID (each case also has an associated patient ID) to filepath using csv_sep. + :param case: A Case() object + :param has_risk: Boolean indicating whether or not the patient had a risk during his or her relevant stay (i.e. was ever in contact with any VRE screening) + :param filepath: Path of the CSV file to write to in "a" mode + :param csv_sep: CSV separator to use in the file + """ + try: + with open(filepath, 'a') as writefile: + writefile.write(f"{case.patient_id}{csv_sep}{case.case_id}{csv_sep}{has_risk}\n") + except Exception as e: + print(f"Error: {e}") + +def write_employee(employee_id, filepath = os.path.join(this_filepath, 'Employee.csv')): + """ + Will write employee_id to filepath using csv_sep. 
+ :param employee: string + :param filepath: Path of the CSV file to write to in "a" mode + """ + try: + with open(filepath, 'a') as writefile: + writefile.write(f"{employee_id}\n") + except Exception as e: + print(f"Error: {e}") + +def write_room(room, filepath = os.path.join(this_filepath, 'Room.csv') ): + """ + Will write room to filepath using csv_sep. + :param room: string + :param filepath: Path of the CSV file to write to in "a" mode + """ + try: + with open(filepath, 'a') as writefile: + writefile.write(f"{room}\n") + except Exception as e: + print(f"Error: {e}") + + + diff --git a/spitalhygiene/vre/src/main/python/vre/quality_control/distribution_export.py b/spitalhygiene/vre/src/main/python/vre/quality_control/distribution_export.py new file mode 100644 index 0000000..393e072 --- /dev/null +++ b/spitalhygiene/vre/src/main/python/vre/quality_control/distribution_export.py @@ -0,0 +1,133 @@ + + +################################################################################################################ +### Contains important functions for exporting count distributions (i.e. appearances) of various +### entities in the VRE model +################################################################################################################ + +def get_room_distribution(appointment_dict, file_path, write_mode = 'w', csv_sep = ';'): + """ + This function will count the occurrence of all Room() objects based on the data contained in all Appointments() of the VRE model. This information + is extracted from the Appointment().rooms attribute. 
The written file contains: + --> Room names + --> Occurrence counts + + :param appointment_dict: A dictionary mapping appointment.termin_id to corresponding Appointment() objects + :param file_path: Path for writing to file + :param write_mode: Write mode for file_path (default is 'w') + :param csv_sep: Separator for CSV files (default is ';') + """ + room_counts = {} # dictionary containing room counts + for apmnt in appointment_dict.values(): + for room_obj in apmnt.rooms: + if room_obj.name not in room_counts.keys(): + room_counts[room_obj.name] = 1 + else: # increment occurrence of room + room_counts[room_obj.name] += 1 + ### Then write results to file + room_names = sorted(list(room_counts.keys())) + with open(file_path, write_mode) as writefile: + writefile.write(f"Room_Name{csv_sep}Count\n") + for each_name in room_names: + writefile.write(f"{each_name}{csv_sep}{room_counts[each_name]}" + '\n') + print('\nSuccessfully wrote room counts to file !\n') + +def get_ward_distribution(case_dict, file_path, write_mode = 'w', csv_sep = ';'): + """ + This function will count the occurrence of all Ward() objects in the data. + This information is extracted from the Case().moves attribute, since each Move() object has an attribute Move().ward . 
The written file contains + --> Ward names + --> Occurrence counts + + :param case_dict: A dictionary mapping Case().case_id to corresponding Case() objects + :param file_path: Path for written file + :param write_mode: Write mode for file_path (default is 'w') + :param csv_sep: Separator for CSV files (default is ';') + """ + ward_count = {} + for each_case in case_dict.values(): + for each_move in each_case.moves.values(): + if each_move.ward is None: + continue + if each_move.ward.name not in ward_count.keys(): + ward_count[each_move.ward.name] = 1 + else: + ward_count[each_move.ward.name] += 1 + ### Write results to file + ward_names = sorted(list(ward_count.keys())) + with open(file_path, write_mode) as writefile: + writefile.write(f"Ward_Name{csv_sep}Count\n") + for each_name in ward_names: + writefile.write(f"{each_name}{csv_sep}{ward_count[each_name]}" + '\n') + print('\nSuccessfully wrote ward counts to file !\n') + +def collect_patient_info(pat_dict, file_path, write_mode = 'w', csv_sep = ';'): + """ + This function will collect important information on all patients in the VRE dataset. This information will be taken from various sources, most + importantly the Patient().cases attribute list. 
The information collected (and printed to file) includes:
+    --> Patient ID
+    --> Number of cases
+    --> Number of appointments
+    --> Total duration of all appointments
+    --> Number of moves
+    --> Number of surgeries
+    --> Number of medications
+
+    :param pat_dict: Dictionary mapping Patient().patient_id to corresponding Patient() objects
+    :param file_path: Path for written file
+    :param write_mode: Write mode for file_path (default is 'w')
+    :param csv_sep: Separator for CSV files (default is ';')
+    """
+    pat_counts = {} # dictionary mapping patient ids to the various measures described in the docstring
+    for pat_tuple in pat_dict.items():
+        if pat_tuple[0] not in pat_counts.keys():
+            pat_counts[pat_tuple[0]] = {}
+        ### Extract number of cases
+        pat_counts[pat_tuple[0]]['Case Count'] = len(list(pat_tuple[1].cases))
+        ### Extract other required metrics
+        apmnt_count = 0
+        apmnt_duration = 0
+        move_count = 0
+        surgery_count = 0
+        medication_count = 0
+        for each_case in pat_tuple[1].cases:
+            apmnt_count += len(each_case.appointments)
+            for each_apmnt in each_case.appointments:
+                # dauer_in_min is the appointment duration in minutes (German: "Dauer")
+                apmnt_duration += each_apmnt.dauer_in_min
+            move_count += len(list(each_case.moves.keys()))
+            surgery_count += len(each_case.surgeries)
+            medication_count += len(each_case.medications)
+        ### Add all extracted measures to pat_counts
+        pat_counts[pat_tuple[0]]['Apmnt Count'] = apmnt_count
+        pat_counts[pat_tuple[0]]['Apmnt Duration'] = apmnt_duration
+        pat_counts[pat_tuple[0]]['Move Count'] = move_count
+        pat_counts[pat_tuple[0]]['Surgery Count'] = surgery_count
+        pat_counts[pat_tuple[0]]['Medication Count'] = medication_count
+    ### Then write results to file
+    with open(file_path, write_mode) as write_file:
+        write_file.write(f"Patient_ID{csv_sep}Case_Count{csv_sep}Appointment_Count{csv_sep}Appointment_Duration{csv_sep}Move_Count{csv_sep}Surgery_Count{csv_sep}Medication_Count\n")
+        for each_tuple in pat_counts.items():
+            data_list = [each_tuple[0]]
+            data_list.append(each_tuple[1]['Case Count'])
+            data_list.append(each_tuple[1]['Apmnt Count'])
+            data_list.append(each_tuple[1]['Apmnt Duration'])
+            data_list.append(each_tuple[1]['Move Count'])
+            data_list.append(each_tuple[1]['Surgery Count'])
+            data_list.append(each_tuple[1]['Medication Count'])
+            # NOTE(review): data_list holds the patient id plus int counts, but str.join()
+            # requires all elements to be strings — this line raises TypeError unless every
+            # value is converted via str() first. Confirm and fix before relying on this output.
+            write_file.write(csv_sep.join(data_list) + '\n')
+    print('\nSuccessfully collected patient information !\n')
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/spitalhygiene/vre/src/main/python/vre/test.py b/spitalhygiene/vre/src/main/python/vre/test.py
new file mode 100644
index 0000000..79b0fd6
--- /dev/null
+++ b/spitalhygiene/vre/src/main/python/vre/test.py
@@ -0,0 +1,18 @@
+from collections import Counter
+# import networkx as nx
+import datetime
+import itertools
+import random
+import json
+
+# print(0 % 4)
+# print(1 % 4)
+#
+# print(6 % 5)
+
+
+# print(datetime.datetime.now() > datetime.date(2017, 1, 1))
+
+
+
+
diff --git a/spitalhygiene/vre/src/main/python/vre/test_data_loader.py b/spitalhygiene/vre/src/main/python/vre/test_data_loader.py
new file mode 100644
index 0000000..3a68234
--- /dev/null
+++ b/spitalhygiene/vre/src/main/python/vre/test_data_loader.py
@@ -0,0 +1,22 @@
+# Small script to test the patient import defined in the HDFS_data_loader.py script
+
+from HDFS_data_loader import HDFS_data_loader
+from feature_extractor import feature_extractor
+from networkx_graph import surface_model, create_model_snapshots
+import logging
+import os
+import datetime
+import configparser
+import calendar
+import pathlib
+
+config_reader = configparser.ConfigParser()
+
+config_reader.read(pathlib.Path('BasicConfig.ini'))
+
+logging.basicConfig(format='%(asctime)s - %(levelname)s: %(message)s', level=logging.INFO, datefmt='%d.%m.%Y %H:%M:%S')
+
+loader = HDFS_data_loader(hdfs_pipe=False) # hdfs_pipe = False --> files will be loaded directly from CSV
+patient_data = loader.patient_data()
+