Skip to content

Commit

Permalink
Merge pull request #50 from UMCUGenetics/release/v1.7.0
Browse files Browse the repository at this point in the history
Release/v1.7.0
  • Loading branch information
rernst authored Mar 29, 2023
2 parents 5e187f6 + 8e84f33 commit 3be07da
Show file tree
Hide file tree
Showing 21 changed files with 401 additions and 119 deletions.
44 changes: 44 additions & 0 deletions .github/workflows/python.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions

name: Python (flake8, pytest)

on:
  push:
    branches: [main, develop]
  pull_request:
    branches: [main, develop]

jobs:
  build:

    # NOTE(review): pinned to ubuntu-20.04 — presumably because later runner
    # images no longer ship Python 3.6; confirm before bumping the image.
    runs-on: ubuntu-20.04
    strategy:
      fail-fast: false
      matrix:
        python-version: [3.6]

    steps:
      - uses: actions/checkout@v3
        with:
          # Full clone (no shallow checkout) — presumably needed by tooling
          # that inspects git history/tags; verify before removing.
          fetch-depth: 0
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v3
        with:
          python-version: ${{ matrix.python-version }}
      - name: "Install Apache package"
        # apache2-dev provides apxs, required to build Apache-linked Python
        # packages from requirements.txt (e.g. mod_wsgi) — TODO confirm.
        run: sudo apt install -y apache2-dev
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          python -m pip install flake8 pytest
          if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
      - name: Lint with flake8
        run: |
          # stop the build if there are Python syntax errors or undefined names
          flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
          # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
          flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
      - name: Test with pytest
        run: |
          pytest
18 changes: 16 additions & 2 deletions clarity_epp.py
Original file line number Diff line number Diff line change
Expand Up @@ -128,7 +128,7 @@ def export_tapestation(args):

def export_tecan(args):
    """Export samplesheets for tecan machine.

    Forwards the samplesheet type ('qc' or 'purify_normalise', restricted by
    argparse choices) to the tecan export module.
    """
    clarity_epp.export.tecan.samplesheet(lims, args.process_id, args.type, args.output_file)


def export_workflow(args):
Expand All @@ -147,7 +147,10 @@ def upload_samples(args):

def upload_tecan_results(args):
    """Upload tecan results.

    Dispatches on the tecan process type; argparse restricts args.type to
    'qc' or 'purify_normalise', so no fallback branch is needed.
    """
    if args.type == 'qc':
        clarity_epp.upload.tecan.results_qc(lims, args.process_id)
    elif args.type == 'purify_normalise':
        clarity_epp.upload.tecan.results_purify_normalise(lims, args.process_id)


def upload_tapestation_results(args):
Expand Down Expand Up @@ -221,6 +224,11 @@ def placement_complete_step(args):
clarity_epp.placement.step.finish_protocol_complete(lims, args.process_id)


def placement_tecan(args):
    """Placement tecan process: distribute artifacts over two containers."""
    process_id = args.process_id
    clarity_epp.placement.tecan.place_artifacts(lims, process_id)


if __name__ == "__main__":
parser = argparse.ArgumentParser()
subparser = parser.add_subparsers()
Expand Down Expand Up @@ -330,6 +338,7 @@ def placement_complete_step(args):

parser_export_tecan = subparser_export.add_parser('tecan', help='Create tecan samplesheets', parents=[output_parser])
parser_export_tecan.add_argument('process_id', help='Clarity lims process id')
parser_export_tecan.add_argument('type', choices=['qc', 'purify_normalise'], help='Samplesheet type')
parser_export_tecan.set_defaults(func=export_tecan)

parser_export_workflow = subparser_export.add_parser(
Expand Down Expand Up @@ -357,6 +366,7 @@ def placement_complete_step(args):

parser_upload_tecan = subparser_upload.add_parser('tecan', help='Upload tecan results')
parser_upload_tecan.add_argument('process_id', help='Clarity lims process id')
parser_upload_tecan.add_argument('type', choices=['qc', 'purify_normalise'], help='Tecan process type')
parser_upload_tecan.set_defaults(func=upload_tecan_results)

parser_upload_magnis = subparser_upload.add_parser('magnis', help='Upload magnis results')
Expand Down Expand Up @@ -418,5 +428,9 @@ def placement_complete_step(args):
parser_placement_unpooling.add_argument('process_id', help='Clarity lims process id')
parser_placement_unpooling.set_defaults(func=placement_unpooling)

parser_placement_tecan = subparser_placement.add_parser('tecan', help='Placement of samples in tecan step')
parser_placement_tecan.add_argument('process_id', help='Clarity lims process id')
parser_placement_tecan.set_defaults(func=placement_tecan)

args = parser.parse_args()
args.func(args)
13 changes: 9 additions & 4 deletions clarity_epp/export/email.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,12 @@ def sequencing_run(lims, email_settings, process_id):

if process.step.actions.escalation:
message += "\nManager Review LIMS:\n"
message += "{0}: {1}\n".format(process.step.actions.escalation['author'].name, process.step.actions.escalation['request'])
message += "{0}: {1}\n".format(process.step.actions.escalation['reviewer'].name, process.step.actions.escalation['answer'])

send_email(email_settings['server'], email_settings['from'], email_settings['to_sequencing_run_complete'], subject, message)
message += "{0}: {1}\n".format(
process.step.actions.escalation['author'].name,
process.step.actions.escalation['request']
)

send_email(
email_settings['server'], email_settings['from'], email_settings['to_sequencing_run_complete'],
subject, message
)
13 changes: 8 additions & 5 deletions clarity_epp/export/labels.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,10 +30,13 @@ def container_sample(lims, process_id, output_file, description=''):
def storage_location(lims, process_id, output_file):
    """Generate storage location label file.

    Args:
        lims: Clarity LIMS connection.
        process_id: Clarity LIMS process id.
        output_file: Open file handle the tab-separated labels are written to.
    """
    process = Process(lims, id=process_id)

    # Write header
    output_file.write('Bakje\tpos\n')

    for artifact in process.analytes()[0]:
        # 'Dx Opslaglocatie' is whitespace-separated: tray id then position.
        storage_location = artifact.samples[0].udf['Dx Opslaglocatie'].split()
        output_file.write('{tray}\t{pos}\n'.format(
            tray=storage_location[0][2:6],  # Select 4 digits from: CB[1-9][1-9][1-9][1-9]KK
            pos=storage_location[1]
        ))
66 changes: 46 additions & 20 deletions clarity_epp/export/manual_pipetting.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,24 +10,38 @@ def samplesheet_purify(lims, process_id, output_file):
"""Create manual pipetting samplesheet for purifying samples."""
output_file.write('Fractienummer\tConcentration(ng/ul)\taantal ng te isoleren\tul gDNA\tul Water\n')
process = Process(lims, id=process_id)
# Find all QC process types
qc_process_types = clarity_epp.export.utils.get_process_types(lims, ['Dx Qubit QC', 'Dx Tecan Spark 10M QC'])

for container in process.output_containers():
artifact = container.placements['1:1'] # assume tubes
input_artifact = artifact.input_artifact_list()[0] # assume one input artifact
sample = artifact.samples[0] # assume one sample per tube

# Find last qc process for artifact
qc_process = lims.get_processes(type=qc_process_types, inputartifactlimsid=input_artifact.id)
if qc_process:
qc_process = sorted(
lims.get_processes(type=qc_process_types, inputartifactlimsid=input_artifact.id),
key=lambda process: int(process.id.split('-')[-1])
)[-1]
for qc_artifact in qc_process.outputs_per_input(input_artifact.id):
if qc_artifact.name.split(' ')[0] == artifact.name:
concentration = float(qc_artifact.udf['Dx Concentratie fluorescentie (ng/ul)'])

else: # Fallback on previous process if qc process not found.
if 'Dx Concentratie fluorescentie (ng/ul)' in input_artifact.udf:
concentration = float(input_artifact.udf['Dx Concentratie fluorescentie (ng/ul)'])
elif 'Dx Concentratie OD (ng/ul)' in input_artifact.udf:
concentration = float(input_artifact.udf['Dx Concentratie OD (ng/ul)'])
elif 'Dx Concentratie (ng/ul)' in sample.udf:
concentration = float(sample.udf['Dx Concentratie (ng/ul)'])

if 'Dx Fractienummer' in sample.udf:
fractienummer = sample.udf['Dx Fractienummer']
else: # giab
fractienummer = sample.name

if 'Dx Concentratie fluorescentie (ng/ul)' in input_artifact.udf:
concentration = float(input_artifact.udf['Dx Concentratie fluorescentie (ng/ul)'])
elif 'Dx Concentratie OD (ng/ul)' in input_artifact.udf:
concentration = float(input_artifact.udf['Dx Concentratie OD (ng/ul)'])
elif 'Dx Concentratie (ng/ul)' in sample.udf:
concentration = float(sample.udf['Dx Concentratie (ng/ul)'])

input_gdna_ng = float(artifact.udf['Dx input hoeveelheid (ng)'])
ul_gdna = input_gdna_ng/concentration
ul_water = 200 - ul_gdna
Expand Down Expand Up @@ -311,6 +325,7 @@ def samplesheet_multiplex_sequence_pool(lims, process_id, output_file):
input_pools = []
total_sample_count = 0
total_load_uL = 0
final_volume = float(process.udf['Final volume'].split()[0])

for input_pool in process.all_inputs():
input_pool_conc = float(input_pool.udf['Dx Concentratie fluorescentie (ng/ul)'])
Expand Down Expand Up @@ -339,11 +354,11 @@ def samplesheet_multiplex_sequence_pool(lims, process_id, output_file):
# Last calculations and print sample
for input_pool in input_pools:
input_pool_load_pM = (float(process.udf['Dx Laadconcentratie (pM)'])/total_sample_count) * input_pool['sample_count']
input_pool_load_uL = 150.0 / (input_pool['pM']/input_pool_load_pM)
input_pool_load_uL = final_volume / (input_pool['pM']/input_pool_load_pM)
total_load_uL += input_pool_load_uL
output_file.write('{0}\t{1:.2f}\n'.format(input_pool['name'], input_pool_load_uL))

tris_HCL_uL = 150 - total_load_uL
tris_HCL_uL = final_volume - total_load_uL
output_file.write('{0}\t{1:.2f}\n'.format('Tris-HCL', tris_HCL_uL))


Expand All @@ -363,20 +378,27 @@ def samplesheet_normalization(lims, process_id, output_file):
sample = input_artifact.samples[0] # assume one sample per input artifact

# Find last qc process for artifact
qc_process = sorted(
lims.get_processes(type=qc_process_types, inputartifactlimsid=input_artifact.id),
key=lambda process: int(process.id.split('-')[-1])
)[-1]
qc_process = lims.get_processes(type=qc_process_types, inputartifactlimsid=input_artifact.id)
if qc_process:
qc_process = sorted(
lims.get_processes(type=qc_process_types, inputartifactlimsid=input_artifact.id),
key=lambda process: int(process.id.split('-')[-1])
)[-1]
qc_artifacts = qc_process.outputs_per_input(input_artifact.id)
else: # Fallback on previous process if qc process not found.
qc_process = input_artifact.parent_process
qc_artifacts = qc_process.all_outputs()

# Find concentration measurement
for qc_artifact in qc_process.outputs_per_input(input_artifact.id):
for qc_artifact in qc_artifacts:
if qc_artifact.name.split(' ')[0] == artifact.name:
concentration = float(qc_artifact.udf['Dx Concentratie fluorescentie (ng/ul)'])

final_volume = float(artifact.udf['Dx Eindvolume (ul)'])
input_ng = float(artifact.udf['Dx Input (ng)'])
if 'Dx pipetteervolume (ul)' in artifact.udf:
input_ng = concentration * float(artifact.udf['Dx pipetteervolume (ul)'])

sample_volume = input_ng / concentration
water_volume = final_volume - sample_volume
evaporate = 'N'
Expand All @@ -390,7 +412,7 @@ def samplesheet_normalization(lims, process_id, output_file):

# Save output under container location (well)
well = ''.join(artifact.location[1].split(':'))
output[well] = (
output_data = (
'{sample}\t{concentration:.1f}\t{sample_volume:.1f}\t{water_volume:.1f}\t'
'{output:.1f}\t{evaporate}\t{container}\t{well}\n'
).format(
Expand All @@ -403,6 +425,10 @@ def samplesheet_normalization(lims, process_id, output_file):
container=artifact.location[0].name,
well=well
)
if well == '11': # Tube
output_file.write(output_data)
else: # plate
output[well] = output_data

for well in clarity_epp.export.utils.sort_96_well_plate(output.keys()):
output_file.write(output[well])
Expand Down Expand Up @@ -455,11 +481,11 @@ def sammplesheet_exonuclease(lims, process_id, output_file):

# Calculate for sample count
for i, item in enumerate(data):
data[i].append(sample_count * item[1] * 1.25)
data[i].append(sample_count * item[1] * 1.30)

# Calculate total
data.append([
'TOTAL (incl. 25% overmaat)',
'TOTAL (incl. 30% overmaat)',
sum([item[1] for item in data]),
sum([item[2] for item in data]),
])
Expand Down Expand Up @@ -555,9 +581,9 @@ def samplesheet_mip_multiplex_pool(lims, process_id, output_file):
if input_artifact['concentration'] < avg_concentration * 0.5:
input_artifact['volume'] = 20
elif input_artifact['concentration'] > avg_concentration * 1.5:
input_artifact['volume'] = 2
input_artifact['volume'] = 1
else:
input_artifact['volume'] = 5
input_artifact['volume'] = 2

if input_artifact['plate_id'] not in input_containers:
input_containers[input_artifact['plate_id']] = {}
Expand Down Expand Up @@ -667,7 +693,7 @@ def samplesheet_pool_magnis_pools(lims, process_id, output_file):
output_file.write('Pool\tContainer\tSample count\tVolume (ul)\n')

# Get input pools, sort by name and print volume
for input_artifact in sorted(process.all_inputs(resolve=True), key=lambda artifact: artifact.name):
for input_artifact in sorted(process.all_inputs(resolve=True), key=lambda artifact: artifact.id):
sample_count = 0
for sample in input_artifact.samples:
if 'Dx Exoomequivalent' in sample.udf:
Expand Down
40 changes: 28 additions & 12 deletions clarity_epp/export/tecan.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,21 +5,37 @@
import clarity_epp.export.utils


def samplesheet(lims, process_id, type, output_file):
    """Create Tecan samplesheet.

    Args:
        lims: Clarity LIMS connection.
        process_id: Clarity LIMS process id.
        type: Samplesheet type, 'qc' or 'purify_normalise' (restricted by argparse).
        output_file: Open file handle to write the samplesheet to.

    NOTE: parameter name 'type' shadows the builtin; kept for backward
    compatibility with existing callers.
    """
    process = Process(lims, id=process_id)
    well_plate = {}

    for placement, artifact in process.output_containers()[0].placements.items():
        placement = ''.join(placement.split(':'))  # 'A:1' -> 'A1'
        well_plate[placement] = artifact

    if type == 'qc':
        output_file.write('Position\tSample\n')
        for well in clarity_epp.export.utils.sort_96_well_plate(well_plate.keys()):
            # Set correct artifact name: strip 'meet_id' suffix if artifact is not a pool
            artifact = well_plate[well]
            if len(artifact.samples) == 1:
                artifact_name = artifact.name.split('_')[0]
            else:
                artifact_name = artifact.name

            output_file.write('{well}\t{artifact}\n'.format(
                well=well,
                artifact=artifact_name
            ))

    elif type == 'purify_normalise':
        output_file.write('SourceTubeID;PositionID;PositionIndex\n')
        for well in clarity_epp.export.utils.sort_96_well_plate(well_plate.keys()):
            artifact = well_plate[well]
            sample = artifact.samples[0]  # assume one sample per tube
            output_file.write('{sample};{well};{index}\n'.format(
                sample=sample.udf['Dx Fractienummer'],
                well=well,
                index=clarity_epp.export.utils.get_well_index(well, one_based=True)
            ))
Loading

0 comments on commit 3be07da

Please sign in to comment.