Merge pull request #42 from UMCUGenetics/release/v1.6.0
Release/v1.6.0
rernst authored Sep 2, 2022
2 parents b248ae4 + c87f975 commit f0ec206
Showing 10 changed files with 385 additions and 138 deletions.
26 changes: 25 additions & 1 deletion clarity_epp.py
@@ -116,6 +116,11 @@ def export_sample_indications(args):
)


def export_sample_related_mip(args):
"""Export related MIP samples"""
clarity_epp.export.sample.sample_related_mip(lims, args.process_id, args.output_file)


def export_tapestation(args):
"""Export samplesheets for Tapestation machine."""
clarity_epp.export.tapestation.samplesheet(lims, args.process_id, args.output_file)
@@ -130,6 +135,8 @@ def export_workflow(args):
"""Export workflow overview files."""
if args.type == 'magnis':
clarity_epp.export.workflow.helix_magnis(lims, args.process_id, args.output_file)
elif args.type == 'mip':
clarity_epp.export.workflow.helix_mip(lims, args.process_id, args.output_file)


# Upload Functions
@@ -174,6 +181,11 @@ def qc_qubit(args):
clarity_epp.qc.qubit.set_qc_flag(lims, args.process_id)


def qc_sample_mip(args):
"""Set mip data ready udf for wes samples from same person and test."""
clarity_epp.qc.sample.set_mip_data_ready(lims, args.process_id)


# Placement functions
def placement_automatic(args):
"""Copy container layout from previous step."""
@@ -304,6 +316,12 @@ def placement_complete_step(args):
)
parser_export_sample_indications.set_defaults(func=export_sample_indications)

parser_export_sample_related_mip = subparser_export.add_parser(
'sample_related_mip', help='Export related mip samples.', parents=[output_parser]
)
parser_export_sample_related_mip.add_argument('process_id', help='Clarity lims process id')
parser_export_sample_related_mip.set_defaults(func=export_sample_related_mip)

parser_export_tapestation = subparser_export.add_parser(
'tapestation', help='Create tapestation samplesheets', parents=[output_parser]
)
@@ -317,7 +335,7 @@ def placement_complete_step(args):
parser_export_workflow = subparser_export.add_parser(
'workflow', help='Export workflow result file', parents=[output_parser]
)
parser_export_workflow.add_argument('type', choices=['magnis'], help='Workflow type')
parser_export_workflow.add_argument('type', choices=['magnis', 'mip'], help='Workflow type')
parser_export_workflow.add_argument('process_id', help='Clarity lims process id')
parser_export_workflow.set_defaults(func=export_workflow)

@@ -361,6 +379,12 @@ def placement_complete_step(args):
parser_qc_qubit.add_argument('process_id', help='Clarity lims process id')
parser_qc_qubit.set_defaults(func=qc_qubit)

parser_qc_sample_mip = subparser_qc.add_parser(
'sample_mip', help='Set mip data ready udf for wes samples from same person and test.'
)
parser_qc_sample_mip.add_argument('process_id', help='Clarity lims process id')
parser_qc_sample_mip.set_defaults(func=qc_sample_mip)

# placement
parser_placement = subparser.add_parser('placement', help='Container placement functions')
subparser_placement = parser_placement.add_subparsers()
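
Note on the CLI wiring above: v1.6.0 registers three new sub-subcommands (export sample_related_mip, export workflow with type mip, and qc sample_mip), each dispatching through set_defaults(func=...). The sketch below is a minimal, self-contained reproduction of that argparse pattern, not the project's actual module; the handler body and the process id are stand-ins, and the parents=[output_parser] used by the real parser is omitted.

```python
import argparse


def export_sample_related_mip(args):
    # Stand-in handler; the real CLI calls
    # clarity_epp.export.sample.sample_related_mip(lims, args.process_id, args.output_file)
    print('export sample_related_mip for process', args.process_id)


parser = argparse.ArgumentParser(prog='clarity_epp.py')
subparser = parser.add_subparsers()

parser_export = subparser.add_parser('export', help='Export functions')
subparser_export = parser_export.add_subparsers()

# Same registration pattern as the diff: a positional process_id plus func dispatch.
parser_export_sample_related_mip = subparser_export.add_parser(
    'sample_related_mip', help='Export related mip samples.'
)
parser_export_sample_related_mip.add_argument('process_id', help='Clarity lims process id')
parser_export_sample_related_mip.set_defaults(func=export_sample_related_mip)

args = parser.parse_args(['export', 'sample_related_mip', '24-12345'])  # made-up process id
args.func(args)
```
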
17 changes: 11 additions & 6 deletions clarity_epp/export/caliper.py
@@ -165,9 +165,13 @@ def samplesheet_dilute(lims, process_id, output_file):
nM_pool = process.udf['Dx Pool verdunning (nM)']
output_ul = process.udf['Eindvolume (ul)']

for input_artifact in process.all_inputs():
output_artifact = process.outputs_per_input(input_artifact.id, Analyte=True)[0]
# Get input and output plate id from 1 sample, input plate is the same for all samples.
input_artifacts = process.all_inputs()
plate_id_artifact = input_artifacts[0]
plate_id_input = plate_id_artifact.location[0].name
plate_id_output = process.outputs_per_input(plate_id_artifact.id, Analyte=True)[0].location[0].name

for input_artifact in input_artifacts:
# Get QC stats
size = float(input_artifact.udf['Dx Fragmentlengte (bp)'])
concentration = float(input_artifact.udf['Dx Concentratie fluorescentie (ng/ul)'])
@@ -181,9 +185,9 @@ def samplesheet_dilute(lims, process_id, output_file):
well = ''.join(input_artifact.location[1].split(':'))
output[well] = '{name}\t{plate_id_input}\t{well}\t{plate_id_output}\t{volume_dna:.1f}\t{volume_water:.1f}\n'.format(
name=input_artifact.name,
plate_id_input=input_artifact.location[0].name,
plate_id_input=plate_id_input,
well=well,
plate_id_output=output_artifact.location[0].name,
plate_id_output=plate_id_output,
volume_dna=ul_sample,
volume_water=ul_water
)
@@ -196,7 +200,8 @@ def samplesheet_dilute(lims, process_id, output_file):
if well in output:
output_file.write(output[well])
else:
output_file.write('Leeg\tNone\t{well}\t{plate_id_output}\t0\t0\n'.format(
output_file.write('Leeg\t{plate_id_input}\t{well}\t{plate_id_output}\t0\t0\n'.format(
plate_id_input=plate_id_input,
well=well,
plate_id_output=output_artifact.location[0].name,
plate_id_output=plate_id_output,
))
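
Note on the caliper.py hunks above: the input and output plate ids are now looked up once, from the first input artifact, because (per the comment added in the diff) the input plate is the same for all samples, and the real input plate id is also written for empty positions instead of the previous hard-coded None. A rough sketch of that fill pattern, assuming a simplified well sort in place of clarity_epp.export.utils.sort_96_well_plate:

```python
import sys


def sort_96_well_plate(wells):
    # Simplified stand-in for the project's sort helper; the real order may differ.
    # Sorts column-major: A1, B1, ..., H1, A2, ...
    return sorted(wells, key=lambda well: (int(well[1:]), well[0]))


def write_dilution_sheet(rows, plate_id_input, plate_id_output, output_file):
    """rows maps a well id such as 'A1' to a pre-formatted, tab-separated sample line."""
    all_wells = ['{}{}'.format(row, column) for column in range(1, 13) for row in 'ABCDEFGH']
    for well in sort_96_well_plate(all_wells):
        if well in rows:
            output_file.write(rows[well])
        else:
            # Empty position: reuse the plate ids looked up once from the first input artifact.
            output_file.write('Leeg\t{}\t{}\t{}\t0\t0\n'.format(plate_id_input, well, plate_id_output))


write_dilution_sheet(
    {'A1': 'sample1\tplate_in\tA1\tplate_out\t10.0\t40.0\n'}, 'plate_in', 'plate_out', sys.stdout
)
```
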
97 changes: 71 additions & 26 deletions clarity_epp/export/manual_pipetting.py
@@ -233,7 +233,12 @@ def samplesheet_multiplex_library_pool(lims, process_id, output_file):
sample_given_ul.udf['Dx Familie status']
)

output.udf['Dx input pool (ng)'] = round(ng_sample[output.samples[0].name] + ng_sample[output.samples[1].name] + ng_sample[output.samples[2].name], 2)
output.udf['Dx input pool (ng)'] = round(
ng_sample[output.samples[0].name] +
ng_sample[output.samples[1].name] +
ng_sample[output.samples[2].name],
2
)
output.put()

else:
@@ -345,9 +350,10 @@ def samplesheet_multiplex_sequence_pool(lims, process_id, output_file):
def samplesheet_normalization(lims, process_id, output_file):
"""Create manual pipetting samplesheet for normalizing (MIP) samples."""
output_file.write(
'Sample\tConcentration (ng/ul)\tVolume sample (ul)\tVolume water (ul)\tOutput (ng)\tIndampen\n'
'Sample\tConcentration (ng/ul)\tVolume sample (ul)\tVolume water (ul)\tOutput (ng)\tIndampen\tContainer\tWell\n'
)
process = Process(lims, id=process_id)
output = {}

# Find all QC process types
qc_process_types = clarity_epp.export.utils.get_process_types(lims, ['Dx Qubit QC', 'Dx Tecan Spark 10M QC'])
@@ -382,14 +388,24 @@ def samplesheet_normalization(lims, process_id, output_file):
evaporate = 'J'
water_volume = 0

output_file.write('{sample}\t{concentration:.1f}\t{sample_volume:.1f}\t{water_volume:.1f}\t{output:.1f}\t{evaporate}\n'.format(
# Save output under container location (well)
well = ''.join(artifact.location[1].split(':'))
output[well] = (
'{sample}\t{concentration:.1f}\t{sample_volume:.1f}\t{water_volume:.1f}\t'
'{output:.1f}\t{evaporate}\t{container}\t{well}\n'
).format(
sample=sample.name,
concentration=concentration,
sample_volume=sample_volume,
water_volume=water_volume,
output=input_ng,
evaporate=evaporate
))
evaporate=evaporate,
container=artifact.location[0].name,
well=well
)

for well in clarity_epp.export.utils.sort_96_well_plate(output.keys()):
output_file.write(output[well])


def samplesheet_capture(lims, process_id, output_file):
@@ -502,16 +518,16 @@ def samplesheet_mip_multiplex_pool(lims, process_id, output_file):
))

for input_artifact in process.all_inputs(resolve=True):
concentration = None
# Find last qc process for artifact
qc_process = sorted(
lims.get_processes(type=qc_process_types, inputartifactlimsid=input_artifact.id),
key=lambda process: int(process.id.split('-')[-1])
)[-1]
qc_processes = lims.get_processes(type=qc_process_types, inputartifactlimsid=input_artifact.id)

# Find concentration measurement
for qc_artifact in qc_process.outputs_per_input(input_artifact.id):
if qc_artifact.name == input_artifact.name:
concentration = float(qc_artifact.udf['Dx Concentratie fluorescentie (ng/ul)'])
if qc_processes:
qc_process = sorted(qc_processes, key=lambda process: int(process.id.split('-')[-1]))[-1]
# Find concentration measurement
for qc_artifact in qc_process.outputs_per_input(input_artifact.id):
if qc_artifact.name == input_artifact.name:
concentration = float(qc_artifact.udf['Dx Concentratie fluorescentie (ng/ul)'])

input_artifacts.append({
'name': input_artifact.name,
@@ -521,13 +537,21 @@ def samplesheet_mip_multiplex_pool(lims, process_id, output_file):
'manual': input_artifact.samples[0].udf['Dx Handmatig']
})

# Calculate avg concentration for all non manual samples
concentrations = [input_artifact['concentration'] for input_artifact in input_artifacts if not input_artifact['manual']]
# Calculate avg concentration for all non manual samples with a measured concentration
concentrations = [
input_artifact['concentration'] for input_artifact in input_artifacts
if input_artifact['concentration'] and not input_artifact['manual']
]
avg_concentration = sum(concentrations) / len(concentrations)

# Set volume and store input_artifact per plate to be able print samplesheet sorted on plate and well
input_containers = {}
for input_artifact in input_artifacts:
# Set avg concentration as concentration for artifacts without a measured concentration
if not input_artifact['concentration']:
input_artifact['concentration'] = avg_concentration

# Set volumes
if input_artifact['concentration'] < avg_concentration * 0.5:
input_artifact['volume'] = 20
elif input_artifact['concentration'] > avg_concentration * 1.5:
@@ -544,7 +568,7 @@ def samplesheet_mip_multiplex_pool(lims, process_id, output_file):
input_artifacts = input_containers[input_container]
for well in clarity_epp.export.utils.sort_96_well_plate(input_artifacts.keys()):
input_artifact = input_artifacts[well]
output_file.write('{sample}\t{volume}\t{plate_id}\t{well_id}\t{concentration}\t{manual}\n'.format(
output_file.write('{sample}\t{volume}\t{plate_id}\t{well_id}\t{concentration:.3f}\t{manual}\n'.format(
sample=input_artifact['name'],
volume=input_artifact['volume'],
plate_id=input_artifact['plate_id'],
@@ -559,7 +583,9 @@ def samplesheet_mip_pool_dilution(lims, process_id, output_file):
process = Process(lims, id=process_id)

# Write header
output_file.write('{sample}\t{ul_sample_10}\t{ul_EB_10}\t{ul_sample_20}\t{ul_EB_20}\t{ul_sample_40}\t{ul_EB_40}\t\n'.format(
output_file.write((
'{sample}\t{ul_sample_10}\t{ul_EB_10}\t{ul_sample_20}\t{ul_EB_20}\t{ul_sample_40}\t{ul_EB_40}\t\n'
).format(
sample='Sample',
ul_sample_10='ul Sample (10 ul)',
ul_EB_10='ul EB buffer (10 ul)',
@@ -573,11 +599,14 @@ def samplesheet_mip_pool_dilution(lims, process_id, output_file):
concentration = float(input_artifact.udf['Dx Concentratie fluorescentie (ng/ul)'])
fragment_length = float(input_artifact.udf['Dx Fragmentlengte (bp)'])

dna = (concentration * (10.0**3.0 / 1.0) * (1.0 / 649.0) * (1.0 / fragment_length) ) * 1000.0
dna = (concentration * (10.0**3.0 / 1.0) * (1.0 / 649.0) * (1.0 / fragment_length)) * 1000.0
ul_sample = 2 / dna * 10
ul_EB = 10 - ul_sample

output_file.write('{sample}\t{ul_sample_10:.2f}\t{ul_EB_10:.2f}\t{ul_sample_20:.2f}\t{ul_EB_20:.2f}\t{ul_sample_40:.2f}\t{ul_EB_40:.2f}\t\n'.format(
output_file.write((
'{sample}\t{ul_sample_10:.2f}\t{ul_EB_10:.2f}\t{ul_sample_20:.2f}\t{ul_EB_20:.2f}\t'
'{ul_sample_40:.2f}\t{ul_EB_40:.2f}\t\n'
).format(
sample=input_artifact.name,
ul_sample_10=ul_sample,
ul_EB_10=ul_EB,
@@ -593,7 +622,7 @@ def samplesheet_pool_samples(lims, process_id, output_file):
process = Process(lims, id=process_id)

# print header
output_file.write('Sample\tContainer\tWell\tPool\n')
output_file.write('Sample\tContainer\tWell\tPool\tVolume (ul)\n')

# Get all input artifact and store per container
input_containers = {}
@@ -611,12 +640,21 @@ def samplesheet_pool_samples(lims, process_id, output_file):
for input_container in sorted(input_containers.keys()):
input_artifacts = input_containers[input_container]
for well in clarity_epp.export.utils.sort_96_well_plate(input_artifacts.keys()):
input_artifact = input_artifacts[well]
input_sample = input_artifact.samples[0] # Asume one sample

if 'Dx Exoomequivalent' in input_sample.udf:
volume = 5 * input_sample.udf['Dx Exoomequivalent']
else:
volume = 5

output_file.write(
'{sample}\t{container}\t{well}\t{pool}\n'.format(
sample=input_artifacts[well].name,
container=input_artifacts[well].location[0].name,
'{sample}\t{container}\t{well}\t{pool}\t{volume}\n'.format(
sample=input_artifact.name,
container=input_artifact.location[0].name,
well=well,
pool=process.outputs_per_input(input_artifacts[well].id, Analyte=True)[0].name
pool=process.outputs_per_input(input_artifact.id, Analyte=True)[0].name,
volume=volume
)
)
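
Note on the hunk above and the magnis pools hunk below: both now honor the optional Dx Exoomequivalent sample UDF. For the sample pooling sheet the per-sample volume becomes 5 ul times the equivalent (default 5 ul), and for the magnis pools sheet each sample contributes its equivalent to sample_count, which feeds the pool volume of sample_count * 1.25 ul. A small sketch with plain dicts standing in for the genologics sample.udf mapping:

```python
def pool_volume(sample_udf, base_volume=5):
    # Optional UDF; a sample without it counts as one exome equivalent.
    return base_volume * sample_udf.get('Dx Exoomequivalent', 1)


def pool_sample_count(sample_udfs):
    return sum(udf.get('Dx Exoomequivalent', 1) for udf in sample_udfs)


print(pool_volume({'Dx Exoomequivalent': 2}))                     # 10 ul
print(pool_volume({}))                                            # 5 ul
print(pool_sample_count([{'Dx Exoomequivalent': 2}, {}]) * 1.25)  # 3.75 ul pool volume
```
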

Expand All @@ -630,11 +668,18 @@ def samplesheet_pool_magnis_pools(lims, process_id, output_file):

# Get input pools, sort by name and print volume
for input_artifact in sorted(process.all_inputs(resolve=True), key=lambda artifact: artifact.name):
sample_count = 0
for sample in input_artifact.samples:
if 'Dx Exoomequivalent' in sample.udf:
sample_count += sample.udf['Dx Exoomequivalent']
else:
sample_count += 1

output_file.write(
'{pool}\t{container}\t{sample_count}\t{volume}\n'.format(
pool=input_artifact.name,
container=input_artifact.container.name,
sample_count=len(input_artifact.samples),
volume=len(input_artifact.samples) * 1.25
sample_count=sample_count,
volume=sample_count * 1.25
)
)
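
Note on samplesheet_mip_pool_dilution above: the dna value is the pool molarity in nM, derived from the fluorescence concentration (ng/ul) and the average fragment length using roughly 649 g/mol per base pair of double-stranded DNA, i.e. nM = ng/ul * 1e6 / (649 * bp); the 10 ul dilution then works out to a 2 nM target. A short worked example with made-up input numbers:

```python
concentration = 4.0      # ng/ul  (made-up 'Dx Concentratie fluorescentie (ng/ul)' value)
fragment_length = 400.0  # bp     (made-up 'Dx Fragmentlengte (bp)' value)

# Same expression as the diff: ng/ul -> nM via 649 g/mol per base pair.
dna = (concentration * (10.0**3.0 / 1.0) * (1.0 / 649.0) * (1.0 / fragment_length)) * 1000.0
ul_sample = 2 / dna * 10  # volume of pool that gives 2 nM in a 10 ul dilution
ul_EB = 10 - ul_sample    # top up to 10 ul with EB buffer

print('{:.2f} nM -> {:.2f} ul sample + {:.2f} ul EB'.format(dna, ul_sample, ul_EB))
# 15.41 nM -> 1.30 ul sample + 8.70 ul EB
```
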
38 changes: 33 additions & 5 deletions clarity_epp/export/sample.py
@@ -1,7 +1,10 @@
"""Sample export functions."""
import datetime

from genologics.entities import Process

import clarity_epp.export.utils
from .. import get_sequence_name


def removed_samples(lims, output_file):
@@ -144,11 +147,36 @@ def sample_indications(lims, output_file, artifact_name=None, sequencing_run=Non
if samples:
output_file.write('Sample\tIndication\n')
for sample_name, sample in samples.items():
output_file.write(
'{sample}\t{indication}\n'.format(
sample=sample_name,
indication=sample.udf['Dx Onderzoeksindicatie'].split(';')[0] # select newest indication
if 'Dx Onderzoeksindicatie' in sample.udf:
output_file.write(
'{sample}\t{indication}\n'.format(
sample=sample_name,
indication=sample.udf['Dx Onderzoeksindicatie'].split(';')[0] # select newest indication
)
)
else:
output_file.write(
'{sample}\t{indication}\n'.format(
sample=sample_name,
indication='unkown_indication'
)
)
)
else:
print("no_sample_found")


def sample_related_mip(lims, process_id, output_file):
"""Export related mip samples for all samples in process."""
process = Process(lims, id=process_id)

# Create output item per artifact
output = []
for artifact in process.all_inputs():
output.append('{sample},{related_mip}'.format(
sample=artifact.name,
related_mip=artifact.samples[0].udf['Dx mip']
))

# Print output items, last line only contains a line end to work with fingerprintDB.
output_file.write(',\n'.join(output))
output_file.write('\n')
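
Note on the sample_related_mip export above: it writes one sample,related-mip pair per input artifact, taken from the Dx mip sample UDF, and the ',\n'.join plus the final write('\n') leaves a trailing comma on every data line except the last while ending the file with a bare newline, which the code comment attributes to fingerprintDB's expectations. A minimal illustration with made-up sample names:

```python
import io

# Made-up (sample name, 'Dx mip' UDF) pairs standing in for the LIMS lookups.
pairs = [('2022D00001', '2021D00009'), ('2022D00002', '2021D00010')]

output = ['{sample},{related_mip}'.format(sample=sample, related_mip=related_mip)
          for sample, related_mip in pairs]

output_file = io.StringIO()
output_file.write(',\n'.join(output))
output_file.write('\n')

print(repr(output_file.getvalue()))
# '2022D00001,2021D00009,\n2022D00002,2021D00010\n'
```
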
(Remaining changed files were not loaded in this view.)
