Skip to content

Commit

Permalink
Merge pull request #66 from UMCUGenetics/release/v1.8.0
Browse files Browse the repository at this point in the history
Release/v1.8.0
  • Loading branch information
rernst authored Oct 3, 2023
2 parents 1b123ab + 0bdb960 commit 00642e7
Show file tree
Hide file tree
Showing 22 changed files with 972 additions and 257 deletions.
54 changes: 45 additions & 9 deletions clarity_epp.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,9 @@ def export_hamilton(args):

def export_illumina(args):
    """Export (updated) illumina samplesheet.

    Thin CLI wrapper: delegates to clarity_epp.export.illumina.update_samplesheet,
    forwarding the CLI-selected conversion tool (bcl2fastq or bclconvert).
    """
    # Diff residue removed: only the 5-argument (conversion_tool) call is kept.
    clarity_epp.export.illumina.update_samplesheet(
        lims, args.process_id, args.artifact_id, args.output_file, args.conversion_tool
    )


def export_labels(args):
Expand All @@ -59,6 +61,8 @@ def export_labels(args):
clarity_epp.export.labels.container_sample(lims, args.process_id, args.output_file, args.description)
elif args.type == 'storage_location':
clarity_epp.export.labels.storage_location(lims, args.process_id, args.output_file)
elif args.type == 'nunc_mix_sample':
clarity_epp.export.labels.nunc_mix_sample(lims, args.process_id, args.output_file)


def export_magnis(args):
Expand Down Expand Up @@ -92,6 +96,8 @@ def export_manual_pipetting(args):
clarity_epp.export.manual_pipetting.samplesheet_pool_samples(lims, args.process_id, args.output_file)
elif args.type == 'pool_magnis_pools':
clarity_epp.export.manual_pipetting.samplesheet_pool_magnis_pools(lims, args.process_id, args.output_file)
elif args.type == 'normalization_mix':
clarity_epp.export.manual_pipetting.samplesheet_normalization_mix(lims, args.process_id, args.output_file)


def export_ped_file(args):
Expand Down Expand Up @@ -151,6 +157,8 @@ def upload_tecan_results(args):
clarity_epp.upload.tecan.results_qc(lims, args.process_id)
elif args.type == 'purify_normalise':
clarity_epp.upload.tecan.results_purify_normalise(lims, args.process_id)
elif args.type == 'purify_mix':
clarity_epp.upload.tecan.results_purify_mix(lims, args.process_id)


def upload_tapestation_results(args):
Expand Down Expand Up @@ -195,12 +203,14 @@ def placement_automatic(args):
clarity_epp.placement.plate.copy_layout(lims, args.process_id)


def placement_artifact_set(args):
    """Change artifact name or udf, depending on the requested type.

    - 'sequence_name': rename artifacts to their sequence name.
    - 'run_id': rename artifacts to the run id.
    - 'norm_udf': set the manual normalization udf.
    Unknown types are silently ignored (argparse restricts the choices).
    """
    # Diff residue removed: stale deleted header of the old
    # placement_artifact_set_name function dropped; new definition kept.
    if args.type == 'sequence_name':
        clarity_epp.placement.artifact.set_sequence_name(lims, args.process_id)
    elif args.type == 'run_id':
        clarity_epp.placement.artifact.set_runid_name(lims, args.process_id)
    elif args.type == 'norm_udf':
        clarity_epp.placement.artifact.set_norm_manual_udf(lims, args.process_id)


def placement_route_artifact(args):
Expand All @@ -219,6 +229,11 @@ def placement_unpooling(args):
clarity_epp.placement.pool.unpooling(lims, args.process_id)


def placement_patient_pools(args):
    """Create patient pools for Dx samples (thin CLI wrapper)."""
    process_id = args.process_id
    clarity_epp.placement.pool.create_patient_pools(lims, process_id)


def placement_complete_step(args):
    """Complete protocol step (Dx Mark protocol complete)."""
    process_id = args.process_id
    clarity_epp.placement.step.finish_protocol_complete(lims, process_id)
Expand All @@ -229,6 +244,11 @@ def placement_tecan(args):
clarity_epp.placement.tecan.place_artifacts(lims, args.process_id)


def placement_pipetting(args):
    """Check pipetted input and output nuncs."""
    process_id = args.process_id
    clarity_epp.placement.pipetting.check_nunc_input_nunc_output(lims, process_id)


if __name__ == "__main__":
parser = argparse.ArgumentParser()
subparser = parser.add_subparsers()
Expand Down Expand Up @@ -271,10 +291,16 @@ def placement_tecan(args):
)
parser_export_illumina.add_argument('process_id', help='Clarity lims process id')
parser_export_illumina.add_argument('artifact_id', help='Clarity lims samplesheet artifact id')
parser_export_illumina.add_argument(
'-c', '--conversion_tool', choices=['bcl2fastq', 'bclconvert'], default='bcl2fastq', help='Illumina conversion tool'
)
parser_export_illumina.set_defaults(func=export_illumina)

parser_export_labels = subparser_export.add_parser('labels', help='Export container labels', parents=[output_parser])
parser_export_labels.add_argument('type', choices=['container', 'container_sample', 'storage_location'], help='Label type')
parser_export_labels.add_argument(
'type',
choices=['container', 'container_sample', 'storage_location', 'nunc_mix_sample'],
help='Label type')
parser_export_labels.add_argument('process_id', help='Clarity lims process id')
parser_export_labels.add_argument('-d', '--description', nargs='?', help='Container name description')
parser_export_labels.set_defaults(func=export_labels)
Expand All @@ -293,7 +319,7 @@ def placement_tecan(args):
choices=[
'purify', 'dilute_library_pool', 'multiplex_library_pool', 'multiplex_sequence_pool', 'normalization',
'capture', 'exonuclease', 'pcr_exonuclease', 'mip_multiplex_pool', 'mip_dilute_pool', 'pool_samples',
'pool_magnis_pools'
'pool_magnis_pools', 'normalization_mix'
],
help='Samplesheet type'
)
Expand Down Expand Up @@ -338,7 +364,9 @@ def placement_tecan(args):

parser_export_tecan = subparser_export.add_parser('tecan', help='Create tecan samplesheets', parents=[output_parser])
parser_export_tecan.add_argument('process_id', help='Clarity lims process id')
parser_export_tecan.add_argument('type', choices=['qc', 'purify_normalise'], help='Samplesheet type')
parser_export_tecan.add_argument(
'type', choices=['qc', 'purify_normalise', 'filling_out_purify', 'normalise'], help='Samplesheet type'
)
parser_export_tecan.set_defaults(func=export_tecan)

parser_export_workflow = subparser_export.add_parser(
Expand Down Expand Up @@ -366,7 +394,7 @@ def placement_tecan(args):

parser_upload_tecan = subparser_upload.add_parser('tecan', help='Upload tecan results')
parser_upload_tecan.add_argument('process_id', help='Clarity lims process id')
parser_upload_tecan.add_argument('type', choices=['qc', 'purify_normalise'], help='Tecan process type')
parser_upload_tecan.add_argument('type', choices=['qc', 'purify_normalise', 'purify_mix'], help='Tecan process type')
parser_upload_tecan.set_defaults(func=upload_tecan_results)

parser_upload_magnis = subparser_upload.add_parser('magnis', help='Upload magnis results')
Expand Down Expand Up @@ -404,9 +432,9 @@ def placement_tecan(args):
parser_placement_automatic.set_defaults(func=placement_automatic)

parser_placement_artifact = subparser_placement.add_parser('artifact', help='Change artifact name to sequence name')
parser_placement_artifact.add_argument('type', choices=['sequence_name', 'run_id'], help='Check type')
parser_placement_artifact.add_argument('type', choices=['sequence_name', 'run_id', 'norm_udf'], help='Check type')
parser_placement_artifact.add_argument('process_id', help='Clarity lims process id')
parser_placement_artifact.set_defaults(func=placement_artifact_set_name)
parser_placement_artifact.set_defaults(func=placement_artifact_set)

parser_placement_route_artifact = subparser_placement.add_parser('route_artifact', help='Route artifact to a workflow')
parser_placement_route_artifact.add_argument('process_id', help='Clarity lims process id')
Expand All @@ -428,9 +456,17 @@ def placement_tecan(args):
parser_placement_unpooling.add_argument('process_id', help='Clarity lims process id')
parser_placement_unpooling.set_defaults(func=placement_unpooling)

parser_placement_patient_pools = subparser_placement.add_parser('patient_pools', help='Create patient pools for Dx samples')
parser_placement_patient_pools.add_argument('process_id', help='Clarity lims process id')
parser_placement_patient_pools.set_defaults(func=placement_patient_pools)

parser_placement_tecan = subparser_placement.add_parser('tecan', help='Placement of samples in tecan step')
parser_placement_tecan.add_argument('process_id', help='Clarity lims process id')
parser_placement_tecan.set_defaults(func=placement_tecan)

parser_placement_pipetting = subparser_placement.add_parser('pipetting', help='Check pipetting input and output')
parser_placement_pipetting.add_argument('process_id', help='Clarity lims process id')
parser_placement_pipetting.set_defaults(func=placement_pipetting)

args = parser.parse_args()
args.func(args)
84 changes: 56 additions & 28 deletions clarity_epp/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,41 +4,69 @@
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
import re
import smtplib
import mimetypes

from genologics.entities import Artifact


def get_sequence_name(artifact):
"""Generate sequence name, for combined or single samples."""
sample_numbers = []
for sample in artifact.samples:
if 'Dx Monsternummer' in sample.udf: # Use monsternummer for Dx samples
sample_numbers.append(sample.udf['Dx Monsternummer'])

if sample_numbers: # Merge monsternummer for Dx samples
sequence_name = '-'.join(sorted(sample_numbers))
else: # Use sample name for non Dx samples
sequence_name = artifact.samples[0].name

def get_sequence_name(sample):
    """Generate a sequence name for a sample.

    Dx samples are named '<familienummer><fam_status><sex><monsternummer>'
    (e.g. 'U123456CM2023D0001'), with fam_status C (Kind) or P (Ouder) and
    sex M (Man), F (Vrouw) or O (Onbekend). Falls back to the plain sample
    name when a required Dx udf is missing, holds an unexpected value, or
    when the sample is already named after its familienummer.
    """
    try:
        # Dict lookups raise KeyError both for missing udfs and for
        # unexpected udf values. The original if/elif chain left fam_status
        # or sex unbound (NameError) on unexpected values, and reading
        # 'Dx Familienummer'/'Dx Monsternummer' outside the try crashed with
        # an uncaught KeyError instead of falling back to sample.name.
        fam_status = {'Kind': 'C', 'Ouder': 'P'}[sample.udf['Dx Familie status']]
        sex = {'Man': 'M', 'Vrouw': 'F', 'Onbekend': 'O'}[sample.udf['Dx Geslacht']]
        familienummer = sample.udf['Dx Familienummer']
        monsternummer = sample.udf['Dx Monsternummer']
    except KeyError:  # Non Dx sample (or incomplete Dx udfs): use sample.name.
        return sample.name

    if sample.name.startswith(familienummer):  # Already named after the family.
        return sample.name

    return '{familienummer}{fam_status}{sex}{monsternummer}'.format(
        familienummer=familienummer,
        fam_status=fam_status,
        sex=sex,
        monsternummer=monsternummer,
    )


def get_sample_artifacts_from_pool(lims, pool_artifact):
    """Get sample artifacts from (sequence) pool.

    Parses the pool demultiplexing endpoint and returns artifacts holding 1
    or 2 samples. Artifacts with 2 samples are only kept when both samples
    belong to the same person (matching 'Dx Persoons ID' udf).
    """
    sample_artifacts = []
    pool_artifact_demux = lims.get(pool_artifact.uri + '/demux')
    for node in pool_artifact_demux.getiterator('artifact'):
        samples_node = node.find('samples')
        # Explicit None check: an ElementTree element without children is
        # falsy, so truthiness testing of find() results is fragile.
        if samples_node is not None and len(samples_node.findall('sample')) in [1, 2]:
            sample_artifact = Artifact(lims, uri=node.attrib['uri'])

            if len(sample_artifact.samples) == 2:
                # Keep 2-sample artifacts only if both samples are from the
                # same person. All three conditions must hold ('and'): the
                # original 'or' chain accepted mismatched pairs and raised
                # KeyError when neither sample carried the udf.
                if (
                    'Dx Persoons ID' in sample_artifact.samples[0].udf and
                    'Dx Persoons ID' in sample_artifact.samples[1].udf and
                    sample_artifact.samples[0].udf['Dx Persoons ID'] == sample_artifact.samples[1].udf['Dx Persoons ID']
                ):
                    sample_artifacts.append(sample_artifact)
            else:
                sample_artifacts.append(sample_artifact)
    return sample_artifacts


def get_mix_sample_barcode(artifact):
    """Generate mix sample shortened barcode name.

    Shortens each sample's 'Dx Monsternummer' udf and concatenates them:
    - 'YYYYDnnnn' style numbers become 2-digit year + serial ('2023D1234' -> '231234').
    - Plain 'D...' numbers are kept as-is.
    Samples without a recognized monsternummer are skipped. Returns an empty
    string when no sample has a usable monsternummer.
    """
    sample_names = {}
    for sample in artifact.samples:
        if 'Dx Monsternummer' in sample.udf:
            monster = sample.udf['Dx Monsternummer']
            if re.match(r'\d{4}D\d+', monster):
                # ('23', '1234') for '2023D1234': year suffix + serial.
                sample_names[sample] = monster[2:4], monster[5:]
            elif monster.startswith('D'):
                sample_names[sample] = monster

    # Iterate the collected names (dict preserves the artifact.samples
    # insertion order) instead of re-indexing by every sample: the original
    # raised KeyError when some samples matched and others did not.
    # ''.join flattens both the tuple values and the plain string values.
    return ''.join(''.join(name) for name in sample_names.values())


def send_email(server, sender, receivers, subject, text, attachment=None):
"""Send emails."""
mail = MIMEMultipart()
Expand Down
Loading

0 comments on commit 00642e7

Please sign in to comment.