
chore: corrects spelling errors reported by codespell hook
ns-rse committed Nov 22, 2024
1 parent e2a9803 commit 2ae9aa6
Showing 1 changed file with 14 additions and 14 deletions.
28 changes: 14 additions & 14 deletions isoslam/all_introns_counts_and_info.py
@@ -118,7 +118,7 @@ def fragment_iterator(read_iterator):
with open(argv_as_dictionary["outfile_tsv"], "w") as outfile:
# Add column headers
outfile.write(
"Read_UID\tTranscript_id\tStart\tEnd\tChr\tStrand\tAssignment\tConversions\tConvertable\tCoverage\n"
"Read_UID\tTranscript_id\tStart\tEnd\tChr\tStrand\tAssignment\tConversions\tConvertible\tCoverage\n"
)
results = pd.DataFrame()

@@ -143,7 +143,7 @@ def fragment_iterator(read_iterator):

if i_progress == 10000:
# E.debug(str(i_total_progress) + " pairs processed")
- # E.debug(str(i) + "spliced/retained pairs proccessed")
+ # E.debug(str(i) + "spliced/retained pairs processed")
i_progress = 0

read1_start = read1.reference_start
@@ -268,7 +268,7 @@ def fragment_iterator(read_iterator):
continue
first_matched += 1

- # Create a set of tupples: (tx_id,(start,end))
+ # Create a set of tuples: (tx_id,(start,end))
# Retained
assign_conversions_to_retained = []

@@ -355,7 +355,7 @@ def fragment_iterator(read_iterator):
# in the forward read.
if strand == "+":
# pass if mapped to +ve transcript
- convertable = set()
+ convertible = set()
# create a set (list that only allows unique values to be added)
# we will add the genome_pos at each point for both reads
# len(coverage) will be the # of uniquely covered positions
@@ -376,7 +376,7 @@ def fragment_iterator(read_iterator):
read_seq = forward_read.query_sequence[read_pos]

if genome_seq.upper() == "T":
- convertable.add(genome_pos)
+ convertible.add(genome_pos)

if read_seq == "C" and genome_seq == "t":
variants_at_position = list(
@@ -400,7 +400,7 @@ def fragment_iterator(read_iterator):
read_seq = reverse_read.query_sequence[read_pos]

if genome_seq.upper() == "A":
- convertable.add(genome_pos)
+ convertible.add(genome_pos)

if read_seq == "G" and genome_seq == "a":
variants_at_position = list(
@@ -417,7 +417,7 @@ def fragment_iterator(read_iterator):

elif strand == "-":
# pass if mapped to -ve transcript
- convertable = set()
+ convertible = set()
coverage = set()
converted_position = set()
for base in forward_read.get_aligned_pairs(with_seq=True):
@@ -430,7 +430,7 @@ def fragment_iterator(read_iterator):
read_seq = forward_read.query_sequence[read_pos]

if genome_seq.upper() == "A":
- convertable.add(genome_pos)
+ convertible.add(genome_pos)

if read_seq == "G" and genome_seq == "a":
variants_at_position = list(
@@ -455,7 +455,7 @@ def fragment_iterator(read_iterator):
read_seq = reverse_read.query_sequence[read_pos]

if genome_seq.upper() == "T":
- convertable.add(genome_pos)
+ convertible.add(genome_pos)

if read_seq == "C" and genome_seq == "t":
variants_at_position = list(
@@ -476,15 +476,15 @@ def fragment_iterator(read_iterator):
i_output += 1

# Stream output as a tsv
- # Format: read_uid, transcript_id, start, end, ret/spl, conversions, convertable, coverage
+ # Format: read_uid, transcript_id, start, end, ret/spl, conversions, convertible, coverage
# A read pair will cover multiple lines if it matches multiple events (but metadata will be same)
# ns-rse : Add in building Pandas dataframe so the function can return something that is testable
for transcript_id, position in assign_conversions_to_retained:
start, end, chr, strand = position
outfile.write(
f"{i_output}\t{transcript_id}\t"
f"{start}\t{end}\t{chr}\t{strand}\tRet\t{len(converted_position)}\t"
f"{len(convertable)}\t{len(coverage)}\n"
f"{len(convertible)}\t{len(coverage)}\n"
)
row = pd.DataFrame(
[
@@ -497,7 +497,7 @@ def fragment_iterator(read_iterator):
"Strand": strand,
"Assignment": "Ret",
"Conversions": len(converted_position),
"Convertable": len(convertable),
"Convertible": len(convertible),
"Coverage": len(coverage),
}
]
@@ -509,7 +509,7 @@ def fragment_iterator(read_iterator):
outfile.write(
f"{i_output}\t{transcript_id}\t"
f"{start}\t{end}\t{chr}\t{strand}\tSpl\t{len(converted_position)}\t"
f"{len(convertable)}\t{len(coverage)}\n"
f"{len(convertible)}\t{len(coverage)}\n"
)
row = pd.DataFrame(
[
@@ -522,7 +522,7 @@ def fragment_iterator(read_iterator):
"Strand": strand,
"Assignment": "Spl",
"Conversions": len(converted_position),
"Convertable": len(convertable),
"Convertible": len(convertible),
"Coverage": len(coverage),
}
]
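For reference, the rename from "Convertable" to "Convertible" appears both in the TSV header and in the per-row DataFrame built above, so anything reading the output needs the new column name. Below is a minimal sketch of such a consumer, assuming the output was written to results.tsv (a placeholder for whatever path was passed as outfile_tsv); the per-assignment summary is an invented illustration, not part of IsoSLAM.

import pandas as pd

# Load the TSV produced by all_introns_counts_and_info.py.
# "results.tsv" is a placeholder for the path supplied via outfile_tsv.
results = pd.read_csv("results.tsv", sep="\t")

# Columns after this commit:
# Read_UID, Transcript_id, Start, End, Chr, Strand, Assignment,
# Conversions, Convertible, Coverage
print(results.columns.tolist())

# Mean conversion rate per assignment (Ret/Spl), skipping rows with no
# convertible positions to avoid dividing by zero.
covered = results[results["Convertible"] > 0]
rate = covered["Conversions"] / covered["Convertible"]
print(covered.assign(rate=rate).groupby("Assignment")["rate"].mean())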
