From a8bc653bc4f6f5a08d060053095e5aabe11d12fa Mon Sep 17 00:00:00 2001 From: Marc Verhagen Date: Wed, 30 Dec 2020 14:24:49 -0500 Subject: [PATCH] Updating built-in functions (issue #79). --- components/blinker/main.py | 47 ++++++++++--------- components/btime/btime-wrapper.py | 3 +- components/classifier/vectors.py | 9 ++-- components/common_modules/chunks.py | 9 ++-- components/common_modules/component.py | 3 +- components/common_modules/constituent.py | 3 +- components/common_modules/sentence.py | 9 ++-- components/common_modules/tags.py | 9 ++-- components/common_modules/tokens.py | 5 +- components/common_modules/tree.py | 23 ++++----- components/evita/features.py | 15 +++--- components/evita/settings.py | 7 +-- components/evita/wrapper.py | 7 +-- components/merging/sputlink/graph.py | 3 +- components/merging/sputlink/objects.py | 17 +++---- components/merging/sputlink/rules/closure.py | 31 ++++++------ .../merging/sputlink/rules/generateRules.py | 31 ++++++------ components/merging/sputlink/rules/objects.py | 47 ++++++++++--------- components/preprocessing/chunker.py | 15 +++--- components/preprocessing/tokenizer.py | 23 ++++----- components/simpletime/main.py | 9 ++-- components/slinket/main.py | 5 +- deprecated/get_lexes.py | 5 +- deprecated/get_tags.py | 3 +- deprecated/gui.py | 37 ++++++++------- deprecated/sputlink/rule_creation/closure.py | 31 ++++++------ .../sputlink/rule_creation/generateRules.py | 31 ++++++------ deprecated/sputlink/rule_creation/objects.py | 47 ++++++++++--------- deprecated/xml_parser.py | 9 ++-- docmodel/document.py | 35 +++++++------- docmodel/source_parser.py | 7 +-- docs/notes/python3.md | 30 ++++++++++-- library/blinker/blinker_rule_loader.py | 7 +-- library/classifier/create_model.py | 3 +- library/classifier/create_vectors.py | 7 +-- library/evita/build_event_nominals1.py | 21 +++++---- library/evita/compile_patterns.py | 3 +- library/s2t/s2t_rule_loader.py | 5 +- library/slinket/alinkPredicates.py | 3 +- library/slinket/create_dicts.py | 5 +- library/slinket/slinkPredicates.py | 3 +- library/slinket/slinketPatterns.py | 3 +- tarsqi.py | 24 +++++----- testing/regression.py | 22 +++++---- testing/run_tests.py | 37 ++++++++------- testing/scripts/regression/evita/compare.py | 5 +- testing/scripts/regression/slinket/compare.py | 13 ++--- utilities/FSA-org.py | 11 +++-- utilities/FSA.py | 8 ++-- utilities/find.py | 3 +- utilities/lif.py | 5 +- utilities/logger.py | 9 ++-- utilities/make_documentation.py | 5 +- utilities/mallet.py | 13 ++--- utilities/wordnet.py | 7 +-- 55 files changed, 431 insertions(+), 356 deletions(-) diff --git a/components/blinker/main.py b/components/blinker/main.py index 88de36a..c138ca6 100644 --- a/components/blinker/main.py +++ b/components/blinker/main.py @@ -5,6 +5,7 @@ """ from __future__ import absolute_import +from __future__ import print_function import re from utilities import logger @@ -138,7 +139,7 @@ def _run_blinker(self): sentence = self.doctree[si] r3_main_event = None if _DEBUG: - print "processing sentence", si + print("processing sentence", si) # iterate over elements within a sentence for i in range(len(sentence)): @@ -168,7 +169,7 @@ def _run_blinker(self): and element.isVerbChunk() \ and event.attrs['class'] == 'REPORTING': if _DEBUG: - print "applying type 5 rules" + print("applying type 5 rules") self._apply_type5_rules(sentence, event, i) # R3: if no main event in sentence @@ -195,7 +196,7 @@ def _apply_type3_rules(self, event1, event2): event2.attrs[EIID], "%s-Rule-%s" % (BLINKER, rule.id)) if _DEBUG: - 
print "RULE %s fired!" % rule.rule_number + print("RULE %s fired!" % rule.rule_number) return def _apply_type5_rules(self, sentence, event1, position): @@ -219,11 +220,11 @@ def _apply_type5_rules(self, sentence, event1, position): # forward if _DEBUG: - print "inside rule application function" + print("inside rule application function") sentence.pretty_print() for i in range(position+1, len(sentence)): if _DEBUG: - print "processing element", i + print("processing element", i) element = sentence[i] # quote @@ -257,7 +258,7 @@ def _apply_type5_rules(self, sentence, event1, position): event2.attrs[EIID], "%s-Rule-%s" % (BLINKER, rule.id)) if _DEBUG: - print "RULE %s fired!" % rule.rule_number + print("RULE %s fired!" % rule.rule_number) # apply the first matching rule return @@ -268,7 +269,7 @@ def _apply_type5_rules(self, sentence, event1, position): direct = 'INDIRECT' for i in range(position-1, -1, -1): # ..,3,2,1,0 if _DEBUG: - print "processing element", i + print("processing element", i) element = sentence[i] # quote @@ -286,20 +287,20 @@ def _apply_type5_rules(self, sentence, event1, position): if direct in rule.attrs['sentType']] if _DEBUG: _pp_events(event1, event2) - print "Applying rules for sentence type:", \ - direct, len(current_rules), "rules" + print("Applying rules for sentence type:", \ + direct, len(current_rules), "rules") for rule in current_rules: # if attribute not set in the rule, accept any value for att in ['class', 'tense', 'aspect']: if 'arg2.'+att not in rule.attrs: rule.attrs['arg2.'+att] = [event2.attrs[att]] if _DEBUG: - print "RULE %s (%s):" % (rule.rule_number, - rule.attrs['sentType'][0]) - print rule.attrs['arg1.class'], rule.attrs['arg1.tense'], \ - rule.attrs['arg1.aspect'] - print rule.attrs['arg2.class'], rule.attrs['arg2.tense'], \ - rule.attrs['arg2.aspect'] + print("RULE %s (%s):" % (rule.rule_number, + rule.attrs['sentType'][0])) + print(rule.attrs['arg1.class'], rule.attrs['arg1.tense'], \ + rule.attrs['arg1.aspect']) + print(rule.attrs['arg2.class'], rule.attrs['arg2.tense'], \ + rule.attrs['arg2.aspect']) # check that specified values match if event2.attrs['class'] in rule.attrs['arg2.class'] and \ event2.attrs['tense'] in rule.attrs['arg2.tense'] and \ @@ -311,7 +312,7 @@ def _apply_type5_rules(self, sentence, event1, position): event2.attrs['eiid'], origin) if _DEBUG: - print "RULE %s fired!" % rule.rule_number + print("RULE %s fired!" 
% rule.rule_number) # apply the first matching rule return @@ -346,8 +347,8 @@ def _apply_event_anchoring_rules(self, sentence, timex, i): eiid = event.attrs[EIID] tid = timex.attrs[TID] if _DEBUG: - print "FOUND: [%s] %s [%s] --> %s" % \ - (event.dtrs[0].getText(), signal, timex.getText(), rel) + print("FOUND: [%s] %s [%s] --> %s" % \ + (event.dtrs[0].getText(), signal, timex.getText(), rel)) self._add_tlink(rel, eiid, tid, "%s-Type-2-%s" % (BLINKER, signal)) return @@ -439,8 +440,8 @@ def _fix_timex_val(date): def _pp_events(event1, event2): - print(event1.dtrs[0].getText(), event2.dtrs[0].getText()) - print(' e1', event1.dtrs[0].getText(), event1.attrs['class'], - event1.attrs['tense'], event1.attrs['aspect']) - print(' e2', event2.dtrs[0].getText(), event2.attrs['class'], - event2.attrs['tense'], event2.attrs['aspect']) + print((event1.dtrs[0].getText(), event2.dtrs[0].getText())) + print((' e1', event1.dtrs[0].getText(), event1.attrs['class'], + event1.attrs['tense'], event1.attrs['aspect'])) + print((' e2', event2.dtrs[0].getText(), event2.attrs['class'], + event2.attrs['tense'], event2.attrs['aspect'])) diff --git a/components/btime/btime-wrapper.py b/components/btime/btime-wrapper.py index 181b0e2..513e0f0 100644 --- a/components/btime/btime-wrapper.py +++ b/components/btime/btime-wrapper.py @@ -25,6 +25,7 @@ class will at least have methods text(), tag(), begin(), end() and __unicode__() from __future__ import absolute_import +from __future__ import print_function import sys, codecs @@ -50,7 +51,7 @@ def parse_file(filename): def parse_sentence(sentence): """This is where BTime gets folded in.""" - print "processing \"%s\"" % ' '.join([t.text for t in sentence]) + print("processing \"%s\"" % ' '.join([t.text for t in sentence])) diff --git a/components/classifier/vectors.py b/components/classifier/vectors.py index 00b820d..9c5765e 100644 --- a/components/classifier/vectors.py +++ b/components/classifier/vectors.py @@ -22,6 +22,7 @@ """ from __future__ import absolute_import +from __future__ import print_function import os, codecs, textwrap from components.common_modules.tree import create_tarsqi_tree @@ -411,18 +412,18 @@ def _set_feature_SIGNAL(self): def _debug(text): if DEBUG: - print text + print(text) def _debug_vector(vector_type, vector, indent=''): if DEBUG: text = "%s %s%s" % (vector_type, vector, indent) for line in textwrap.wrap(text, 100): - print "%s%s" % (indent, line) + print("%s%s" % (indent, line)) def _debug_leafnodes(element, s): if DEBUG: - print("\n%s %s\n" % (element.begin, s)) + print(("\n%s %s\n" % (element.begin, s))) for n in s.leaf_nodes(): - print ' ', n + print(' ', n) diff --git a/components/common_modules/chunks.py b/components/common_modules/chunks.py index 7102182..57ffaee 100644 --- a/components/common_modules/chunks.py +++ b/components/common_modules/chunks.py @@ -8,6 +8,7 @@ """ from __future__ import absolute_import +from __future__ import print_function import types from xml.sax.saxutils import quoteattr @@ -198,9 +199,9 @@ def isChunk(self): return True def pretty_print(self, indent=0): - print "%s<%s position=%s %d-%d checkedEvents=%s event=%s eid=%s>" % \ + print("%s<%s position=%s %d-%d checkedEvents=%s event=%s eid=%s>" % \ (indent * ' ', self.__class__.__name__, self.position, - self.begin, self.end, self.checkedEvents, self.event, self.eid) + self.begin, self.end, self.checkedEvents, self.event, self.eid)) for tok in self.dtrs: tok.pretty_print(indent + 2) @@ -598,7 +599,7 @@ def _identify_substring(self, sentence_slice, fsa_list): 
lenSubstring = fsa.acceptsSubstringOf(sentence_slice) if lenSubstring: if DEBUG: - print "Succesful application of %s" % fsa.fsaname + print("Succesful application of %s" % fsa.fsaname) return (lenSubstring, fsaCounter) else: return (0, fsaCounter) @@ -635,7 +636,7 @@ def _debug_vcf(vcf_list): if len(vcf_list) > 0 and DEBUG: for vcf in vcf_list: if DEBUG: - print ' ', + print(' ', end=' ') vcf.pp() for vcf in vcf_list: logger.debug(vcf.as_verbose_string()) diff --git a/components/common_modules/component.py b/components/common_modules/component.py index 916acf7..c6c64e6 100644 --- a/components/common_modules/component.py +++ b/components/common_modules/component.py @@ -6,6 +6,7 @@ from __future__ import absolute_import +from __future__ import print_function from utilities import logger @@ -22,7 +23,7 @@ def process(self, infile, outfile): def pp_doctree(self, componentName): """Print the document tree. Assumes there is a doctree instance variable that contains a TarsqiTree object.""" - print "\n--------- DOCUMENT TREE for %s ----------" % componentName + print("\n--------- DOCUMENT TREE for %s ----------" % componentName) self.doctree.pretty_print() diff --git a/components/common_modules/constituent.py b/components/common_modules/constituent.py index d25da37..ed50fe0 100644 --- a/components/common_modules/constituent.py +++ b/components/common_modules/constituent.py @@ -1,4 +1,5 @@ from __future__ import absolute_import +from __future__ import print_function from types import ListType, TupleType from pprint import pprint @@ -274,7 +275,7 @@ def print_vars(self): pprint(vars(self)) def pretty_print(self): - print "<>" + print("<>") # SLINKET METHODS # There is some serious redundancy here, refactor these methods. diff --git a/components/common_modules/sentence.py b/components/common_modules/sentence.py index b4b3630..4f6274e 100644 --- a/components/common_modules/sentence.py +++ b/components/common_modules/sentence.py @@ -1,6 +1,7 @@ """Contains functionality specific to sentences in a tree.""" from __future__ import absolute_import +from __future__ import print_function from utilities import logger from components.common_modules.constituent import Constituent @@ -51,11 +52,11 @@ def set_event_list(self): def pretty_print(self, tree=True, verbose=False, indent=0): """Pretty print the sentence by pretty printing all daughters""" if verbose: - print "SENTENCE %s\n" % self.position - print " parent = %s" % self.parent - print " eventList = %s\n" % self.eventList + print("SENTENCE %s\n" % self.position) + print(" parent = %s" % self.parent) + print(" eventList = %s\n" % self.eventList) else: - print "%s" % (indent*' ', self.position, self.begin, self.end) + print("%s" % (indent*' ', self.position, self.begin, self.end)) if tree or verbose: for dtr in self.dtrs: dtr.pretty_print(indent=indent+2) diff --git a/components/common_modules/tags.py b/components/common_modules/tags.py index 57c00f7..094b338 100644 --- a/components/common_modules/tags.py +++ b/components/common_modules/tags.py @@ -6,6 +6,7 @@ from __future__ import absolute_import +from __future__ import print_function from library.main import LIBRARY from components.common_modules.constituent import Constituent from utilities import logger @@ -100,9 +101,9 @@ def isEvent(self): def pretty_print(self, indent=0): (eid, eiid, cl) = (self.attrs.get('eid'), self.attrs.get('eiid'), self.attrs.get('class')) - print "%s<%s position=%s %d-%d eid=%s eiid=%s class=%s>" % \ + print("%s<%s position=%s %d-%d eid=%s eiid=%s class=%s>" % \ (indent * ' 
', self.__class__.__name__, self.position, - self.begin, self.end, eid, eiid, cl) + self.begin, self.end, eid, eiid, cl)) for dtr in self.dtrs: dtr.pretty_print(indent+2) @@ -133,9 +134,9 @@ def isTimex(self): return True def pretty_print(self, indent=0): - print "%s<%s tid=%s type=%s value=%s>" % \ + print("%s<%s tid=%s type=%s value=%s>" % \ (indent * ' ', self.__class__.__name__, self.attrs.get('tid'), - self.attrs.get('type'), self.attrs.get('value')) + self.attrs.get('type'), self.attrs.get('value'))) for dtr in self.dtrs: dtr.pretty_print(indent+2) diff --git a/components/common_modules/tokens.py b/components/common_modules/tokens.py index c403036..56abf1a 100644 --- a/components/common_modules/tokens.py +++ b/components/common_modules/tokens.py @@ -1,5 +1,6 @@ from __future__ import absolute_import +from __future__ import print_function from library import forms from library.main import LIBRARY from components.evita.event import Event @@ -120,9 +121,9 @@ def pretty_print(self, indent=0): eid = self.event_tag.attrs.get('eid') eiid = self.event_tag.attrs.get('eiid') event_string = " eid=%s eiid=%s" % (eid, eiid) - print "%s<%s position=%d %d-%d pos=%s text=%s%s>" % \ + print("%s<%s position=%d %d-%d pos=%s text=%s%s>" % \ (indent * ' ', self.__class__.__name__, self.position, - self.begin, self.end, self.pos, self.getText(), event_string) + self.begin, self.end, self.pos, self.getText(), event_string)) class AdjectiveToken(Token): diff --git a/components/common_modules/tree.py b/components/common_modules/tree.py index 4df8d54..dec0bbf 100644 --- a/components/common_modules/tree.py +++ b/components/common_modules/tree.py @@ -1,6 +1,7 @@ """Contains the TarsqiTree class.""" from __future__ import absolute_import +from __future__ import print_function import sys import re from xml.sax.saxutils import escape, quoteattr @@ -295,7 +296,7 @@ def as_tree_element(self): return tree_element def pp(self, indent=0): - print "%s%s" % (indent * ' ', self) + print("%s%s" % (indent * ' ', self)) for dtr in self.dtrs: dtr.pp(indent + 1) @@ -437,8 +438,8 @@ def pp(self): def pretty_print(self): """Pretty printer that prints all instance variables and a neat representation of the sentences.""" - print "\n\n" % self.tarsqidoc.sourcedoc.filename - print "len(dtrs) = %s" % (len(self.dtrs)) + print("\n\n" % self.tarsqidoc.sourcedoc.filename) + print("len(dtrs) = %s" % (len(self.dtrs))) self.pretty_print_tagged_events_dict() self.pretty_print_sentences() self.pretty_print_links(self.alinks) @@ -446,23 +447,23 @@ def pretty_print(self): self.pretty_print_links(self.tlinks) def pretty_print_tagged_events_dict(self): - print 'events = {', + print('events = {', end=' ') eids = sorted(self.events.keys()) for eid in eids: - print "\n ", eid, '=> {', + print("\n ", eid, '=> {', end=' ') attrs = list(self.events[eid].keys()) attrs.sort() for attr in attrs: - print "%s=%s" % (attr, str(self.events[eid][attr])), - print '}' - print '}' + print("%s=%s" % (attr, str(self.events[eid][attr])), end=' ') + print('}') + print('}') def pretty_print_sentences(self): for sentence in self: - print + print() sentence.pretty_print(verbose=False) - print + print() def pretty_print_links(self, links): for link in links: - print ' ', link + print(' ', link) diff --git a/components/evita/features.py b/components/evita/features.py index 2605cc3..39e1822 100644 --- a/components/evita/features.py +++ b/components/evita/features.py @@ -10,6 +10,7 @@ from __future__ import absolute_import +from __future__ import print_function from types 
import ListType, InstanceType from pprint import pprint @@ -56,8 +57,8 @@ def getWordPosList(constituents): def debug(text, newline=True): if DEBUG: - if newline: print text - else: print text, + if newline: print(text) + else: print(text, end=' ') class ChunkFeatures(object): @@ -369,9 +370,9 @@ def as_verbose_string(self): def pp(self, verbose=False): if verbose: - print self.as_verbose_string() + print(self.as_verbose_string()) else: - print self + print(self) class VChunkFeaturesList(object): @@ -426,7 +427,7 @@ def __init__(self, verbchunk=None, tokens=None): self._initialize_lists() self._distributeNodes() self._generate_features_list() - if DEBUG: print "\n", self + if DEBUG: print("\n", self) def _initialize_nodes(self): """Given the VerbChunk or a list of Tokens, set the nodes variable to @@ -648,5 +649,5 @@ def print_ChunkLists(self): inf = sep.join(["%s" % x.text for x in self.infMarkLists[i]]) adv1 = sep.join(["%s" % x.text for x in self.adverbsPreLists[i]]) adv2 = sep.join(["%s" % x.text for x in self.adverbsPostLists[i]]) - print " tc=[%s] inf=[%s] neg=[%s] adv1=[%s] adv2=[%s]" % \ - (tc, inf, neg, adv1, adv2) + print(" tc=[%s] inf=[%s] neg=[%s] adv1=[%s] adv2=[%s]" % \ + (tc, inf, neg, adv1, adv2)) diff --git a/components/evita/settings.py b/components/evita/settings.py index 0b65cf5..6cae63a 100644 --- a/components/evita/settings.py +++ b/components/evita/settings.py @@ -4,6 +4,7 @@ """ +from __future__ import print_function DEBUG = False @@ -26,7 +27,7 @@ if DEBUG: - print "EVITA_NOM_DISAMB = %s" % EVITA_NOM_DISAMB - print "EVITA_NOM_CONTEXT = %s" % EVITA_NOM_CONTEXT - print "EVITA_NOM_WNPRIMSENSE_ONLY = %s" % EVITA_NOM_WNPRIMSENSE_ONLY + print("EVITA_NOM_DISAMB = %s" % EVITA_NOM_DISAMB) + print("EVITA_NOM_CONTEXT = %s" % EVITA_NOM_CONTEXT) + print("EVITA_NOM_WNPRIMSENSE_ONLY = %s" % EVITA_NOM_WNPRIMSENSE_ONLY) diff --git a/components/evita/wrapper.py b/components/evita/wrapper.py index ca2e595..88a479c 100644 --- a/components/evita/wrapper.py +++ b/components/evita/wrapper.py @@ -5,6 +5,7 @@ """ from __future__ import absolute_import +from __future__ import print_function from library.tarsqi_constants import EVITA from library.main import LIBRARY from components.evita.main import Evita @@ -66,7 +67,7 @@ def _evaluate_results(self, imported_events): the system.""" events_key = self.document.sourcedoc.tags.find_tags(EVENT) if not events_key: - print "Nothing to evaluate" + print("Nothing to evaluate") else: events_system = {} for e in self.document.tags.find_tags(EVENT): @@ -79,8 +80,8 @@ def _evaluate_results(self, imported_events): imported += 1 break percentage = imported * 100 // len(events_key) - print "\n\nEVENTS TO BE IMPORTED: %3s" % len(events_key) - print "FOUND BY SYSTEM: %3s (%s%%)\n" % (imported, percentage) + print("\n\nEVENTS TO BE IMPORTED: %3s" % len(events_key)) + print("FOUND BY SYSTEM: %3s (%s%%)\n" % (imported, percentage)) def _pp_imported_events(imported_events): diff --git a/components/merging/sputlink/graph.py b/components/merging/sputlink/graph.py index ec33fd1..6a747ce 100644 --- a/components/merging/sputlink/graph.py +++ b/components/merging/sputlink/graph.py @@ -11,6 +11,7 @@ from __future__ import absolute_import +from __future__ import print_function from .objects import Node, Edge, Constraint from .utils import intersect_relations from .utils import compare_id @@ -403,4 +404,4 @@ def _html_added_table(self, fh): def debug(indent=0, str=''): if DEBUG: - print ' ' * indent, str + print(' ' * indent, str) diff --git 
a/components/merging/sputlink/objects.py b/components/merging/sputlink/objects.py index 4191d08..e8d7c39 100644 --- a/components/merging/sputlink/objects.py +++ b/components/merging/sputlink/objects.py @@ -1,4 +1,5 @@ from __future__ import absolute_import +from __future__ import print_function from .utils import intersect_lists from .utils import intersect_relations from .utils import compare_id @@ -62,13 +63,13 @@ def __str__(self): def pretty_print(self): """Print the node with its edges_in and edges_out attributes to standard output.""" - print "\n", self + print("\n", self) e_in = list(self.edges_in.keys()) e_out = list(self.edges_out.keys()) e_in.sort(compare_id) e_out.sort(compare_id) - print " i [%s]" % (' '.join(e_in)) - print " o [%s]" % (' '.join(e_out)) + print(" i [%s]" % (' '.join(e_in))) + print(" o [%s]" % (' '.join(e_out))) class Edge(object): @@ -191,15 +192,15 @@ def has_normalized_relation(self): def pp_history(self, indent=''): if isinstance(self.history, tuple): - print("%s%s" % (indent, str(self.history[0]))) - print("%s%s" % (indent, str(self.history[1]))) + print(("%s%s" % (indent, str(self.history[0])))) + print(("%s%s" % (indent, str(self.history[1])))) elif self.history.__class__.__name__ == 'Tag': tlink = "TLINK(relType=%s)" % self.history.attrs.get('relType') - print("%s%s" % (indent, tlink)) + print(("%s%s" % (indent, tlink))) elif self.history.__class__.__name__ == 'Constraint': - print("%s%s" % (indent, self.history)) + print(("%s%s" % (indent, self.history))) else: - print("%sno history" % indent) + print(("%sno history" % indent)) def history_string(self): if isinstance(self.history, tuple): diff --git a/components/merging/sputlink/rules/closure.py b/components/merging/sputlink/rules/closure.py index d0a50bc..e61fb67 100644 --- a/components/merging/sputlink/rules/closure.py +++ b/components/merging/sputlink/rules/closure.py @@ -1,5 +1,6 @@ from __future__ import absolute_import +from __future__ import print_function from objects import TemporalObject, Link, PLink @@ -37,7 +38,7 @@ def __init__(self,environment,closureType): self.axioms = environment.POINT_AXIOMS self.linkType = PLink else: - print "ERROR: unknown closure type" + print("ERROR: unknown closure type") return def computeClosure(self): @@ -53,12 +54,12 @@ def computeClosure(self): def closeNode(self,node): if self.debug: - print "Closing node %s" % (node.string) - print node.inLinks - print node.outLinks + print("Closing node %s" % (node.string)) + print(node.inLinks) + print(node.outLinks) for inLink in node.inLinks: for outLink in node.outLinks: - if self.debug: print inLink.asPrettyString(),outLink.asPrettyString() + if self.debug: print(inLink.asPrettyString(),outLink.asPrettyString()) axiom = self.findAxiom(inLink,outLink) if axiom: self.printMessage1(node, axiom, inLink, outLink) @@ -121,15 +122,15 @@ def debugOff(self): self.debug = 0 def printMessage1(self,node,axiom,inlink,outlink): if self.debug: - print "Closing:.." 
- print " ", node - print " ", axiom - print " ", inlink - print " ", outlink + print("Closing:..") + print(" ", node) + print(" ", axiom) + print(" ", inlink) + print(" ", outlink) def printMessage2(self,existingLink,relation,inlink,outlink): - print "\nWARNING: link already exists" - print " %s" % (existingLink) - print " %s" % (relation.upper()) - print " %s" % (inlink) - print " %s" % (outlink) + print("\nWARNING: link already exists") + print(" %s" % (existingLink)) + print(" %s" % (relation.upper())) + print(" %s" % (inlink)) + print(" %s" % (outlink)) diff --git a/components/merging/sputlink/rules/generateRules.py b/components/merging/sputlink/rules/generateRules.py index e9671f4..033b6ab 100644 --- a/components/merging/sputlink/rules/generateRules.py +++ b/components/merging/sputlink/rules/generateRules.py @@ -7,6 +7,7 @@ """ from __future__ import absolute_import +from __future__ import print_function import sys from objects import \ @@ -188,8 +189,8 @@ def __str__(self): str(self.POINTS) + "\n" + str(self.PLINKS) def printPointEnv(self): - print "\nENVIRONMENT(%s)\n" % (self.id) , \ - str(self.POINTS) , "\n" , str(self.PLINKS), "\n" + print("\nENVIRONMENT(%s)\n" % (self.id) , \ + str(self.POINTS) , "\n" , str(self.PLINKS), "\n") def printPLinks(self,fh=sys.stdout): fh.write(" %s\n" % self.asPLinkString()) @@ -314,12 +315,12 @@ def printConsistentEnvironments(self,environments,closedEnvironments): ENV2 = closedEnvironments[i] if ENV2.isConsistent: consistentCount = consistentCount + 1 - print "\nENVIRONMENT(%s)\n" % (ENV1.id) + print("\nENVIRONMENT(%s)\n" % (ENV1.id)) ENV1.printPLinks() - print "\nENVIRONMENT(%s)\n" % (ENV2.id) + print("\nENVIRONMENT(%s)\n" % (ENV2.id)) ENV2.printPLinks() - print - print "\n\nTotal number of consistent environments: %s\n\n" % (consistentCount) + print() + print("\n\nTotal number of consistent environments: %s\n\n" % (consistentCount)) def filterConsistentEnvironments(self,environments): consistentEnvs = [] @@ -352,10 +353,10 @@ def filterPLinks(links,nodeName): def translateEnvironments(envs): for env in envs: - print; env.printPLinks() - print "\n ==> ", - print env.translateEnvironment('x','y') - print "\n" + print(); env.printPLinks() + print("\n ==> ", end=' ') + print(env.translateEnvironment('x','y')) + print("\n") def test1(): @@ -373,7 +374,7 @@ def test1(): link3 = Link(ENV,node1,'after',node3) clos = Closure(ENV,"nodes") clos.computeClosure() - print ENV + print(ENV) def test2(): @@ -390,12 +391,12 @@ def test2(): count = count + 1 env3 = envFact.mergeEnvironments(env1,env2,'y') env4 = env3.close("points") - print "\nAXIOM_COMPILATION %s:\n" % (count) + print("\nAXIOM_COMPILATION %s:\n" % (count)) env1.printPLinks() env2.printPLinks() - print "\n ==>\n\n [", - for plink in env4.getNewPLinks('y'): print plink.asPrettyString(), - print "]\n" + print("\n ==>\n\n [", end=' ') + for plink in env4.getNewPLinks('y'): print(plink.asPrettyString(), end=' ') + print("]\n") def test3(): diff --git a/components/merging/sputlink/rules/objects.py b/components/merging/sputlink/rules/objects.py index f71d1ca..f2a86c5 100644 --- a/components/merging/sputlink/rules/objects.py +++ b/components/merging/sputlink/rules/objects.py @@ -1,5 +1,6 @@ +from __future__ import print_function class ObjectList(object): """Class that provides interface for global PointLink and Link data bases. Just a wrapper around a list. 
Assumes that all elements are @@ -94,13 +95,13 @@ def __str__(self): return "EventNode(%s,%s,%s)" % (self.id, self.string,self.eventClass) def printVerbosely(self): - print "\nEVENT(%s %s %s)" % (self.id, self.string, self.eventClass) - print " inLinks:" + print("\nEVENT(%s %s %s)" % (self.id, self.string, self.eventClass)) + print(" inLinks:") for link in self.inLinks: - print " %s --(%s,%s)--> SELF" % (link.begin, link.id, link.relation) - print " outLinks:" + print(" %s --(%s,%s)--> SELF" % (link.begin, link.id, link.relation)) + print(" outLinks:") for link in self.outLinks: - print " SELF --(%s,%s)--> %s" % (link.id, link.relation, link.end) + print(" SELF --(%s,%s)--> %s" % (link.id, link.relation, link.end)) class TimexNode(Node): @@ -140,12 +141,12 @@ def copy(self,newEnvironment): return Point(newEnvironment,self.interval,self.boundary) def printVerbosely(self): - print "\nPoint(%s)" % (self.id) - print " %s - %s" % (self.interval,self.boundary) - print " inLinks:" - for link in self.inLinks: print " ", link - print " outLinks:" - for link in self.outLinks: print " ", link + print("\nPoint(%s)" % (self.id)) + print(" %s - %s" % (self.interval,self.boundary)) + print(" inLinks:") + for link in self.inLinks: print(" ", link) + print(" outLinks:") + for link in self.outLinks: print(" ", link) class AbstractLink(TemporalObject): @@ -200,21 +201,21 @@ def __str__(self): def printVerbosely(self): def printPLinks(point): - print " inLinks:" - for link in point.inLinks: print " ", link - print " outLinks:" - for link in point.outLinks: print " ", link - print "\nTimeML_Link(%s)" % (self.id) - print " %s" % (self.relation.upper()) - print " %s" % (self.begin) - print " %s" % (self.begin.begin) + print(" inLinks:") + for link in point.inLinks: print(" ", link) + print(" outLinks:") + for link in point.outLinks: print(" ", link) + print("\nTimeML_Link(%s)" % (self.id)) + print(" %s" % (self.relation.upper())) + print(" %s" % (self.begin)) + print(" %s" % (self.begin.begin)) printPLinks(self.begin.begin) - print " %s" % (self.begin.end) + print(" %s" % (self.begin.end)) printPLinks(self.begin.end) - print " %s" % (self.end) - print " %s" % (self.end.begin) + print(" %s" % (self.end)) + print(" %s" % (self.end.begin)) printPLinks(self.end.begin) - print " %s" % (self.end.end) + print(" %s" % (self.end.end)) printPLinks(self.end.end) diff --git a/components/preprocessing/chunker.py b/components/preprocessing/chunker.py index 9496749..c6f2529 100644 --- a/components/preprocessing/chunker.py +++ b/components/preprocessing/chunker.py @@ -25,6 +25,7 @@ from __future__ import absolute_import +from __future__ import print_function from types import StringType from utilities import logger @@ -312,14 +313,14 @@ def not_be(token): def pp_tokens(self): for e in self.sentence: if type(e) == type((None,)): - print e[0], - print + print(e[0], end=' ') + print() def pp(self): in_chunk = False for t in self.sentence: ss = ' '+str(t) if in_chunk else str(t) - print ss + print(ss) if type(t) == StringType: in_chunk = not in_chunk @@ -416,11 +417,11 @@ def _add_chunks_for_timexes(self, element): @staticmethod def _debug(orphan, sentence, nodes): if DEBUG: - print orphan - print ' ', sentence + print(orphan) + print(' ', sentence) for n in nodes: - print ' ', n - print ' ', n.tag + print(' ', n) + print(' ', n.tag) if __name__ == '__main__': diff --git a/components/preprocessing/tokenizer.py b/components/preprocessing/tokenizer.py index 80e411c..5d94a12 100644 --- a/components/preprocessing/tokenizer.py +++ 
b/components/preprocessing/tokenizer.py @@ -30,6 +30,7 @@ """ from __future__ import absolute_import +from __future__ import print_function import re from StringIO import StringIO from xml.sax.saxutils import escape @@ -413,10 +414,10 @@ def print_as_string(self): s.print_as_string() def print_as_xmlstring(self): - print "" + print("") for s in self.sentences: s.print_as_xmlstring() - print "" + print("") class TokenizedSentence(object): @@ -439,13 +440,13 @@ def as_pairs(self): return [(t.text, t) for t in self.tokens] def print_as_string(self): - print ' '.join([t.text for t in self.tokens]) + print(' '.join([t.text for t in self.tokens])) def print_as_xmlstring(self): - print '' + print('') for t in self.tokens: t.print_as_xmlstring(indent=' ') - print '' + print('') class TokenizedLex(object): @@ -468,11 +469,11 @@ def as_pairs(self): return [(self.text, self)] def print_as_string(self, indent=''): - print self.text + print(self.text) def print_as_xmlstring(self, indent=''): - print "%s%s" % \ - (indent, self.begin, self.end, escape(self.text)) + print("%s%s" % \ + (indent, self.begin, self.end, escape(self.text))) if __name__ == '__main__': @@ -487,6 +488,6 @@ def print_as_xmlstring(self, indent=''): text = tk.tokenize_text() # print tk.sentences # print tk.lexes - print tk.get_tokenized_as_xml() - print tk.get_tokenized_as_string() - print "\nDONE, processing time was %.3f seconds\n" % (time() - t1) + print(tk.get_tokenized_as_xml()) + print(tk.get_tokenized_as_string()) + print("\nDONE, processing time was %.3f seconds\n" % (time() - t1)) diff --git a/components/simpletime/main.py b/components/simpletime/main.py index 2615a70..4117937 100644 --- a/components/simpletime/main.py +++ b/components/simpletime/main.py @@ -1,4 +1,5 @@ from __future__ import absolute_import +from __future__ import print_function import os, sys from io import open @@ -90,7 +91,7 @@ def match(self, word): pass def pp(self): - print "%s = %s" % (self.name, ' '.join(list(self.words))) + print("%s = %s" % (self.name, ' '.join(list(self.words)))) class RegularExpression(TimexElement): @@ -109,7 +110,7 @@ def is_regular_expression(self): return True def pp(self): - print "%s = %s" % (self.name, self.regexp) + print("%s = %s" % (self.name, self.regexp)) class CombinationRule(object): @@ -119,7 +120,7 @@ def __init__(self, lhs, rhs): self.rhs = rhs def pp(self): - print "%s --> %s" % (self.lhs, ' '.join(self.rhs)) + print("%s --> %s" % (self.lhs, ' '.join(self.rhs))) def get_class(class_name): @@ -144,7 +145,7 @@ def show_rules(self): rule.pp() def extract_times(self, text): - print 'On %s" % (option, val) + print(" %s -> %s" % (option, val)) - print 'PROCESSING...' + print('PROCESSING...') tc = Tarsqi(doc_type, options, self.file_path, self.file_out) tc.process() #self.text_Info.SetValue(file_contents(self.file_out)) self.text_Info.SetValue(xml_tree(self.file_out)) - print 'CREATED:', self.file_out + print('CREATED:', self.file_out) # Create display file using various intermediate files created # by the Tarsqi instance. 
@@ -386,7 +387,7 @@ def OnViewResult(self, e): frame = ResultFrame(self) frame.Show() else: - print 'WARNING: file not yet parsed' + print('WARNING: file not yet parsed') class ResultFrame(TarsqiFrame): @@ -450,30 +451,30 @@ def CreateMenuBar(self): ("TBox", self.OnTBox)])]) def OnViewSlinket(self, e): - print 'Opening', self.file_display_sli + print('Opening', self.file_display_sli) self.DisplayFile(self.file_display_sli) def OnViewBlinker(self, e): - print 'Opening', self.file_display_bli + print('Opening', self.file_display_bli) self.DisplayFile(self.file_display_bli) def OnViewClassifier(self, e): - print 'Opening', self.file_display_cla + print('Opening', self.file_display_cla) self.DisplayFile(self.file_display_cla) def OnViewAll(self, e): - print 'Opening', self.file_display_all + print('Opening', self.file_display_all) self.DisplayFile(self.file_display_all) def OnViewMerged(self, e): - print 'Opening', self.file_display_mer + print('Opening', self.file_display_mer) self.DisplayFile(self.file_display_mer) def OnTango(self, e): os.chdir(self.tangodir) file = self.displaydir + os.sep + self.file_basename + '.ALL.xml' command = 'java -jar tango_v15.jar file=' + file - print 'OPENING TANGO:', command + print('OPENING TANGO:', command) popen2.popen2(command) def OnTBox(self, e): @@ -655,11 +656,11 @@ def SetAllValues(self, boolean): def run_shell_command(command): - print 'RUNNING SHELL SCRIPT:', command + print('RUNNING SHELL SCRIPT:', command) r, e = popen2.popen2(command) for line in r.readlines(): - print 'RUNNING COMMAND:', line, - print e + print('RUNNING COMMAND:', line, end=' ') + print(e) def file_contents(filename): """Return the contents of filename.""" diff --git a/deprecated/sputlink/rule_creation/closure.py b/deprecated/sputlink/rule_creation/closure.py index 187883c..878ff3c 100755 --- a/deprecated/sputlink/rule_creation/closure.py +++ b/deprecated/sputlink/rule_creation/closure.py @@ -1,5 +1,6 @@ from __future__ import absolute_import +from __future__ import print_function from objects import TemporalObject, Link, PLink import string @@ -38,7 +39,7 @@ def __init__(self,environment,closureType): self.axioms = environment.POINT_AXIOMS self.linkType = PLink else: - print "ERROR: unknown closure type" + print("ERROR: unknown closure type") return def computeClosure(self): @@ -54,12 +55,12 @@ def computeClosure(self): def closeNode(self,node): if self.debug: - print "Closing node %s" % (node.string) - print node.inLinks - print node.outLinks + print("Closing node %s" % (node.string)) + print(node.inLinks) + print(node.outLinks) for inLink in node.inLinks: for outLink in node.outLinks: - if self.debug: print inLink.asPrettyString(),outLink.asPrettyString() + if self.debug: print(inLink.asPrettyString(),outLink.asPrettyString()) axiom = self.findAxiom(inLink,outLink) if axiom: self.printMessage1(node, axiom, inLink, outLink) @@ -122,15 +123,15 @@ def debugOff(self): self.debug = 0 def printMessage1(self,node,axiom,inlink,outlink): if self.debug: - print "Closing:.." 
- print " ", node - print " ", axiom - print " ", inlink - print " ", outlink + print("Closing:..") + print(" ", node) + print(" ", axiom) + print(" ", inlink) + print(" ", outlink) def printMessage2(self,existingLink,relation,inlink,outlink): - print "\nWARNING: link already exists" - print " %s" % (existingLink) - print " %s" % (string.upper(relation)) - print " %s" % (inlink) - print " %s" % (outlink) + print("\nWARNING: link already exists") + print(" %s" % (existingLink)) + print(" %s" % (string.upper(relation))) + print(" %s" % (inlink)) + print(" %s" % (outlink)) diff --git a/deprecated/sputlink/rule_creation/generateRules.py b/deprecated/sputlink/rule_creation/generateRules.py index 905b788..c4471fc 100755 --- a/deprecated/sputlink/rule_creation/generateRules.py +++ b/deprecated/sputlink/rule_creation/generateRules.py @@ -5,6 +5,7 @@ extra short output for consumption of Perl scripts. """ from __future__ import absolute_import +from __future__ import print_function import sys import string @@ -187,8 +188,8 @@ def __str__(self): str(self.POINTS) + "\n" + str(self.PLINKS) def printPointEnv(self): - print "\nENVIRONMENT(%s)\n" % (self.id) , \ - str(self.POINTS) , "\n" , str(self.PLINKS), "\n" + print("\nENVIRONMENT(%s)\n" % (self.id) , \ + str(self.POINTS) , "\n" , str(self.PLINKS), "\n") def printPLinks(self,fh=sys.stdout): fh.write(" %s\n" % self.asPLinkString()) @@ -313,12 +314,12 @@ def printConsistentEnvironments(self,environments,closedEnvironments): ENV2 = closedEnvironments[i] if ENV2.isConsistent: consistentCount = consistentCount + 1 - print "\nENVIRONMENT(%s)\n" % (ENV1.id) + print("\nENVIRONMENT(%s)\n" % (ENV1.id)) ENV1.printPLinks() - print "\nENVIRONMENT(%s)\n" % (ENV2.id) + print("\nENVIRONMENT(%s)\n" % (ENV2.id)) ENV2.printPLinks() - print - print "\n\nTotal number of consistent environments: %s\n\n" % (consistentCount) + print() + print("\n\nTotal number of consistent environments: %s\n\n" % (consistentCount)) def filterConsistentEnvironments(self,environments): consistentEnvs = [] @@ -351,10 +352,10 @@ def filterPLinks(links,nodeName): def translateEnvironments(envs): for env in envs: - print; env.printPLinks() - print "\n ==> ", - print env.translateEnvironment('x','y') - print "\n" + print(); env.printPLinks() + print("\n ==> ", end=' ') + print(env.translateEnvironment('x','y')) + print("\n") def test1(): @@ -372,7 +373,7 @@ def test1(): link3 = Link(ENV,node1,'after',node3) clos = Closure(ENV,"nodes") clos.computeClosure() - print ENV + print(ENV) def test2(): @@ -389,12 +390,12 @@ def test2(): count = count + 1 env3 = envFact.mergeEnvironments(env1,env2,'y') env4 = env3.close("points") - print "\nAXIOM_COMPILATION %s:\n" % (count) + print("\nAXIOM_COMPILATION %s:\n" % (count)) env1.printPLinks() env2.printPLinks() - print "\n ==>\n\n [", - for plink in env4.getNewPLinks('y'): print plink.asPrettyString(), - print "]\n" + print("\n ==>\n\n [", end=' ') + for plink in env4.getNewPLinks('y'): print(plink.asPrettyString(), end=' ') + print("]\n") def test3(): diff --git a/deprecated/sputlink/rule_creation/objects.py b/deprecated/sputlink/rule_creation/objects.py index d0e7631..5902dce 100755 --- a/deprecated/sputlink/rule_creation/objects.py +++ b/deprecated/sputlink/rule_creation/objects.py @@ -1,5 +1,6 @@ +from __future__ import print_function class ObjectList: """Class that provides interface for global PointLink and Link data bases. Just a wrapper around a list. 
Assumes that all elements are @@ -94,13 +95,13 @@ def __str__(self): return "EventNode(%s,%s,%s)" % (self.id, self.string,self.eventClass) def printVerbosely(self): - print "\nEVENT(%s %s %s)" % (self.id, self.string, self.eventClass) - print " inLinks:" + print("\nEVENT(%s %s %s)" % (self.id, self.string, self.eventClass)) + print(" inLinks:") for link in self.inLinks: - print " %s --(%s,%s)--> SELF" % (link.begin, link.id, link.relation) - print " outLinks:" + print(" %s --(%s,%s)--> SELF" % (link.begin, link.id, link.relation)) + print(" outLinks:") for link in self.outLinks: - print " SELF --(%s,%s)--> %s" % (link.id, link.relation, link.end) + print(" SELF --(%s,%s)--> %s" % (link.id, link.relation, link.end)) class TimexNode(Node): @@ -140,12 +141,12 @@ def copy(self,newEnvironment): return Point(newEnvironment,self.interval,self.boundary) def printVerbosely(self): - print "\nPoint(%s)" % (self.id) - print " %s - %s" % (self.interval,self.boundary) - print " inLinks:" - for link in self.inLinks: print " ", link - print " outLinks:" - for link in self.outLinks: print " ", link + print("\nPoint(%s)" % (self.id)) + print(" %s - %s" % (self.interval,self.boundary)) + print(" inLinks:") + for link in self.inLinks: print(" ", link) + print(" outLinks:") + for link in self.outLinks: print(" ", link) class AbstractLink(TemporalObject): @@ -200,21 +201,21 @@ def __str__(self): def printVerbosely(self): def printPLinks(point): - print " inLinks:" - for link in point.inLinks: print " ", link - print " outLinks:" - for link in point.outLinks: print " ", link - print "\nTimeML_Link(%s)" % (self.id) - print " %s" % (string.upper(self.relation)) - print " %s" % (self.begin) - print " %s" % (self.begin.begin) + print(" inLinks:") + for link in point.inLinks: print(" ", link) + print(" outLinks:") + for link in point.outLinks: print(" ", link) + print("\nTimeML_Link(%s)" % (self.id)) + print(" %s" % (string.upper(self.relation))) + print(" %s" % (self.begin)) + print(" %s" % (self.begin.begin)) printPLinks(self.begin.begin) - print " %s" % (self.begin.end) + print(" %s" % (self.begin.end)) printPLinks(self.begin.end) - print " %s" % (self.end) - print " %s" % (self.end.begin) + print(" %s" % (self.end)) + print(" %s" % (self.end.begin)) printPLinks(self.end.begin) - print " %s" % (self.end.end) + print(" %s" % (self.end.end)) printPLinks(self.end.end) diff --git a/deprecated/xml_parser.py b/deprecated/xml_parser.py index a5c277d..6d700d9 100644 --- a/deprecated/xml_parser.py +++ b/deprecated/xml_parser.py @@ -14,6 +14,7 @@ """ from __future__ import absolute_import +from __future__ import print_function import sys from types import UnicodeType, IntType, StringType, NoneType import xml.parsers.expat @@ -355,14 +356,14 @@ def toString(self): def pretty_print(self): """Pretty printer for XmlDocuments. 
Pretty prints the list of elements and prints the tag dictionary to standard output.""" - print "\n%s\n" % str(self) + print("\n%s\n" % str(self)) element = self.elements[0] while element: element.pretty_print(indent='') element = element.get_next() - print "\nTAGS:" + print("\nTAGS:") for tag in self.tags.keys(): - print " %-5s %s" % ('<'+tag+'>', [el.id for el in self.tags[tag]]) + print(" %-5s %s" % ('<'+tag+'>', [el.id for el in self.tags[tag]])) def pp(self): self.pretty_print() @@ -648,7 +649,7 @@ def as_xml_tag(self): def pretty_print(self, indent=''): """Pretty printer for XmlDocElements, prints the content of the element.""" - print indent + 'ELEMENT(' + str(self.id) + '): ' + self.content.replace("\n", '\\n') + print(indent + 'ELEMENT(' + str(self.id) + '): ' + self.content.replace("\n", '\\n')) diff --git a/docmodel/document.py b/docmodel/document.py index 681682d..efcc24a 100644 --- a/docmodel/document.py +++ b/docmodel/document.py @@ -6,6 +6,7 @@ """ from __future__ import absolute_import +from __future__ import print_function import os, sys, codecs, StringIO, itertools, time from xml.sax.saxutils import escape, quoteattr from subprocess import Popen, PIPE @@ -107,18 +108,18 @@ def add_event(self, begin, end, attrs): self.tags.add_tag('EVENT', begin, end, attrs) def pp(self, source_tags=True, tarsqi_tags=True): - print "\n", self, "\n" + print("\n", self, "\n") for key, value in self.metadata.items(): - print " metadata.%-14s --> %s" % (key, value) + print(" metadata.%-14s --> %s" % (key, value)) for key, value in self.options.items(): - print " options.%-15s --> %s" % (key, value) + print(" options.%-15s --> %s" % (key, value)) if source_tags and not self.sourcedoc.tags.is_empty(): - print "\nSOURCE_TAGS:" + print("\nSOURCE_TAGS:") self.sourcedoc.tags.pp() if tarsqi_tags and not self.tags.is_empty(): - print "\nTARSQI_TAGS:" + print("\nTARSQI_TAGS:") self.tags.pp() - print + print() def next_event_id(self): self.counters[EVENT] += 1 @@ -316,12 +317,12 @@ def finish(self): def pp(self): """Print source and tags.""" - print "\n\n" % self.filename - print self.text.encode('utf-8').strip() - print "\nMETADATA:", self.metadata - print "\nTAGS:" + print("\n\n" % self.filename) + print(self.text.encode('utf-8').strip()) + print("\nMETADATA:", self.metadata) + print("\nTAGS:") self.tags.pp() - print + print() # print "XMLDECL:", self.xmldecl # print "COMMENTS:", self.comments # print "PROCESSING:", self.processing_instructions @@ -519,18 +520,18 @@ def pp(self, indent=' '): def pp_tags(self, indent=''): for tag in self.tags: - print "%s%s" % (indent, tag) + print("%s%s" % (indent, tag)) def pp_opening_tags(self): - print '.opening_tags' + print('.opening_tags') for offset, taglist in sorted(self.opening_tags.items()): - print(" %d " - % offset, "\n ".join([x.__str__() for x in taglist])) + print((" %d " + % offset, "\n ".join([x.__str__() for x in taglist]))) def pp_closing_tags(self): - print '.closing_tags' + print('.closing_tags') for offset, tagdict in sorted(self.closing_tags.items()): - print " %d " % offset, tagdict + print(" %d " % offset, tagdict) class Tag(object): diff --git a/docmodel/source_parser.py b/docmodel/source_parser.py index 82c6882..5ab640b 100644 --- a/docmodel/source_parser.py +++ b/docmodel/source_parser.py @@ -37,6 +37,7 @@ """ from __future__ import absolute_import +from __future__ import print_function import codecs import xml.parsers.expat from xml.dom import minidom @@ -172,7 +173,7 @@ def print_dom(node, indent=0): return if node.nodeType == 
minidom.Node.TEXT_NODE and not node.data.strip(): return - print "%s%s" % (indent * ' ', node) + print("%s%s" % (indent * ' ', node)) for childnode in node.childNodes: print_dom(childnode, indent + 3) @@ -286,8 +287,8 @@ def _debug(self, *rest): p1 = "%s-%s" % (self.parser.CurrentLineNumber, self.parser.CurrentColumnNumber) p2 = "%s" % self.parser.CurrentByteIndex - print("%-5s %-4s %s" % - (p1, p2, " ".join(["%-8s" % replace_newline(x) for x in rest]))) + print(("%-5s %-4s %s" % + (p1, p2, " ".join(["%-8s" % replace_newline(x) for x in rest])))) def replace_newline(text): diff --git a/docs/notes/python3.md b/docs/notes/python3.md index ac4075b..480f9a8 100644 --- a/docs/notes/python3.md +++ b/docs/notes/python3.md @@ -583,6 +583,8 @@ There were no comprehensions over tuples. ### 3.8. Iterators +See commits [e06c50ab](https://github.com/tarsqi/ttk/commit/e06c50ab534d4cf33165cd669a1c05474dd46d61) and [aad98acc](https://github.com/tarsqi/ttk/commit/aad98acc1e30a546a1df6f306f8555a59aa1e945) for changes made in this section. + [https://portingguide.readthedocs.io/en/latest/iterators.html](https://portingguide.readthedocs.io/en/latest/iterators.html) ``` @@ -604,20 +606,38 @@ $ python-modernize -wnf libmodernize.fixes.fix_xrange_six . Imported range from six.moves, added some list functions around results, and removed xrange(). +``` +$ python-modernize -wnf libmodernize.fixes.fix_next . +``` + +Changed one occurrence of the next() method. + + + +### 3.9. Built-in functions + +[https://portingguide.readthedocs.io/en/latest/builtins.html](https://portingguide.readthedocs.io/en/latest/builtins.html) + +Ran all the fixers mentioned in the link above. + +Some comments: + +- Use of `file`, as in `isinstance(f, file)`, was not fixed but does not appear to exist. In Python 3 this would cause name errors. +- There was no use of `execfile`, `reload`, `intern` or `coerce`. + + + ## 4. Remaining thingies List of steps still remaining.
-- Other core object changes - - [https://portingguide.readthedocs.io/en/latest/core-obj-misc.html](https://portingguide.readthedocs.io/en/latest/core-obj-misc.html) -- built-in functions - - - [https://portingguide.readthedocs.io/en/latest/builtins.html](https://portingguide.readthedocs.io/en/latest/builtins.html) - comparing and sorting, including rich comparison operators - [https://portingguide.readthedocs.io/en/latest/comparisons.html](https://portingguide.readthedocs.io/en/latest/comparisons.html) - [http://python3porting.com/preparing.html](http://python3porting.com/preparing.html) +- Other core object changes + - [https://portingguide.readthedocs.io/en/latest/core-obj-misc.html](https://portingguide.readthedocs.io/en/latest/core-obj-misc.html) - Other changes - [https://portingguide.readthedocs.io/en/latest/etc.html](https://portingguide.readthedocs.io/en/latest/etc.html) diff --git a/library/blinker/blinker_rule_loader.py b/library/blinker/blinker_rule_loader.py index d6a3bd9..d55c8b0 100644 --- a/library/blinker/blinker_rule_loader.py +++ b/library/blinker/blinker_rule_loader.py @@ -1,4 +1,5 @@ from __future__ import absolute_import +from __future__ import print_function import os import re from io import open @@ -41,7 +42,7 @@ def __getitem__(self, key): def pp(self): for i in range(1,7): - print "\nBLINKER RULES TYPE %d:\n\n" % i + print("\nBLINKER RULES TYPE %d:\n\n" % i) for rule in self.rules[i]: rule.pp() @@ -71,9 +72,9 @@ def __str__(self): return '' def pp(self): - print "" % self.id + print("" % self.id) for attr, val in sorted(self.attrs.items()): - print " %-12s = %s" % (attr, val) + print(" %-12s = %s" % (attr, val)) diff --git a/library/classifier/create_model.py b/library/classifier/create_model.py index a086dc3..d4d54f5 100644 --- a/library/classifier/create_model.py +++ b/library/classifier/create_model.py @@ -41,6 +41,7 @@ from __future__ import absolute_import +from __future__ import print_function import os, sys, glob, getopt from . import path from utilities import mallet @@ -66,7 +67,7 @@ def create_model(vectors, trainer='MaxEnt', mallet.train_model_command(MALLET, vectors, trainer=trainer, cross_validation=cross_validation)] for command in commands: - print command + print(command) os.system(command) diff --git a/library/classifier/create_vectors.py b/library/classifier/create_vectors.py index 4a6bc7b..d3f60a6 100644 --- a/library/classifier/create_vectors.py +++ b/library/classifier/create_vectors.py @@ -94,6 +94,7 @@ """ from __future__ import absolute_import +from __future__ import print_function import os, sys from . 
import path @@ -162,11 +163,11 @@ def process_dir(gold_dir): if fname.startswith('.') or fname.endswith('~'): continue try: - print fname + print(fname) process_file(gold_dir, fname) except Exception as e: - print "WARNING: could not process %s" % fname - print " ", sys.exc_info()[1] + print("WARNING: could not process %s" % fname) + print(" ", sys.exc_info()[1]) def process_file(gold_dir, fname): diff --git a/library/evita/build_event_nominals1.py b/library/evita/build_event_nominals1.py index 333054e..71bee31 100644 --- a/library/evita/build_event_nominals1.py +++ b/library/evita/build_event_nominals1.py @@ -18,6 +18,7 @@ """ from __future__ import absolute_import +from __future__ import print_function import os import sys import forms @@ -132,31 +133,31 @@ def checkWNEvent(wnWord): for (line_number, line) in IN: if not ((line_number + 1) % 1000): - print line_number + 1 + print(line_number + 1) token = line.split()[0] # first sense is event if (isWNEvent(N[str(token)][0])): if DEBUG: - print token - print " ", N[str(token)], "\n ", N[str(token)][0] - print " primSenseIsEvent" + print(token) + print(" ", N[str(token)], "\n ", N[str(token)][0]) + print(" primSenseIsEvent") file1.write(token+"\n") dbm1[token] = '1' if (wnAllSensesAreEvents(token)): if DEBUG: - print token - print " ", N[str(token)], "\n ", N[str(token)][0] - print " isAlwaysEvent" + print(token) + print(" ", N[str(token)], "\n ", N[str(token)][0]) + print(" isAlwaysEvent") file2.write(token+"\n") dbm2[token] = '1' if (wnSomeSensesAreEvents(token)): if DEBUG: - print token - print " ", N[str(token)], "\n ", N[str(token)][0] - print " isAmbiguous" + print(token) + print(" ", N[str(token)], "\n ", N[str(token)][0]) + print(" isAmbiguous") file3.write(token+"\n") dbm3[token] = '1' diff --git a/library/evita/compile_patterns.py b/library/evita/compile_patterns.py index 7972896..32095e0 100644 --- a/library/evita/compile_patterns.py +++ b/library/evita/compile_patterns.py @@ -22,6 +22,7 @@ """ from __future__ import absolute_import +from __future__ import print_function import os, sys, six.moves.cPickle from io import open @@ -41,7 +42,7 @@ for (listName, patternsList) in patternsGroups: fname = "%s.pickle" % listName pickleFile = open(fname, 'w') - print "Compiling %d %s and saving them in %s" % (len(patternsList), listName, fname) + print("Compiling %d %s and saving them in %s" % (len(patternsList), listName, fname)) toPickle = [] for name, pattern in patternsList: toPickle.append(compileOP(pattern, name=name)) diff --git a/library/s2t/s2t_rule_loader.py b/library/s2t/s2t_rule_loader.py index c81b912..741236a 100644 --- a/library/s2t/s2t_rule_loader.py +++ b/library/s2t/s2t_rule_loader.py @@ -1,4 +1,5 @@ from __future__ import absolute_import +from __future__ import print_function import os import re from io import open @@ -39,9 +40,9 @@ def __str__(self): return '' def pp(self): - print "" % self.id + print("" % self.id) for attr, val in self.attrs.items(): - print " %s=\"%s\"" % (attr, val) + print(" %s=\"%s\"" % (attr, val)) def read_rules(): diff --git a/library/slinket/alinkPredicates.py b/library/slinket/alinkPredicates.py index c0c2694..9949990 100644 --- a/library/slinket/alinkPredicates.py +++ b/library/slinket/alinkPredicates.py @@ -30,6 +30,7 @@ #from library.timeMLspec import INIT, CULM, TERM, CONT, REINIT from __future__ import absolute_import +from __future__ import print_function from .timeMLspec import INIT, CULM, TERM, CONT, REINIT # FORWARD patterns: @@ -62,7 +63,7 @@ from .slinketPatterns import 
RelClauseRestric, RelClauseRestricPerfect -print "Creating Alink predicate dictionaries" +print("Creating Alink predicate dictionaries") nounDict = { diff --git a/library/slinket/create_dicts.py b/library/slinket/create_dicts.py index 9cf8fcf..d1f891c 100644 --- a/library/slinket/create_dicts.py +++ b/library/slinket/create_dicts.py @@ -19,6 +19,7 @@ from __future__ import absolute_import +from __future__ import print_function import os, sys, six.moves.cPickle from io import open @@ -33,7 +34,7 @@ from . import alinkPredicates -print "Pickling dictionaries..." +print("Pickling dictionaries...") slink_dictionaries = [(slinkPredicates.nounDict, "slinkNouns"), (slinkPredicates.adjDict, "slinkAdjs"), (slinkPredicates.verbDict, "slinkVerbs"), @@ -43,5 +44,5 @@ for (dictionary, name) in slink_dictionaries: fname = "%s.pickle" % name pickleFile = open(fname, 'w') - print "Writing %s to %s" % (name, fname) + print("Writing %s to %s" % (name, fname)) six.moves.cPickle.dump(dictionary, pickleFile) diff --git a/library/slinket/slinkPredicates.py b/library/slinket/slinkPredicates.py index 2db0575..b9e221f 100644 --- a/library/slinket/slinkPredicates.py +++ b/library/slinket/slinkPredicates.py @@ -29,6 +29,7 @@ #from library.timeMLspec import * from __future__ import absolute_import +from __future__ import print_function from .timeMLspec import * # FORWARD patterns: @@ -49,7 +50,7 @@ from .slinketPatterns import MAINsentence -print "Creating Slink predicate dictionaries" +print("Creating Slink predicate dictionaries") # TODO: it looks like when these dictionaries are pickled, the values of # 'acceptance' and 'acceptances' are going to be equal. Check this out. diff --git a/library/slinket/slinketPatterns.py b/library/slinket/slinketPatterns.py index e04ab7d..37f54f4 100644 --- a/library/slinket/slinketPatterns.py +++ b/library/slinket/slinketPatterns.py @@ -12,6 +12,7 @@ """ from __future__ import absolute_import +from __future__ import print_function from library import forms #from library.timeMLspec import * @@ -19,7 +20,7 @@ from utilities.FSA import compileOP -print "Loading and compiling patterns" +print("Loading and compiling patterns") # Note: Patterns must be sorted from shortest to longest, and from most to less diff --git a/tarsqi.py b/tarsqi.py index 8eada62..da804fe 100644 --- a/tarsqi.py +++ b/tarsqi.py @@ -106,6 +106,7 @@ """ from __future__ import absolute_import +from __future__ import print_function import sys, os, time, getopt import root @@ -117,6 +118,7 @@ from docmodel.main import create_docstructure_parser from utilities import logger from utilities.file import read_config +from six.moves import input TTK_ROOT = os.environ['TTK_ROOT'] CONFIG_FILE = os.path.join(TTK_ROOT, 'config.txt') @@ -353,9 +355,9 @@ def set_option(self, opt, value): setattr(self, adjusted_opt, value) def pp(self): - print "OPTIONS:" + print("OPTIONS:") for option in sorted(self._options.keys()): - print " %-18s --> %s" % (option, self._options[option]) + print(" %-18s --> %s" % (option, self._options[option])) class TarsqiError(Exception): @@ -425,9 +427,9 @@ def __init__(self, args): "input or output arguments\n%s" % _usage_string()) def pp(self): - print "\n" - print " inpath =", self.inpath - print " outpath =", self.outpath, "\n" + print("\n") + print(" inpath =", self.inpath) + print(" outpath =", self.outpath, "\n") self.options.pp() def run(self): @@ -463,10 +465,10 @@ def _run_tarsqi_on_directory(self): if not os.path.isdir(self.outpath): os.makedirs(self.outpath) else: - print "WARNING: Directory %s 
already exists" % self.outpath - print "WARNING: Existing files in %s will be overwritten" % self.outpath - print "Continue? (y/n)\n?", - answer = raw_input() + print("WARNING: Directory %s already exists" % self.outpath) + print("WARNING: Existing files in %s will be overwritten" % self.outpath) + print("Continue? (y/n)\n?", end=' ') + answer = input() if answer != 'y': exit() for filename in os.listdir(self.inpath): @@ -475,7 +477,7 @@ def _run_tarsqi_on_directory(self): if (os.path.isfile(infile) and os.path.basename(infile)[0] != '.' and os.path.basename(infile)[-1] != '~'): - print infile + print(infile) Tarsqi(self.options, infile, outfile).process_document() def _run_tarsqi_on_file(self): @@ -488,7 +490,7 @@ def run_profiler(args): """Wrap running Tarsqi in the profiler.""" import profile command = "TarsqiWrapper([%s]).run()" % ','.join(['"'+x+'"' for x in args]) - print 'Running profiler on:', command + print('Running profiler on:', command) profile.run(command, os.path.abspath(PROFILER_OUTPUT)) diff --git a/testing/regression.py b/testing/regression.py index 44e9570..9b7178a 100644 --- a/testing/regression.py +++ b/testing/regression.py @@ -43,10 +43,12 @@ from __future__ import absolute_import +from __future__ import print_function import os, sys, getopt, time, glob import tarsqi from io import open +from six.moves import input def load_cases(fname): @@ -59,7 +61,7 @@ def load_cases(fname): o1 = int(o1) o2 = int(o2) cases.append(Case(identifier, sentence, o1, o2)) - print "Loaded %d test cases from fname" % len(cases) + print("Loaded %d test cases from fname" % len(cases)) return cases @@ -114,7 +116,7 @@ def create_event_vg_cases(): o1 = o2 - last_token_length name = name_generator("%s-%s" % (chunkclass, vg)) out.write("VG-%s\t%s\t%s\t%s\n" % (name, sentence, o1, o2)) - print "Created", outfile + print("Created", outfile) def run_evita(): @@ -131,7 +133,7 @@ def run_evita(): true += 1 else: false += 1 - print "True=%s False=%s" % (true, false) + print("True=%s False=%s" % (true, false)) def check_tag(pipeline, sentence, tag, o1, o2): @@ -194,7 +196,7 @@ def write_cases(self): self.case = case self.case_file = os.path.join(self.html_dir, "cases-%s.html" % case) self.case_fh = open(self.case_file, 'w') - print "writing", self.case_file + print("writing", self.case_file) self._load_cases() self._load_case_results() self._write_case_report() @@ -202,7 +204,7 @@ def write_cases(self): def _load_cases(self): self.case_input_file = u"%s/cases-%s.tab" % (self.cases_dir, self.case) self.case_input_fh = open(self.case_input_file) - print " reading cases in", self.case_input_file + print(" reading cases in", self.case_input_file) self.case_input = {} for case in load_cases(self.case_input_file): self.case_input[case.identifier] = case @@ -210,7 +212,7 @@ def _load_cases(self): def _load_case_results(self): self.case_results = {} for results_file in glob.glob(u"%s/%s/*.tab" % (self.results_dir, self.case)): - print ' reading results from', results_file + print(' reading results from', results_file) timestamp = os.path.splitext(os.path.basename(results_file))[0] self.case_results[timestamp] = {} for line in open(results_file): @@ -268,14 +270,14 @@ def purge_result(args): case, "%s.tab" % timestamp) results_file_was_removed = False if os.path.isfile(results_file): - print "Remove %s? (y/n)" % results_file - print "?", - answer = raw_input() + print("Remove %s? 
(y/n)" % results_file) + print("?", end=' ') + answer = input() if answer.strip() == 'y': os.remove(results_file) results_file_was_removed = True else: - print "Warning: incorrect case or timestamp" + print("Warning: incorrect case or timestamp") if results_file_was_removed: generate_report() diff --git a/testing/run_tests.py b/testing/run_tests.py index 3b9145d..391a337 100644 --- a/testing/run_tests.py +++ b/testing/run_tests.py @@ -50,6 +50,7 @@ from __future__ import absolute_import +from __future__ import print_function import sys, getopt, traceback, types import tarsqi @@ -119,20 +120,20 @@ def run(self, pipeline): result = PASS if tag is None else FAIL self.results[result] += 1 entity_spec = "%s(%s:%s)=%s" % (self.tag, self.o1, self.o2, self.find_tag) - print " %-30s %-30s %s" % (self.name, entity_spec, result) + print(" %-30s %-30s %s" % (self.name, entity_spec, result)) for attr, val in self.attributes: attr_spec = " %s=%s" % (attr, val.replace(' ', '_')) if result == PASS: attr_result = PASS if tag.attrs.get(attr) == val else FAIL self.results[attr_result] += 1 - print " %-30s %-30s %s" % (self.name, attr_spec, attr_result) + print(" %-30s %-30s %s" % (self.name, attr_spec, attr_result)) else: - print " %-30s %-30s %s" % (self.name, attr_spec, result) + print(" %-30s %-30s %s" % (self.name, attr_spec, result)) except: self.results[ERROR] += 1 - print " %-61s %s" % (self.name, ERROR) + print(" %-61s %s" % (self.name, ERROR)) if SHOW_ERRORS: - print; traceback.print_exc(); print + print(); traceback.print_exc(); print() class TarsqiLinkTest(TarsqiTest): @@ -168,12 +169,12 @@ def run(self, pipeline): link_spec = "%s(%s:%s-%s:%s)=%s" \ % (self.reltype, self.e1[0], self.e1[1], self.e2[0], self.e2[1], self.find_tag) - print " %-35s %-40s %s" % (self.name, link_spec, result) + print(" %-35s %-40s %s" % (self.name, link_spec, result)) except: self.results[ERROR] += 1 - print " %-76s %s" % (self.name, ERROR) + print(" %-76s %s" % (self.name, ERROR)) if SHOW_ERRORS: - print; traceback.print_exc(); print + print(); traceback.print_exc(); print() class GUTimeTest(TarsqiEntityTest): @@ -256,12 +257,12 @@ def run(self): result = sorted(self.output) == sorted(observed_output) result = PASS if result else FAIL self.results[result] += 1 - print " %-35s %s" % (self.name, result) + print(" %-35s %s" % (self.name, result)) except: self.results[ERROR] += 1 - print " %-76s %s" % (self.name, ERROR) + print(" %-76s %s" % (self.name, ERROR)) if SHOW_ERRORS: - print; traceback.print_exc(); print + print(); traceback.print_exc(); print() def run_pipeline(pipeline, sentence): @@ -342,7 +343,7 @@ def __init__(self, module_name, module_class, test_specs, tag=None): self.tag = tag def run(self): - print "\n>>> Running %s Tests...\n" % self.module_name + print("\n>>> Running %s Tests...\n" % self.module_name) results = {PASS: 0, FAIL: 0, ERROR: 0} for test_specification in self.test_specifications: if self.tag: @@ -354,7 +355,7 @@ def run(self): results[FAIL] += test.results[FAIL] results[ERROR] += test.results[ERROR] p, f, e = results[PASS], results[FAIL], results[ERROR] - print "\nTOTALS: PASS=%s FAIL=%s ERROR=%s\n" % (p, f, e) + print("\nTOTALS: PASS=%s FAIL=%s ERROR=%s\n" % (p, f, e)) SUMMARY.append([self.module_name, p, f, e]) @@ -394,13 +395,13 @@ def test_sputlink(): def print_summary(): - print "\nSUMMARY:\n" - print " %-15s %-10s %-10s %s" % ('', ' PASS', ' FAIL', 'ERROR') + print("\nSUMMARY:\n") + print(" %-15s %-10s %-10s %s" % ('', ' PASS', ' FAIL', 'ERROR')) for module, p, f, e in SUMMARY: - print 
" %-15s %5s %5s %5s" % (module, p, f, e) - print + print(" %-15s %5s %5s %5s" % (module, p, f, e)) + print() if f or e: - print " THERE WERE FAILURES OR ERRORS\n" + print(" THERE WERE FAILURES OR ERRORS\n") if __name__ == '__main__': diff --git a/testing/scripts/regression/evita/compare.py b/testing/scripts/regression/evita/compare.py index 9416ad7..944c1e5 100644 --- a/testing/scripts/regression/evita/compare.py +++ b/testing/scripts/regression/evita/compare.py @@ -35,6 +35,7 @@ """ from __future__ import absolute_import +from __future__ import print_function import sys from xml.dom.minidom import parse @@ -90,9 +91,9 @@ def node_as_string(node, text): def print_results(diffs, text): for diff in sorted(diffs): - print diff[1] + print(diff[1]) if diffs: - print "%d differences in %d lines" % (len(diffs), len(text.split("\n"))) + print("%d differences in %d lines" % (len(diffs), len(text.split("\n")))) if __name__ == '__main__': diff --git a/testing/scripts/regression/slinket/compare.py b/testing/scripts/regression/slinket/compare.py index e0e3929..ff951cc 100644 --- a/testing/scripts/regression/slinket/compare.py +++ b/testing/scripts/regression/slinket/compare.py @@ -42,6 +42,7 @@ from __future__ import absolute_import +from __future__ import print_function import sys from xml.dom.minidom import parse @@ -112,13 +113,13 @@ def print_results(diffs, text): left_context = text[p1-20:p1] right_context = text[p2:p2+20] fragment = text[p1:p2] - print "%s %s" % (direction, offsets) - print " %s[%s]%s" % (left_context, fragment, right_context) - print " %s" % node_as_string(slink) - print " %s" % node_as_string(e1) - print " %s" % node_as_string(e2) + print("%s %s" % (direction, offsets)) + print(" %s[%s]%s" % (left_context, fragment, right_context)) + print(" %s" % node_as_string(slink)) + print(" %s" % node_as_string(e1)) + print(" %s" % node_as_string(e2)) if diffs: - print "%d differences in %d lines" % (len(diffs), len(text.split("\n"))) + print("%d differences in %d lines" % (len(diffs), len(text.split("\n")))) if __name__ == '__main__': diff --git a/utilities/FSA-org.py b/utilities/FSA-org.py index 7621696..4368161 100644 --- a/utilities/FSA-org.py +++ b/utilities/FSA-org.py @@ -340,6 +340,7 @@ from __future__ import absolute_import +from __future__ import print_function import string, os, tempfile from types import InstanceType, ListType, TupleType, IntType, LongType, DictType, StringType from six.moves import map @@ -424,10 +425,10 @@ def computeEpsilonClosures(self): # Copying # def create(self, *args): - return apply(self.__class__, args) + return self.__class__(*args) def copy(self, *args): - fsa = apply(self.__class__, args) + fsa = self.__class__(*args) if hasattr(self, 'label'): fsa.label = self.label return fsa @@ -436,7 +437,7 @@ def creationArgs(self): return self.tuple() + (self.getArcMetadata(),) def coerce(self, klass): - return apply(klass, self.creationArgs()) + return klass(*self.creationArgs()) # @@ -602,7 +603,7 @@ def acceptsSubstringOf(self, sequence): #looking for Longest Subsequence def acceptsShortestSubstringOf(self, sequence): """ As accepts() method, but accepting also substrings of sequence. 
Returning length of shortest piece of sequence that has been matched""" - print "\n........Entering acceptsSubstringOf......\n" + print("\n........Entering acceptsSubstringOf......\n") stateSequences = self.findSubstringsOf(sequence, self.initialState, []) #log("\n\nSUBSTRINGS: "+str(stateSequences)) return self.shortestSequence(stateSequences)[0] @@ -827,7 +828,7 @@ def determinized(self): return self if len(self.states) > NUMPY_DETERMINIZATION_CUTOFF and NumFSAUtils and not self.getArcMetadata(): #debugFile.write( "\n..............determinzed option 1") - data = apply(NumFSAUtils.determinize, self.tuple() + (self.epsilonClosure,)) + data = NumFSAUtils.determinize(*self.tuple() + (self.epsilonClosure,)) result = apply(self.copy, data).sorted() result._isDeterminized = 1 return result diff --git a/utilities/FSA.py b/utilities/FSA.py index 8f04f97..e9207be 100644 --- a/utilities/FSA.py +++ b/utilities/FSA.py @@ -420,10 +420,10 @@ def computeEpsilonClosures(self): # Copying # def create(self, *args): - return apply(self.__class__, args) + return self.__class__(*args) def copy(self, *args): - fsa = apply(self.__class__, args) + fsa = self.__class__(*args) if hasattr(self, 'label'): fsa.label = self.label return fsa @@ -432,7 +432,7 @@ def creationArgs(self): return self.tuple() + (self.getArcMetadata(),) def coerce(self, klass): - return apply(klass, self.creationArgs()) + return klass(*self.creationArgs()) # @@ -829,7 +829,7 @@ def determinized(self): return self if len(self.states) > NUMPY_DETERMINIZATION_CUTOFF and NumFSAUtils and not self.getArcMetadata(): #debugFile.write( "\n..............determinzed option 1") - data = apply(NumFSAUtils.determinize, self.tuple() + (self.epsilonClosure,)) + data = NumFSAUtils.determinize(*self.tuple() + (self.epsilonClosure,)) result = apply(self.copy, data).sorted() result._isDeterminized = 1 return result diff --git a/utilities/find.py b/utilities/find.py index 2f411cd..94a2ce4 100644 --- a/utilities/find.py +++ b/utilities/find.py @@ -1,4 +1,5 @@ from __future__ import absolute_import +from __future__ import print_function import os import sys import re @@ -15,7 +16,7 @@ def search_file(name, search_term): line = line.rstrip() if search_term in line: loc = "%s:%d" % (name, line_number) - print("%-30s == %s" % (loc, line.strip())) + print(("%-30s == %s" % (loc, line.strip()))) for root, dirs, files in os.walk(".", topdown=False): diff --git a/utilities/lif.py b/utilities/lif.py index ead56c0..c6a17c6 100644 --- a/utilities/lif.py +++ b/utilities/lif.py @@ -30,6 +30,7 @@ """ from __future__ import absolute_import +from __future__ import print_function import sys import codecs import json @@ -193,9 +194,9 @@ def as_json(self): return d def pp(self): - print self + print(self) for contains in self.metadata["contains"].keys(): - print ' ', contains + print(' ', contains) class Annotation(object): diff --git a/utilities/logger.py b/utilities/logger.py index cec77a3..73d1135 100644 --- a/utilities/logger.py +++ b/utilities/logger.py @@ -52,6 +52,7 @@ from __future__ import absolute_import +from __future__ import print_function import os import sys import inspect @@ -194,12 +195,12 @@ def out(*args): file = path_elements[-1] file = file.replace('.py', '') prefix = 'LOG (' + str(depth) + ') [' + file + '.' 
+ function + ']' - print prefix + ' ', + print(prefix + ' ', end=' ') for arg in args: - print arg, - print + print(arg, end=' ') + print() def outnl(): if STDOUT_PRINTING: - print + print() diff --git a/utilities/make_documentation.py b/utilities/make_documentation.py index a643ac7..b96a893 100644 --- a/utilities/make_documentation.py +++ b/utilities/make_documentation.py @@ -32,6 +32,7 @@ from __future__ import absolute_import +from __future__ import print_function import os import sys import inspect @@ -69,7 +70,7 @@ def print_module_documentation(module): - print module.__name__ + print(module.__name__) filename = os.path.join(DOCUMENTATION_DIR, 'modules', module.__name__ + '.html') docfile = open(filename, 'w') @@ -227,7 +228,7 @@ def print_function_code(identifier, name, fun): try: code = "".join(inspect.getsourcelines(fun)[0]) except IOError: - print "WARNING: could not get source code of %s" % fun + print("WARNING: could not get source code of %s" % fun) code = '' code = trim(code, 0) code = code.replace('<', '<') diff --git a/utilities/mallet.py b/utilities/mallet.py index 1b12d74..8dc3d31 100644 --- a/utilities/mallet.py +++ b/utilities/mallet.py @@ -21,6 +21,7 @@ """ from __future__ import absolute_import +from __future__ import print_function import os, sys from subprocess import Popen, PIPE @@ -137,10 +138,10 @@ def __str__(self): def pp(self): """Pretty priniter for the MalletClassifier.""" - print "" - print " directory - %s" % self.mallet + print("") + print(" directory - %s" % self.mallet) for classifier in self.classifiers.keys(): - print " model - %s" % classifier + print(" model - %s" % classifier) def add_classifiers(self, *classifier_paths): for path in classifier_paths: @@ -201,9 +202,9 @@ def _shell_command(self, classifier, vectors): def _debug_vectors(vectors, out, err): if DEBUG: - print '>>>', vectors - print ' [OUT]', out - print ' [ERR]', err + print('>>>', vectors) + print(' [OUT]', out) + print(' [ERR]', err) """Some notes on classify_command() diff --git a/utilities/wordnet.py b/utilities/wordnet.py index 53ecc4a..02020f2 100644 --- a/utilities/wordnet.py +++ b/utilities/wordnet.py @@ -37,6 +37,7 @@ """ from __future__ import absolute_import +from __future__ import print_function from io import open from six.moves import map from six.moves import filter @@ -921,7 +922,7 @@ def has_key(self, form): def _testKeys(self): """Verify that index lookup can find each word in the index file.""" - print "Testing: ", self + print("Testing: ", self) file = open(self.indexFile.file.name, _FILE_OPEN_MODE) counter = 0 while 1: @@ -930,13 +931,13 @@ def _testKeys(self): if line[0] != ' ': key = line[:line.find(' ')].replace('_', ' ') if (counter % 1000) == 0: - print "%s..." % (key,), + print("%s..." % (key,), end=' ') import sys sys.stdout.flush() counter = counter + 1 self[key] file.close() - print "done." + print("done.") class _IndexFile(object):
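
The hunks above repeat a small set of Python 2 to Python 3 conversions: print statements become print() calls (with end=' ' standing in for the old trailing comma), apply(f, args) becomes f(*args), and raw_input() is reached through six.moves so the same code runs on both interpreters. The snippet below is an illustrative sketch of those idioms only, not part of the patch itself; the function names (greet, call_with_tuple) are made up for the example.

# Sketch of the conversion idioms used throughout this patch.
# greet() and call_with_tuple() are hypothetical helpers for illustration.
from __future__ import absolute_import
from __future__ import print_function

from six.moves import input   # raw_input() on Python 2, input() on Python 3


def greet(name):
    # Python 2 wrote: print "Hello", name,
    # The trailing comma suppressed the newline; with print_function the
    # same effect comes from end=' '.
    print("Hello", name, end=' ')
    print("- welcome")


def call_with_tuple(func, args):
    # Python 2 wrote: return apply(func, args)
    # apply() no longer exists, so the argument tuple is unpacked instead.
    return func(*args)


if __name__ == '__main__':
    greet("world")
    print(call_with_tuple(max, (3, 7)))          # prints 7
    answer = input("Continue? (y/n) ")           # was raw_input() in Python 2
    print("you typed:", answer)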