Tillgreenmodecore #288

Merged
merged 2 commits on Aug 12, 2018
1 change: 1 addition & 0 deletions .nocover.yaml
@@ -12,6 +12,7 @@ nocover_file_globs:
- coala_quickstart/info_extractors/EditorconfigParsing.py
- coala_quickstart/info_extractors/GemfileInfoExtractor.py
- coala_quickstart/info_extractors/GruntfileInfoExtractor.py
- coala_quickstart/green_mode/green_mode_core.py
Member

Entire core is excluded from coverage??

Contributor Author

No, it's just the master function which calls the other functions. Each individual method is tested with 100% coverage.

Contributor Author

It needed too many mocks of methods like os.walk().

Member

80 lines ... fair enough.

Member

Recheck #274 & #273 above, and INFO_SETTING_MAPS = below, and maybe others below as well that were your code that wasn't covered.

On a separate branch, try removing those rules and see what the coverage looks like. Maybe coverage has increased in those areas to the point that a little effort will make those rules redundant.


nocover_regexes:
# coala_quickstart.py
11 changes: 10 additions & 1 deletion coala_quickstart/coala_quickstart.py
@@ -25,6 +25,10 @@
generate_settings, write_coafile)
from coala_quickstart.generation.SettingsClass import (
collect_bear_settings)
from coala_quickstart.green_mode.green_mode_core import green_mode

MAX_NUM_OF_OPTIONAL_ARGS_ALLOWED_FOR_GREEN_MODE = 5
MAX_NUM_OF_VALUES_OF_OPTIONAL_ARGS_ALLOWED_FOR_GREEN_MODE = 5


def _get_arg_parser():
@@ -110,7 +114,12 @@ def main():
used_languages, printer, arg_parser, extracted_information)

if args.green_mode:
collect_bear_settings(relevant_bears)
bear_settings_obj = collect_bear_settings(relevant_bears)
green_mode(
project_dir, ignore_globs, relevant_bears, bear_settings_obj,
MAX_NUM_OF_OPTIONAL_ARGS_ALLOWED_FOR_GREEN_MODE,
MAX_NUM_OF_VALUES_OF_OPTIONAL_ARGS_ALLOWED_FOR_GREEN_MODE,
printer)

print_relevant_bears(printer, relevant_bears)

123 changes: 123 additions & 0 deletions coala_quickstart/generation/Utilities.py
@@ -3,6 +3,7 @@
import os
from collections import defaultdict
import re
import yaml

from coala_utils.Extensions import exts
from coala_utils.string_processing import unescaped_search_for
@@ -236,3 +237,125 @@ def peek(iterable):
except StopIteration:
return None
return first, itertools.chain([first], iterable)


def contained_in(smaller, bigger):
"""
Takes in two SourceRange objects and checks whether
the first one lies inside the other one.
:param smaller:
The SourceRange object to check for being contained
inside the other one.
:param bigger:
The SourceRange object to check for containing
the other one.
:return:
True if smaller lies inside bigger, else False.
"""
smaller_file = smaller.start.file
bigger_file = bigger.start.file

smaller_start_line = smaller.start.line
smaller_start_column = smaller.start.column
smaller_end_line = smaller.end.line
smaller_end_column = smaller.end.column

bigger_start_line = bigger.start.line
bigger_start_column = bigger.start.column
bigger_end_line = bigger.end.line
bigger_end_column = bigger.end.column

if None in [smaller_start_line, smaller_start_column,
smaller_end_line, smaller_end_column,
bigger_start_line, bigger_start_column,
bigger_end_line, bigger_end_column]:
return False

if not smaller_file == bigger_file:
return False

if smaller_start_line < bigger_start_line:
return False

if smaller_end_line > bigger_end_line:
return False

if smaller_start_line > bigger_start_line and (
smaller_end_line < bigger_end_line):
return True

same_start_line = (smaller_start_line == bigger_start_line)

same_end_line = (smaller_end_line == bigger_end_line)

if same_start_line and same_end_line:
if smaller_start_column < bigger_start_column:
return False
if smaller_end_column > bigger_end_column:
return False
return True

if same_start_line:
if smaller_start_column < bigger_start_column:
return False
return True

assert same_end_line
if smaller_end_column > bigger_end_column:
return False
return True
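
# A minimal usage sketch of contained_in() (illustrative only); it assumes
# coalib's SourceRange.from_values() constructor for building the ranges.
from coalib.results.SourceRange import SourceRange

outer = SourceRange.from_values('a.py', start_line=1, start_column=1,
                                end_line=10, end_column=80)
inner = SourceRange.from_values('a.py', start_line=2, start_column=5,
                                end_line=3, end_column=20)
print(contained_in(inner, outer))  # True: inner lies fully inside outer
print(contained_in(outer, inner))  # False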


def get_yaml_contents(project_data):
"""
Reads a YAML file and returns the data.
:param project_data:
The file path from which to read data.
:return:
The YAML data as python objects.
"""
with open(project_data, 'r') as stream:
return yaml.load(stream)


def dump_yaml_to_file(file, contents):
"""
Writes YAML data to a file.
:param file:
The file to write YAML data to.
:param contents:
The python objects to be written as YAML data.
"""
with open(file, 'w+') as outfile:
yaml.dump(contents, outfile,
default_flow_style=False)
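
# A quick round-trip sketch with the two helpers above; the file name is
# only an example. (Note: yaml.load() without an explicit Loader is
# deprecated in newer PyYAML releases.)
data = {'dir_structure': ['a.py', {'tests': ['test_a.py']}]}
dump_yaml_to_file('.project_data.yaml', data)
assert get_yaml_contents('.project_data.yaml') == data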


def append_to_contents(contents, key, values, settings_key):
"""
Appends data to a dict, adding the received values
to the list of values at a given key or creating
the key if it does not exist.
:param contents:
The dict to append data to.
:param key:
The key to be appended to the dict.
:param values:
The list of values to be appended to the values
at the key in the dict.
:param settings_key:
The outer key in the dict under which the data
has to be appended.
:return:
The dict with appended key and values.
"""
found = False
if settings_key not in contents:
contents[settings_key] = []
for index, obj in enumerate(contents[settings_key]):
if isinstance(obj, dict) and key in obj.keys():
found = True
contents[settings_key][index][key] += values
if not found:
contents[settings_key].append({key: values})

return contents
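
# Sketch of how append_to_contents() grows the settings dict; the key
# names here are only illustrative.
contents = {}
contents = append_to_contents(contents, 'filename_prefix', ['test_'],
                              'green_mode_infinite_value_settings')
contents = append_to_contents(contents, 'filename_prefix', ['feature_'],
                              'green_mode_infinite_value_settings')
# contents == {'green_mode_infinite_value_settings':
#              [{'filename_prefix': ['test_', 'feature_']}]}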
36 changes: 36 additions & 0 deletions coala_quickstart/green_mode/Setting.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
settings_key = 'green_mode_infinite_value_settings'


def find_max_min_of_setting(setting, value, contents, operator):
"""
Computes the min/max value of a setting. This function
is called for every value generated for every file in
the project (excluding ignored files).
:param setting:
The setting for which to find the min/max value.
:param value:
The current value to be compared against the
min/max value so far stored in contents.
:param contents:
The python object to be written to 'PROJECT_DATA'
which contains the min/max value of the setting
encountered until now.
:param operator:
Either the less-than or greater-than operator.
:return:
The contents with the min/max value of the setting
encountered until now, after comparing it with the
current value received by the function.
"""
found = False
for index, item in enumerate(contents[settings_key]):
if isinstance(item, dict) and setting in item:
found = True
position = index
if not found:
contents[settings_key].append({setting: value})
return contents
current_val = contents[settings_key][position][setting]
if operator(value, current_val):
contents[settings_key][position][setting] = value
return contents
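
# Illustrative sketch: find_max_min_of_setting() keeps a running extreme
# across files; operator.lt tracks a minimum, operator.gt a maximum.
# 'max_line_length' is only an example setting name.
import operator

contents = {settings_key: []}
contents = find_max_min_of_setting('max_line_length', 79, contents, operator.lt)
contents = find_max_min_of_setting('max_line_length', 72, contents, operator.lt)
contents = find_max_min_of_setting('max_line_length', 100, contents, operator.lt)
# contents == {'green_mode_infinite_value_settings': [{'max_line_length': 72}]}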
147 changes: 147 additions & 0 deletions coala_quickstart/green_mode/filename_operations.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,147 @@
import os
from copy import deepcopy

from coala_quickstart.generation.Utilities import (
append_to_contents,
)
from coala_quickstart.green_mode.green_mode import (
settings_key,
)


class Node:
def __init__(self, character, parent=None):
self.count = 1
self.character = character
self.children = {}
self.parent = parent

def insert(self, string, idx):
if idx >= len(string):
return
code = ord(string[idx]) # ASCII code
ch = string[idx]
if ch in self.children:
self.children[ch].count += 1
else:
self.children[ch] = Node(string[idx], self)
self.children[ch].insert(string, idx + 1)


class Trie:
"""
Creates a Trie data structure for storing names of files.
"""

def __init__(self):
self.root = Node('')

def insert(self, string):
self.root.insert(string, 0)

# Just a wrapper function.
def get_prefixes(self, min_length, min_files):
"""
Discovers prefixes from the Trie. Prefixes shorter than
min_length or matching fewer than min_files files are
not stored. Returns the prefixes in sorted order.
"""
self.prefixes = {}
self._discover_prefixes(self.root, [], min_length, 0, min_files)
return sorted(self.prefixes.items(), key=lambda x: (x[1], x[0]),
reverse=True)

def _discover_prefixes(self, node, prefix, min_length, len, min_files):
"""
Performs a depth-first search on the trie. Discovers the prefixes in the trie
and stores them in the self.prefixes dictionary.
"""
if node.count < min_files and node.character != '':
return
if len >= min_length:
current_prefix = ''.join(prefix) + node.character
to_delete = []
for i in self.prefixes:
if i in current_prefix:
to_delete.append(i)
for i in to_delete:
self.prefixes.pop(i)
self.prefixes[''.join(prefix) + node.character] = node.count
orig_prefix = deepcopy(prefix)
for ch, ch_node in node.children.items():
prefix.append(node.character)
if (not ch_node.count < node.count) or orig_prefix == []:
self._discover_prefixes(ch_node, prefix, min_length, len + 1,
min_files)
prefix.pop()
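
# A small sketch of the prefix discovery above (the names and thresholds
# are only illustrative): 'test_' is shared by three of the four names,
# so it is the only prefix that survives min_files=3.
trie = Trie()
for name in ('test_api', 'test_cli', 'test_core', 'main'):
    trie.insert(name)
print(trie.get_prefixes(min_length=4, min_files=3))  # [('test_', 3)]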


def get_files_list(contents):
"""
Generates a list containing only the files of the
entire project, from the directory and file
structure written to '.project_data.yaml'.
:param contents:
The python object containing the file and
directory structure written to '.project_data.yaml'.
:return:
The list of all the files in the project.
"""
file_names_list = []
for item in contents:
if not isinstance(item, dict):
file_names_list.append(item)
else:
file_names_list += get_files_list(
item[next(iter(item))])
return file_names_list


def check_filename_prefix_postfix(contents, min_length_of_prefix=6,
min_files_for_prefix=5):
"""
Checks whether the project has files with filenames
sharing a common prefix or suffix.
:param contents:
The python object containing the file and
directory structure written to '.project_data.yaml'.
:param min_length_of_prefix:
The minimum length of a prefix for green_mode to
consider it a valid prefix.
:param min_files_for_prefix:
The minimum number of files a prefix must match
for green_mode to consider it a valid prefix.
:return:
The updated contents with the results found from
the file/directory structure in '.project_data.yaml'.
"""
file_names_list = get_files_list(contents['dir_structure'])
file_names_list = [os.path.splitext(os.path.basename(x))[
0] for x in file_names_list]
file_names_list_reverse = [os.path.splitext(
x)[0][::-1] for x in file_names_list]
trie = Trie()
for file in file_names_list:
trie.insert(file)
prefixes = trie.get_prefixes(min_length_of_prefix, min_files_for_prefix)
trie_reverse = Trie()
for file in file_names_list_reverse:
trie_reverse.insert(file)
suffixes = trie_reverse.get_prefixes(
min_length_of_prefix, min_files_for_prefix)
if len(suffixes) == 0:
suffixes = [('', 0)]
if len(prefixes) == 0:
prefixes = [('', 0)]
prefix_list, suffix_list = [], []
for prefix, freq in prefixes:
prefix_list.append(prefix)
for suffix, freq in suffixes:
suffix_list.append(suffix[::-1])
contents = append_to_contents(contents, 'filename_prefix', prefix_list,
settings_key)
contents = append_to_contents(contents, 'filename_suffix', suffix_list,
settings_key)

return contents
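
# End-to-end sketch on a tiny '.project_data.yaml'-style structure; the
# thresholds are lowered so the toy example yields a prefix. The suffix
# side falls back to ('', 0) because no common suffix is frequent enough.
contents = {'dir_structure': ['test_api.py', 'test_cli.py', 'test_core.py',
                              {'docs': ['index.rst']}]}
contents = check_filename_prefix_postfix(contents,
                                         min_length_of_prefix=4,
                                         min_files_for_prefix=3)
# contents[settings_key] == [{'filename_prefix': ['test_']},
#                            {'filename_suffix': ['']}]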