Fixed bug in featurizer mapping from candidates to model features.
Brandon Rohrer committed Sep 17, 2018
1 parent 6d37207 commit 3d3d56c
Showing 6 changed files with 114 additions and 91 deletions.
becca/brain.py (18 changes: 10 additions & 8 deletions)
@@ -74,7 +74,7 @@ def __init__(self, world, config=None):
             "name": None,
             "reporting_interval": 1e3,
             "restore": True,
-            "visualize_interval": 1e3,
+            "visualize_interval": 1e4,
         }
         if config is None:
             config = {}
@@ -192,7 +192,6 @@ def __init__(self, world, config=None):
         self.featurizer = Featurizer(
             debug=self.debug,
             n_inputs=self.n_features,
-            threshold=1e3,
         )
         # The model builds sequences of features and goals and reward
         # for making predictions about its world.
@@ -273,18 +272,21 @@ def sense_act_learn(self, sensors, reward):
         feature_activities = self.featurizer.featurize(
             np.concatenate((self.postprocessor.consolidated_commands,
                             input_activities)))
+
         (model_feature_activities,
          conditional_predictions,
          conditional_rewards,
-         conditional_curiosities) = self.model.step(
-             feature_activities, reward)
+         conditional_curiosities
+         ) = self.model.step(feature_activities, reward)
+
         feature_goals, i_goal = self.actor.choose(
             feature_activities=model_feature_activities,
             conditional_predictions=conditional_predictions,
             conditional_rewards=conditional_rewards,
             conditional_curiosities=conditional_curiosities,
         )
-        feature_pool_goals = self.model.update_goals(feature_goals, i_goal)
+        feature_pool_goals = self.model.update_goals(
+            feature_goals, i_goal)

         debug_local = False
         if debug_local:
@@ -299,7 +301,7 @@ def sense_act_learn(self, sensors, reward):

         # Isolate the actions from the rest of the goals.
         self.actions = (self.postprocessor.convert_to_actions(
-                input_goals[:self.n_commands]))
+            input_goals[:self.n_commands]))

         # Update the inputs in a pair of top-down/bottom-up passes.
         # Top-down
@@ -391,8 +393,8 @@ def backup(self):
             print('Pickling error: {0} encountered while saving brain data'.
                   format(perr))
         except Exception as err:
-            print('Unknown error: {0} encountered while saving brain data'.
-                  format(err))
+            print('Unknown error: {0} encountered while saving brain data'
+                  .format(err))
         else:
             success = True
         return success
becca/featurizer.py (112 changes: 61 additions & 51 deletions)
@@ -14,7 +14,6 @@ def __init__(
         self,
         debug=False,
         n_inputs=None,
-        threshold=None,
     ):
         """
         Configure the featurizer.
@@ -25,8 +24,6 @@ def __init__(
         n_inputs : int
             The number of inputs (cables) that each Ziptie will be
             equipped to handle.
-        threshold : float
-            See Ziptie.nucleation_threshold
         """
         self.debug = debug

@@ -66,16 +63,19 @@ def __init__(
         # features, which sparsity helps keep Becca fast.
         self.ziptie = Ziptie(
             n_cables=self.n_inputs,
-            threshold=threshold,
             debug=self.debug)

         # n_features_by_level: int
         #     The total number of features that have been collected so far.
         #     This includes the cable candidate pools from each ziptie.
-        self.n_features = [0, 0]
+        self.n_features_by_level = [0, 0]

         # mapping: 2D array of ints
-        #     The transformation from
+        #     The transformation from candidates (a list of arrays of values)
+        #     to the feature pool.
+        #     If there is a one at [row_i, col_j] then
+        #     candidate row_i maps to feature index col_j.
+        #     feature_pool = np.matmul(candidates, self.mapping)
         self.mapping = np.zeros((0, 0), dtype=np.int)

     def featurize(self, new_candidates):
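As an illustration of the transformation described in the updated comment, here is a minimal, hypothetical sketch with made-up values (not code from this commit). The mapping is a one-hot matrix that routes flattened candidate values to stable feature-pool positions:

    import numpy as np

    # Three candidate values, flattened across levels.
    candidates = np.array([0.2, 0.9, 0.5])
    # mapping[i, j] == 1 means candidate i feeds feature j.
    mapping = np.array([
        [1, 0, 0],  # candidate 0 -> feature 0
        [0, 0, 1],  # candidate 1 -> feature 2
        [0, 1, 0],  # candidate 2 -> feature 1
    ])
    feature_pool = np.matmul(candidates, mapping)
    print(feature_pool)  # [0.2 0.5 0.9]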
@@ -168,7 +168,7 @@ def update_inputs(self):

         resets = []
         for i_level, level_resets in enumerate(all_resets):
-            i_start = np.sum(np.array(self.n_features[:i_level]))
+            i_start = np.sum(np.array(self.n_features_by_level[:i_level]))
             for i_reset in level_resets:
                 resets.append(np.where(
                     self.mapping[i_start + i_reset, :])[0])
@@ -188,10 +188,9 @@ def map_from_feature_pool(self, feature_values):
         -------
         candidate_values: list of array of floats
         """
-        # feature_pool_values = np.matmul(feature_values, self.mapping.T)
         candidate_values = []
         i_last = 0
-        for n_feat in self.n_features:
+        for n_feat in self.n_features_by_level:
             candidate_values.append(
                 feature_values[i_last: i_last + n_feat])
             i_last += n_feat
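Concretely, map_from_feature_pool splits the flat feature vector back into per-level chunks using running offsets. A worked example with assumed level sizes, not taken from the repo:

    import numpy as np

    n_features_by_level = [2, 3]  # assumed sizes for illustration
    feature_values = np.array([0.1, 0.2, 0.3, 0.4, 0.5])

    candidate_values = []
    i_last = 0
    for n_feat in n_features_by_level:
        candidate_values.append(feature_values[i_last: i_last + n_feat])
        i_last += n_feat
    # candidate_values == [array([0.1, 0.2]), array([0.3, 0.4, 0.5])]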
@@ -210,51 +209,62 @@ def map_to_feature_pool(self, candidate_values):
         -------
         feature_values: array of floats
         """
-        # Check whether the number of candidates has expanded at any level
-        # and adapt.
-        n_candidates_by_level = [values.size for values in candidate_values]
-        total_n_candidates = np.sum(np.array(n_candidates_by_level))
-        total_n_features = np.sum(np.array(self.n_features))
-        if (total_n_features < total_n_candidates):
-            # Update needed.
-            delta = []
-            for i_level, candidate_pool in enumerate(candidate_values):
-                delta.append(candidate_pool.size - self.n_features[i_level])
-            # Create a larger map.
-            new_mapping = np.zeros(
-                (total_n_candidates, total_n_candidates), dtype=np.int)
-            new_mapping[:total_n_features, :total_n_features] = self.mapping
-            new_mapping[total_n_features:, total_n_features:] = (
-                np.eye(total_n_candidates - total_n_features))
-
-            # Shift new rows upward to sit with their own levels.
-            last_row = 0
-            for i_level in range(len(candidate_values) - 1):
-                last_row += self.n_features[i_level]
-                start = total_n_features
-                stop = start + delta[i_level]
-                if delta[i_level] > 0:
-                    move_rows = new_mapping[start:stop, :]
-                    new_mapping[
-                        last_row + delta[i_level]:
-                        last_row + delta[i_level]
-                        + self.n_features[i_level + 1], :
-                    ] = new_mapping[
-                        last_row:
-                        last_row
-                        + self.n_features[i_level + 1], :
-                    ]
-                    new_mapping[
-                        last_row: last_row + delta[i_level], :] = move_rows
-                self.n_features[i_level] += delta[i_level]
-                start += delta[i_level]
-                last_row += delta[i_level]
-            self.n_features[-1] += delta[-1]
-            self.mapping = new_mapping
-
+        self.grow_map(candidate_values)
         all_candidate_values = []
         for level_candidate_values in candidate_values:
             all_candidate_values += list(level_candidate_values)
         feature_values = np.matmul(
             np.array(all_candidate_values), self.mapping)
         return feature_values
+
+    def grow_map(self, candidate_values):
+        """
+        Check whether we need to add more candidates to the feature pool.
+
+        New candidates come in appended to the end of their
+        respective input pools. However, feature indices need to
+        stay consistent throughout the life of each feature, so
+        these new candidates are given indices at the
+        end of the currently used set of feature pool indices.
+
+        Parameters
+        ----------
+        candidate_values: list of arrays of floats
+        """
+        # Check whether the number of candidates has expanded
+        # at any level and adapt.
+        n_candidates_by_level = [
+            values.size for values in candidate_values]
+        total_n_candidates = np.sum(np.array(n_candidates_by_level))
+        total_n_features = np.sum(np.array(self.n_features_by_level))
+
+        if (total_n_features < total_n_candidates):
+            # Create a larger map.
+            new_mapping = []
+            i_last_old = 0  # Track the last candidate handled.
+            j_last_new = total_n_features  # Track the last feature assigned.
+
+            for i_level in range(len(self.n_features_by_level)):
+                n_cand = n_candidates_by_level[i_level]
+                n_feat = self.n_features_by_level[i_level]
+                delta = n_cand - n_feat
+
+                level_old_map = np.zeros((n_feat, total_n_candidates))
+                level_old_map[:, :total_n_features] = self.mapping[
+                    i_last_old:i_last_old + n_feat, :]
+                new_mapping.append(level_old_map)
+
+                if delta > 0:
+                    level_new_map = np.zeros((delta, total_n_candidates))
+                    level_new_map[
+                        :,
+                        j_last_new: j_last_new + delta
+                    ] = np.eye(delta)
+                    new_mapping.append(level_new_map)
+
+                    j_last_new += delta
+                    self.n_features_by_level[i_level] += delta
+
+                i_last_old += n_feat
+            self.mapping = np.concatenate(new_mapping)
+        return
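To see why grow_map keeps feature indices stable, which is the point of this fix, here is a hypothetical walk-through with made-up sizes that mirrors the logic of the new method rather than calling it directly:

    import numpy as np

    # Two levels: level 0 starts with 2 features, level 1 with 1.
    n_features_by_level = [2, 1]
    mapping = np.eye(3, dtype=int)  # candidates 0-2 -> features 0-2

    # Level 0 grows from 2 candidates to 3.
    n_candidates_by_level = [3, 1]
    total_n_candidates = 4
    total_n_features = 3

    new_mapping = []
    i_last_old = 0
    j_last_new = total_n_features
    for i_level, n_cand in enumerate(n_candidates_by_level):
        n_feat = n_features_by_level[i_level]
        delta = n_cand - n_feat
        # Existing rows keep their columns, widened to the new size.
        old_rows = np.zeros((n_feat, total_n_candidates), dtype=int)
        old_rows[:, :total_n_features] = mapping[i_last_old:i_last_old + n_feat]
        new_mapping.append(old_rows)
        if delta > 0:
            # New candidates get fresh columns at the end of the pool.
            new_rows = np.zeros((delta, total_n_candidates), dtype=int)
            new_rows[:, j_last_new:j_last_new + delta] = np.eye(delta, dtype=int)
            new_mapping.append(new_rows)
            j_last_new += delta
            n_features_by_level[i_level] += delta
        i_last_old += n_feat

    print(np.concatenate(new_mapping))
    # [[1 0 0 0]     level 0, candidate 0 -> feature 0 (unchanged)
    #  [0 1 0 0]     level 0, candidate 1 -> feature 1 (unchanged)
    #  [0 0 0 1]     level 0, new candidate -> feature 3 (appended)
    #  [0 0 1 0]]    level 1, candidate 0 -> feature 2 (unchanged)

Existing candidates keep their feature columns even when an earlier level grows, which is what keeps model feature indices consistent over the brain's lifetime.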
becca/input_filter.py (22 changes: 13 additions & 9 deletions)
@@ -45,8 +45,8 @@ def __init__(self, debug=False, n_inputs=None, name='filter'):
        #     as the number of candidates.
        #     A 1 in position i, j indicates that candidate i maps to
        #     input j.
-        self.mapping = np.zeros((self.n_inputs * 2, self.n_inputs),
-                                dtype=np.int)
+        self.mapping = np.zeros(
+            (self.n_inputs * 2, self.n_inputs), dtype=np.int)

        # Position in this array shows which candidate is being assigned.
        # The value at that position shows the input index it is
@@ -99,8 +99,8 @@ def update_activities(self, candidate_activities):
        self.n_candidates = self.candidate_activities.size
        capacity = self.mapping.shape[0]
        if self.n_candidates >= capacity:
-            new_mapping = np.zeros((self.n_candidates * 2, self.n_inputs),
-                                   dtype=np.int)
+            new_mapping = np.zeros(
+                (self.n_candidates * 2, self.n_inputs), dtype=np.int)
            new_mapping[:capacity, :] = self.mapping
            self.mapping = new_mapping

@@ -130,8 +130,8 @@ def update_activities(self, candidate_activities):
            self.mapping[:self.n_candidates, :], axis=1) == 0)[0]
        self.i_in_use = np.where(self.mapping)[0]
        self.bench_pressure[self.i_benched] += (
-            self.candidate_activities[self.i_benched] /
-            (tools.epsilon
+            self.candidate_activities[self.i_benched] / (
+                tools.epsilon
                + self.cumulative_activities[self.i_benched]
                * self.pressure_time))

@@ -212,7 +212,9 @@ def update_inputs(self, upstream_resets=None):

        resets = []
        candidate_score = (
-            self.candidate_fitness + self.bench_pressure[self.n_candidates])
+            self.candidate_fitness
+            + self.bench_pressure[self.n_candidates]
+        )
        # Find lowest scoring candidates in use.
        i_lowest_scoring_in_use = np.argsort(
            candidate_score[self.i_in_use])[::-1]
@@ -225,8 +227,10 @@ def update_inputs(self, upstream_resets=None):
        # n_inputs_unassigned = self.n_inputs - n_inputs_used
        n_inputs_unassigned = self.n_inputs - self.i_in_use.size
        i_fill = 0
-        while(n_inputs_unassigned > 0 and
-              i_highest_scoring_benched.size > i_fill):
+        while(
+            n_inputs_unassigned > 0
+            and i_highest_scoring_benched.size > i_fill
+        ):
            i_in = self.i_benched[i_highest_scoring_benched[i_fill]]
            self.mapping[i_in, self.n_inputs - n_inputs_unassigned] = 1
            # self.inverse_mapping[self.n_inputs - n_inputs_unassigned] = i_in
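For context on the bench pressure update reformatted above, a hedged numeric reading with invented values (epsilon and pressure_time stand in for tools.epsilon and self.pressure_time, whose real values live elsewhere in the repo):

    import numpy as np

    epsilon = 1e-8        # stand-in for tools.epsilon
    pressure_time = 1e3   # invented value for illustration
    bench_pressure = np.zeros(2)
    candidate_activities = np.array([0.8, 0.1])
    cumulative_activities = np.array([40.0, 400.0])

    bench_pressure += candidate_activities / (
        epsilon + cumulative_activities * pressure_time)
    # bench_pressure ~= [2.0e-05, 2.5e-07]

Active candidates with little accumulated history build pressure fastest, so a persistently active benched candidate accumulates the score needed to win an input slot from a low-scoring one in use.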
becca/model.py (4 changes: 1 addition & 3 deletions)
@@ -136,7 +136,7 @@ def __init__(
        # curiosity_update_rate : float
        #     One of the factors that determines the rate at which
        #     a prefix increases its curiosity.
-        self.curiosity_update_rate = 3e-3
+        self.curiosity_update_rate = 1e-2

    def step(self, candidate_activities, reward):
        """
@@ -223,7 +223,6 @@ def update_activities(self, candidate_activities):
        feature_activities: array of floats
        previous_feature_activities: array of floats
        """
-        # TODO: incorporate _update_activities() into this
        feature_activities = self.filter.update_activities(
            candidate_activities)

@@ -249,7 +248,6 @@ def calculate_fitness(self):
            The fitness of each of the feature candidate inputs to
            the model.
        """
-
        nb.update_fitness(
            self.feature_fitness,
            self.prefix_occurrences,
becca/model_numba.py (6 changes: 4 additions & 2 deletions)
@@ -31,8 +31,10 @@ def update_prefixes(
        for i_goal in range(n_goals):
            prefix_activities[i_feature, i_goal] *= 1 - prefix_decay_rate

-            new_prefix_activity = (previous_feature_activities[i_feature] *
-                                   goal_activities[i_goal])
+            new_prefix_activity = (
+                previous_feature_activities[i_feature] *
+                goal_activities[i_goal]
+            )
            prefix_activities[i_feature, i_goal] += new_prefix_activity
            prefix_activities[i_feature, i_goal] = min(
                prefix_activities[i_feature, i_goal], 1)
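The reformatted lines implement a leaky integrator: each prefix activity decays by a factor of (1 - prefix_decay_rate), gets bumped by the product of the previous feature activity and the goal activity, and is capped at 1. A standalone sketch with hypothetical values:

    prefix_decay_rate = 0.1
    prefix_activity = 0.5
    previous_feature_activity = 0.6
    goal_activity = 0.8

    prefix_activity *= 1 - prefix_decay_rate  # decay: 0.45
    prefix_activity += previous_feature_activity * goal_activity  # +0.48
    prefix_activity = min(prefix_activity, 1)  # cap at 1
    # prefix_activity ~= 0.93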