Minor: Simplify default reward function
thomaswolgast committed Dec 3, 2024
1 parent afe6c76 commit 19715d0
Showing 1 changed file with 2 additions and 6 deletions.
8 changes: 2 additions & 6 deletions opfgym/opf_env.py
@@ -33,7 +33,7 @@ def __init__(self,
                  evaluate_on: str='validation',
                  steps_per_episode: int=1,
                  bus_wise_obs: bool=False,
-                 reward_function: opfgym.RewardFunction=None,
+                 reward_function: str | opfgym.RewardFunction = 'summation',
                  reward_function_params: dict=None,
                  diff_objective: bool=False,
                  add_res_obs: bool=False,
@@ -164,11 +164,7 @@ def __init__(self,

         # Define reward function
         reward_function_params = reward_function_params or {}
-        if reward_function is None:
-            # Default reward
-            self.reward_function = opfgym.reward.Summation(
-                env=self, **reward_function_params)
-        elif isinstance(reward_function, str):
+        if isinstance(reward_function, str):
             # Load by string (e.g. 'Summation' or 'summation')
             reward_class = opfgym.util.load_class_from_module(
                 reward_function, 'opfgym.reward')
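With this change, the reward function is selected by name, with 'summation' as the default instead of None. A minimal sketch of the resulting resolution logic, written as a hypothetical standalone helper (resolve_reward is not part of opfgym); it assumes, mirroring the removed default branch, that the truncated remainder of the string branch instantiates the loaded class with the environment and the params, and that a non-string argument is treated as an already-constructed opfgym.RewardFunction:

import opfgym

def resolve_reward(env, reward_function='summation', reward_function_params=None):
    # Hypothetical helper illustrating the __init__ logic shown in the diff.
    reward_function_params = reward_function_params or {}
    if isinstance(reward_function, str):
        # Load by string (e.g. 'Summation' or 'summation'), as in the diff.
        reward_class = opfgym.util.load_class_from_module(
            reward_function, 'opfgym.reward')
        # Assumption: instantiate the loaded class with the env and params,
        # as the removed default branch did for opfgym.reward.Summation.
        return reward_class(env=env, **reward_function_params)
    # Assumption: anything else is an already-built RewardFunction instance
    # and is used directly.
    return reward_function

Because the default is now the string 'summation' rather than None, the dedicated None branch becomes redundant: the default path goes through the same string-loading code as any other named reward function.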
