Skip to content

Commit

Permalink
Solution col names match model components
Browse files Browse the repository at this point in the history
  • Loading branch information
sjpfenninger committed Feb 13, 2015
1 parent 61b9d23 commit 851fb78
Show file tree
Hide file tree
Showing 5 changed files with 19 additions and 12 deletions.
10 changes: 5 additions & 5 deletions calliope/analysis.py
Original file line number Diff line number Diff line change
Expand Up @@ -154,7 +154,7 @@ def plot_solution(solution, data, carrier='power', demand='demand_power',
'unmet_demand'], additional_types)
stacked_techs = df.query(query_string).index.tolist()
# Put stack in order according to stack_weights
weighted = df.weight.order(ascending=False).index.tolist()
weighted = df.stack_weight.order(ascending=False).index.tolist()
stacked_techs = [y for y in weighted if y in stacked_techs]
names = [df.at[y, 'name'] for y in stacked_techs]
# If no colormap given, derive one from colors given in metadata
Expand Down Expand Up @@ -186,7 +186,7 @@ def plot_installed_capacities(solution, additional_types=None, **kwargs):

df = solution.parameters.e_cap.loc[:, supply_cap]

weighted = solution.metadata.weight.order(ascending=False).index.tolist()
weighted = solution.metadata.stack_weight.order(ascending=False).index.tolist()
stacked_techs = [y for y in weighted if y in df.columns]

df = df.loc[:, stacked_techs] / 1e6
Expand Down Expand Up @@ -348,7 +348,7 @@ def get_delivered_cost(solution, cost_class='monetary', carrier='power',


def get_group_share(solution, techs, group_type='supply',
var='production'):
var='e_prod'):
"""
From ``solution.summary``, get the share of the given list of ``techs``
from the total for the given ``group_type``, for the given ``var``.
Expand Down Expand Up @@ -420,7 +420,7 @@ def areas_below_resolution(solution, resolution):
return list(_get_ranges(selected.index.tolist()))


def get_swi(solution, shares_var='capacity', exclude_patterns=['unmet_demand']):
def get_swi(solution, shares_var='e_cap', exclude_patterns=['unmet_demand']):
"""
Returns the Shannon-Wiener diversity index.
Expand All @@ -441,7 +441,7 @@ def get_swi(solution, shares_var='capacity', exclude_patterns=['unmet_demand']):
return swi


def get_hhi(solution, shares_var='capacity', exclude_patterns=['unmet_demand']):
def get_hhi(solution, shares_var='e_cap', exclude_patterns=['unmet_demand']):
"""
Returns the Herfindahl-Hirschmann diversity index.
Expand Down
14 changes: 7 additions & 7 deletions calliope/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -1399,11 +1399,11 @@ def get_metadata(self):
df.loc[:, 'carrier'] = df.index.map(lambda y: self.get_carrier(y))
get_src_c = lambda y: self.get_source_carrier(y)
df.loc[:, 'source_carrier'] = df.index.map(get_src_c)
df.loc[:, 'weight'] = df.index.map(lambda y: self.get_weight(y))
df.loc[:, 'stack_weight'] = df.index.map(lambda y: self.get_weight(y))
df.loc[:, 'color'] = df.index.map(lambda y: self.get_color(y))
return df

def get_summary(self, sort_by='capacity', carrier='power'):
def get_summary(self, sort_by='e_cap', carrier='power'):
sol = self.solution
# Capacity factor per carrier
df = pd.DataFrame({'cf': sol.capacity_factor.loc[carrier, 'total', :]})
Expand All @@ -1412,11 +1412,11 @@ def get_summary(self, sort_by='capacity', carrier='power'):
# .loc[cost_class, carrier, location, tech]
df['cost_' + k] = sol.levelized_cost.loc[k, carrier, 'total', :]
# Add totals per carrier
df['production'] = sol.totals.loc[carrier, 'ec_prod', :, :].sum(0)
df['consumption'] = sol.totals.loc[carrier, 'ec_con', :, :].sum(0)
df['e_prod'] = sol.totals.loc[carrier, 'ec_prod', :, :].sum(0)
df['e_con'] = sol.totals.loc[carrier, 'ec_con', :, :].sum(0)
# Add other carrier-independent stuff
df['capacity'] = sol.parameters['e_cap'].sum()
df['area'] = sol.parameters['r_area'].sum()
df['e_cap'] = sol.parameters['e_cap'].sum()
df['r_area'] = sol.parameters['r_area'].sum()
return df.sort(columns=sort_by, ascending=False)

def get_shares(self):
Expand All @@ -1434,7 +1434,7 @@ def get_shares(self):
df['group'] = df.index.map(gg)
df['type'] = df.index.map(self.get_parent)

for var in ['production', 'consumption', 'capacity']:
for var in ['e_prod', 'e_con', 'e_cap']:
for index, row in df.iterrows():
group_members = row['members'].split('|')
group_type = row['type']
Expand Down
1 change: 1 addition & 0 deletions changelog.rst
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ Release History
* [new] Run setting ``model_override`` allows specifying the path to a YAML file with overrides for the model configuration, applied at model initialization (path is given relative to the run configuration file used). This is in addition to the existing ``override`` setting, and is applied first (so ``override`` can override ``model_override``).
* [new] Run settings ``output.save_constraints`` and ``output.save_constraints_options``
* [new] Run setting ``parallel.post_run``
* [changed] Solution column names are now more closely aligned with model component names
* [changed] Can specify more than one output format as a list, e.g. ``output.format: ['csv', 'hdf']``
* [changed] Run setting ``parallel.additional_lines`` renamed to ``parallel.pre_run``
* [changed] Better error messages and CLI error handling
Expand Down
4 changes: 4 additions & 0 deletions doc/user/configuration_reference.rst
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,10 @@ See the `YAML website <http://www.yaml.org/>`_ for more general information abou

Calliope internally represents the configuration as :class:`~calliope.utils.AttrDict`\ s, which are a subclass of the built-in Python dictionary data type (``dict``) with added functionality such as YAML reading/writing and attribute access to keys.

.. TODO improve the docs on this warning as well as the underlying messy implementation. perhaps more specific docs on generating parallel runs and the quirks that entails.
.. Warning:: When generating parallel runs with the ``calliope generate`` command-line tool, any ``import`` directive is evaluated immediately, and all imported files are combined into one model configuration file for the parallel runs. This is unlike other settings that point to file system paths, such as ``model_override`` or ``data_path``. As a result, while paths used in ``import`` directives don't need adjustment for parallel runs, other settings that work with file system paths probably do need adjustment to account for the way files are laid out on the system running the parallel runs. For this purpose, the ``data_path_adjustment`` setting inside a ``parallel`` configuration block can change the data path for parallel runs only.

.. _config_reference_model_wide:

Model-wide settings
Expand Down
2 changes: 2 additions & 0 deletions doc/user/run_configuration.rst
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,8 @@ Additional (optional) settings, including debug settings, can be specified in th
.. Note:: If run settings override the ``data_path`` setting and specify a relative path, that path will be interpreted as relative to the run settings file and not the model settings file being overridden.

.. TODO add documentation on special _REPLACE_ key
As an alternative to directly overriding settings within the run configuration file using an ``override`` block, it is also possible to specify an additional model configuration file containing overriding settings, by using the ``model_override: path/to/model_override.yaml`` setting (the path given here is relative to the run configuration file).

The optional settings to adjust the timestep resolution and those for parallel runs are discussed below. For a complete list of the other available settings, see :ref:`config_reference_run` in the configuration reference.
Expand Down

0 comments on commit 851fb78

Please sign in to comment.