Merge branch 'main' into dev
joyxyz1994 committed Dec 11, 2024
2 parents b5432c2 + e45b7b4 commit 19943e3
Showing 24 changed files with 1,520 additions and 112 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/build-only.yml
@@ -4,9 +4,9 @@ name: build-only

on:
push:
branches: [beta, dev]
branches: [beta, dev, trial]
pull_request:
branches: [beta, dev]
branches: [beta, dev, trial]

jobs:
build:
20 changes: 20 additions & 0 deletions docs/source/FAQ.rst
@@ -77,6 +77,26 @@ There are multiple possible reasons:
Then when you open the Jupyter Notebook, select the ``<KERNEL NAME>`` kernel when you create a new notebook. You can find more details in this post about `enabling multiple kernels in Jupyter Notebook <https://medium.com/@ace139/enable-multiple-kernels-in-jupyter-notebooks-6098c738fe72>`_.


``underlying object has vanished``
**********************************
This error is related to ``numba`` caching. We haven't figured out the exact mechanism, but clearing the cache helps resolve it. One or both of the following approaches should work:

1. Clear the cache. Remove all ``.pyc``, ``.nbc``, and ``.nbi`` files; you can do this in your CLI with the command below (run it from, or replace ``.`` with, each of your ``thermosteam``, ``biosteam``, ``qsdsan``, and ``exposan`` directories):

.. code::

    get-childitem . -recurse -include *.pyc, *.nbc, *.nbi | remove-item

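The command above is PowerShell-specific. On macOS or Linux, a rough equivalent (a sketch assuming GNU or BSD ``find``; not part of the original instructions) would be:

.. code::

    # remove cached bytecode/numba files under the current directory
    find . \( -name '*.pyc' -o -name '*.nbc' -o -name '*.nbi' \) -delete
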
2. Uninstall and reinstall a different version of ``numba``. For example, if you currently have 0.58.1 and the newest version is 0.60.0, you can do:

.. code::

    pip uninstall numba
    pip install --no-cache-dir numba==0.60.0

The ``--no-cache-dir`` option forces a fresh installation rather than reusing previously downloaded packages. Note that you need to exit your editor and any other programs that are currently using ``numba``; otherwise the uninstallation will be incomplete, you might be prompted to do a manual removal, or this won't work.
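If you are unsure which ``numba`` version is currently installed (the version numbers above are just examples), a quick check, assuming ``pip`` is on your path, is:

.. code::

    # prints the installed numba version, location, and dependencies
    pip show numba
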


``UnicodeDecodeError``
**********************
When using non-English operating systems, you may run into errors similar to the following (cp949 being the case for Korean Windows):
2 changes: 1 addition & 1 deletion docs/source/conf.py
@@ -31,7 +31,7 @@
# built documents.
#
# The short X.Y version.
version = '1.4.0'
version = '1.4.1'
# The full version, including alpha/beta/rc tags.
release = version

2 changes: 1 addition & 1 deletion qsdsan/__init__.py
@@ -45,7 +45,6 @@
Flowsheet = _bst.Flowsheet
main_flowsheet = _bst.main_flowsheet
default_utilities = _bst.default_utilities
CEPCI_by_year = _bst.units.design_tools.CEPCI_by_year

# Global variables
currency = 'USD'
@@ -54,6 +53,7 @@


from . import utils
CEPCI_by_year = utils.indices.tea_indices['CEPCI']
from ._component import *
from ._components import *
from ._sanstream import *
2 changes: 1 addition & 1 deletion qsdsan/_impact_item.py
@@ -822,7 +822,7 @@ def linked_stream(self, new_s):
f'is replaced with {self.ID}.')
else:
warn(f'The original `StreamImpactItem` linked to stream {new_s.ID} '
f'is replaced with upon the creation of a new stream.')
f'is replaced upon the creation of a new stream.')
new_s._stream_impact_item = self
self._linked_stream = new_s

68 changes: 41 additions & 27 deletions qsdsan/_lca.py
@@ -175,6 +175,9 @@ class LCA:
>>> # Retrieve impacts associated with a specific indicator
>>> lca.get_total_impacts()[GWP.ID] # doctest: +ELLIPSIS
349737809...
>>> # Annual results
>>> lca.get_total_impacts(annual=True)[GWP.ID] # doctest: +ELLIPSIS
34973780...
>>> # Or breakdowns of the different categories
>>> lca.get_impact_table('Construction') # doctest: +SKIP
>>> # Below is for testing purpose, you do not need it
@@ -637,12 +640,13 @@ def get_unit_impacts(

return tot

def _append_cat_sum(self, cat_table, cat, tot):
def _append_cat_sum(self, cat_table, cat, tot, annual=False):
num = len(cat_table)
cat_table.loc[num] = '' # initiate a blank spot for value to be added later

suffix = '/yr' if annual else ''

for i in self.indicators:
cat_table[f'{i.ID} [{i.unit}]'][num] = tot[i.ID]
cat_table[f'{i.ID} [{i.unit}{suffix}]'][num] = tot[i.ID]
cat_table[f'Category {i.ID} Ratio'][num] = 1

if cat in ('construction', 'transportation'):
@@ -662,17 +666,21 @@ def get_impact_table(self, category, annual=False):
Parameters
----------
category : str
Can be 'construction', 'transportation', 'stream', or 'other'.
Can be 'Construction', 'Transportation', 'Stream', or 'Other'.
annual : bool
If True, will return the annual impacts considering `uptime_ratio`
instead of across the system lifetime.
'''
time = self.lifetime_hr
sys_yr = self.lifetime
cat = category.lower()
tot_f = getattr(self, f'get_{cat}_impacts')
kwargs = {'annual': annual} if cat != 'other' else {}
# kwargs = {'annual': annual} if cat != 'other' else {}
kwargs = {'annual': annual}
tot = tot_f(**kwargs)

suffix = '/yr' if annual else ''
_append_cat_sum = self._append_cat_sum

if cat in ('construction', 'transportation'):
units = sorted(getattr(self, f'_{cat}_units'),
key=(lambda su: su.ID))
@@ -684,31 +692,35 @@
# Note that item_dct = dict.fromkeys([item.ID for item in items], []) won't work
item_dct = dict.fromkeys([item.ID for item in items])
for item_ID in item_dct.keys():
item_dct[item_ID] = dict(SanUnit=[], Quantity=[])
item_dct[item_ID] = {'SanUnit': [], f'Quantity{suffix}': []}
for su in units:
if not isinstance(su, SanUnit):
continue
for i in getattr(su, cat):
item_dct[i.item.ID]['SanUnit'].append(su.ID)
if cat == 'transportation':
item_dct[i.item.ID]['Quantity'].append(i.quantity*time/i.interval)
quantity = i.quantity*time/i.interval
quantity = quantity/sys_yr if annual else quantity
item_dct[i.item.ID][f'Quantity{suffix}'].append(quantity)
else: # construction
lifetime = i.lifetime or su.lifetime or self.lifetime
if isinstance(lifetime, dict): # in the case that the equipment is not in the unit lifetime dict
lifetime = lifetime.get(i.item.ID) or self.lifetime
constr_ratio = self.lifetime/lifetime if self.annualize_construction else ceil(self.lifetime/lifetime)
item_dct[i.item.ID]['Quantity'].append(i.quantity*constr_ratio)
constr_ratio = sys_yr/lifetime if self.annualize_construction else ceil(sys_yr/lifetime)
quantity = i.quantity * constr_ratio
quantity = quantity/sys_yr if annual else quantity
item_dct[i.item.ID][f'Quantity{suffix}'].append(quantity)

dfs = []
for item in items:
dct = item_dct[item.ID]
dct['SanUnit'].append('Total')
dct['Quantity'] = np.append(dct['Quantity'], sum(dct['Quantity']))
if dct['Quantity'].sum() == 0.: dct['Item Ratio'] = 0
else: dct['Item Ratio'] = dct['Quantity']/dct['Quantity'].sum()*2
dct[f'Quantity{suffix}'] = np.append(dct[f'Quantity{suffix}'], sum(dct[f'Quantity{suffix}']))
if dct[f'Quantity{suffix}'].sum() == 0.: dct['Item Ratio'] = 0
else: dct['Item Ratio'] = dct[f'Quantity{suffix}']/dct[f'Quantity{suffix}'].sum()*2
for i in self.indicators:
if i.ID in item.CFs:
dct[f'{i.ID} [{i.unit}]'] = impact = dct['Quantity']*item.CFs[i.ID]
dct[f'{i.ID} [{i.unit}{suffix}]'] = impact = dct[f'Quantity{suffix}']*item.CFs[i.ID]
dct[f'Category {i.ID} Ratio'] = impact/tot[i.ID]
else:
dct[f'{i.ID} [{i.unit}]'] = dct[f'Category {i.ID} Ratio'] = 0
@@ -721,55 +733,57 @@
dfs.append(df)

table = pd.concat(dfs)
return self._append_cat_sum(table, cat, tot)
return _append_cat_sum(table, cat, tot, annual=annual)

ind_head = sum(([f'{i.ID} [{i.unit}]',
ind_head = sum(([f'{i.ID} [{i.unit}{suffix}]',
f'Category {i.ID} Ratio'] for i in self.indicators), [])

if cat in ('stream', 'streams'):
headings = ['Stream', 'Mass [kg]', *ind_head]
headings = ['Stream', f'Mass [kg]{suffix}', *ind_head]
item_dct = dict.fromkeys(headings)
for key in item_dct.keys():
item_dct[key] = []
for ws_item in self.stream_inventory:
ws = ws_item.linked_stream
item_dct['Stream'].append(ws.ID)
mass = ws_item.flow_getter(ws) * time
item_dct['Mass [kg]'].append(mass)
mass = mass/sys_yr if annual else mass
item_dct[f'Mass [kg]{suffix}'].append(mass)
for ind in self.indicators:
if ind.ID in ws_item.CFs.keys():
impact = ws_item.CFs[ind.ID]*mass
item_dct[f'{ind.ID} [{ind.unit}]'].append(impact)
item_dct[f'{ind.ID} [{ind.unit}{suffix}]'].append(impact)
item_dct[f'Category {ind.ID} Ratio'].append(impact/tot[ind.ID])
else:
item_dct[f'{ind.ID} [{ind.unit}]'].append(0)
item_dct[f'{ind.ID} [{ind.unit}{suffix}]'].append(0)
item_dct[f'Category {ind.ID} Ratio'].append(0)
table = pd.DataFrame.from_dict(item_dct)
table.set_index(['Stream'], inplace=True)
return self._append_cat_sum(table, cat, tot)
return _append_cat_sum(table, cat, tot, annual=annual)

elif cat == 'other':
headings = ['Other', 'Quantity', *ind_head]
headings = ['Other', f'Quantity{suffix}', *ind_head]
item_dct = dict.fromkeys(headings)
for key in item_dct.keys():
item_dct[key] = []
for other_ID in self.other_items.keys():
other = self.other_items[other_ID]['item']
item_dct['Other'].append(f'{other_ID} [{other.functional_unit}]')
item_dct['Other'].append(f'{other_ID}')
quantity = self.other_items[other_ID]['quantity']
item_dct['Quantity'].append(quantity)
quantity = quantity/sys_yr if annual else quantity
item_dct[f'Quantity{suffix}'].append(quantity)
for ind in self.indicators:
if ind.ID in other.CFs.keys():
impact = other.CFs[ind.ID]*quantity
item_dct[f'{ind.ID} [{ind.unit}]'].append(impact)
item_dct[f'{ind.ID} [{ind.unit}{suffix}]'].append(impact)
item_dct[f'Category {ind.ID} Ratio'].append(impact/tot[ind.ID])
else:
item_dct[f'{ind.ID} [{ind.unit}]'].append(0)
item_dct[f'{ind.ID} [{ind.unit}{suffix}]'].append(0)
item_dct[f'Category {ind.ID} Ratio'].append(0)

table = pd.DataFrame.from_dict(item_dct)
table.set_index(['Other'], inplace=True)
return self._append_cat_sum(table, cat, tot)
return _append_cat_sum(table, cat, tot, annual=annual)

raise ValueError(
'category can only be "Construction", "Transportation", "Stream", or "Other", ' \
3 changes: 2 additions & 1 deletion qsdsan/_sanstream.py
@@ -170,12 +170,13 @@ def copy_flow(self, other, IDs=..., *, remove=False, exclude=False):
--------
:func:`copy` for the differences between ``copy``, ``copy_like``, and ``copy_flow``.
'''
stream_impact_item = self.stream_impact_item
Stream.copy_flow(self, other=other, IDs=IDs, remove=remove, exclude=exclude)

if not isinstance(other, SanStream):
return

self._stream_impact_item = None
self._stream_impact_item = stream_impact_item


def flow_proxy(self, ID=None):
6 changes: 4 additions & 2 deletions qsdsan/_sanunit.py
@@ -217,6 +217,7 @@ def __init__(self, ID='', ins=None, outs=(), thermo=None, init_with='WasteStream'
self._assert_compatible_property_package()

self._utility_cost = None
self._recycle_system = None

##### qsdsan-specific #####
for i in (*construction, *transportation, *equipment):
@@ -631,10 +632,11 @@ def results(self, with_units=True, include_utilities=True,
include_total_cost=True, include_installed_cost=False,
include_zeros=True, external_utilities=(), key_hook=None):

if super().results is None: return super().results
results = super().results(with_units, include_utilities,
include_total_cost, include_installed_cost,
include_zeros, external_utilities, key_hook)
if not self.add_OPEX: self.add_OPEX = {'Additional OPEX': 0}
if not hasattr(self, 'add_OPEX'): self.add_OPEX = {'Additional OPEX': 0}
for k, v in self.add_OPEX.items():
if not with_units:
results.loc[(k, '')] = v
@@ -647,7 +649,7 @@ def results(self, with_units=True, include_utilities=True,
results.insert(0, 'Units', '')
results.loc[(k, ''), :] = ('USD/hr', v)
results.columns.name = type(self).__name__
if with_units:
if with_units and results is not None:
results.replace({'USD': f'{currency}', 'USD/hr': f'{currency}/hr'},
inplace=True)
return results
8 changes: 6 additions & 2 deletions qsdsan/_waste_stream.py
@@ -154,7 +154,11 @@ def __init__(self, dct, F_vol, MW, phase, phase_container):
def output(self, index, value):
'''Concentration flows, in mg/L (g/m3).'''
f_mass = value * self.MW[index]
phase = self.phase or self.phase_container.phase
if self.phase:
phase = self.phase
else:
try: phase = self.phase_container._phase
except: phase = self.phase_container
if phase != 'l':
raise AttributeError('Concentration only valid for liquid phase.')
V_sum = self.F_vol
@@ -186,7 +190,7 @@ def by_conc(self, TP):
check_data=False,
)
return conc
indexer.ChemicalMolarFlowIndexer.by_conc = by_conc
ChemicalMolarFlowIndexer.by_conc = by_conc
del by_conc


39 changes: 39 additions & 0 deletions qsdsan/data/process_data/_pm2asm2d.tsv
@@ -0,0 +1,39 @@
X_CHL X_ALG X_CH X_LI S_CO2 S_A S_F S_O2 S_NH S_NO S_P X_N_ALG X_P_ALG S_N2 S_ALK S_I X_I X_S X_H X_AUT
photoadaptation 1
ammonium_uptake -1 1
phosphorus_uptake -1 1
growth_pho 1 ? 1 ? ?
carbohydrate_storage_pho 1 ? 1
lipid_storage_pho 1 ? 1
carbohydrate_growth_pho 1 (-Y_CH_PHO/Y_X_ALG_PHO) ? ? ? ?
lipid_growth_pho 1 (-Y_LI_PHO/Y_X_ALG_PHO) ? ? ? ?
carbohydrate_maintenance_pho -1 ? -1
lipid_maintenance_pho -1 ? -1
endogenous_respiration_pho -1 ? -1 ? ?
growth_ace 1 ? (-1)/Y_X_ALG_HET_ACE ? ? ?
carbohydrate_storage_ace 1 ? (-1)/Y_CH_ND_HET_ACE ?
lipid_storage_ace 1 ? (-1)/Y_LI_ND_HET_ACE ?
carbohydrate_growth_ace 1 (-Y_CH_NR_HET_ACE/Y_X_ALG_HET_ACE) ? ? ? ?
lipid_growth_ace 1 (-Y_LI_NR_HET_ACE/Y_X_ALG_HET_ACE) ? ? ? ?
carbohydrate_maintenance_ace -1 ? -1
lipid_maintenance_ace -1 ? -1
endogenous_respiration_ace -1 ? -1 ? ?
growth_glu 1 ? (-1)/Y_X_ALG_HET_GLU ? ? ?
carbohydrate_storage_glu 1 ? (-1)/Y_CH_ND_HET_GLU ?
lipid_storage_glu 1 ? (-1)/Y_LI_ND_HET_GLU ?
carbohydrate_growth_glu 1 (-Y_CH_NR_HET_GLU/Y_X_ALG_HET_GLU) ? ? ? ?
lipid_growth_glu 1 (-Y_LI_NR_HET_GLU/Y_X_ALG_HET_GLU) ? ? ? ?
carbohydrate_maintenance_glu -1 ? -1
lipid_maintenance_glu -1 ? -1
endogenous_respiration_glu -1 ? -1 ? ?
aero_hydrolysis 1-f_SI ? ? ? f_SI -1
anox_hydrolysis 1-f_SI ? ? ? f_SI -1
anae_hydrolysis 1-f_SI ? ? ? f_SI -1
hetero_growth_S_F (-1)/Y_H 1-1/Y_H ? ? ? 1
hetero_growth_S_A (-1)/Y_H 1-1/Y_H ? ? ? 1
denitri_S_F (-1)/Y_H ? (Y_H-1)/(20/7*Y_H) ? (1-Y_H)/(20/7*Y_H) ? 1
denitri_S_A (-1)/Y_H ? (Y_H-1)/(20/7*Y_H) ? (1-Y_H)/(20/7*Y_H) ? 1
ferment 1 -1 ? ? ?
hetero_lysis ? ? ? f_XI_H 1-f_XI_H -1
auto_aero_growth (Y_A-32/7)/Y_A ? 1/Y_A ? ? 1
auto_lysis ? ? ? f_XI_AUT 1-f_XI_AUT -1
28 changes: 28 additions & 0 deletions qsdsan/data/process_data/_pm2asm2d_1.tsv
@@ -0,0 +1,28 @@
X_CHL X_ALG X_CH X_LI S_CO2 S_A S_F S_O2 S_NH S_NO S_P X_N_ALG X_P_ALG S_N2 S_ALK S_I X_I X_S X_H X_AUT
photoadaptation 1
ammonium_uptake -1 1
phosphorus_uptake -1 1
growth_pho 1 ? 1 ? ?
carbohydrate_storage_pho 1 ? 1
lipid_storage_pho 1 ? 1
carbohydrate_growth_pho 1 (-Y_CH_PHO/Y_X_ALG_PHO) ? ? ? ?
lipid_growth_pho 1 (-Y_LI_PHO/Y_X_ALG_PHO) ? ? ? ?
carbohydrate_maintenance_pho -1 ? -1
lipid_maintenance_pho -1 ? -1
endogenous_respiration_pho -1 ? -1 ? ?
growth_ace 1 ? (-1)/Y_X_ALG_HET_ACE ? ? ?
carbohydrate_storage_ace 1 ? (-1)/Y_CH_ND_HET_ACE ?
lipid_storage_ace 1 ? (-1)/Y_LI_ND_HET_ACE ?
carbohydrate_growth_ace 1 (-Y_CH_NR_HET_ACE/Y_X_ALG_HET_ACE) ? ? ? ?
lipid_growth_ace 1 (-Y_LI_NR_HET_ACE/Y_X_ALG_HET_ACE) ? ? ? ?
carbohydrate_maintenance_ace -1 ? -1
lipid_maintenance_ace -1 ? -1
endogenous_respiration_ace -1 ? -1 ? ?
growth_glu 1 ? (-1)/Y_X_ALG_HET_GLU ? ? ?
carbohydrate_storage_glu 1 ? (-1)/Y_CH_ND_HET_GLU ?
lipid_storage_glu 1 ? (-1)/Y_LI_ND_HET_GLU ?
carbohydrate_growth_glu 1 (-Y_CH_NR_HET_GLU/Y_X_ALG_HET_GLU) ? ? ? ?
lipid_growth_glu 1 (-Y_LI_NR_HET_GLU/Y_X_ALG_HET_GLU) ? ? ? ?
carbohydrate_maintenance_glu -1 ? -1
lipid_maintenance_glu -1 ? -1
endogenous_respiration_glu -1 ? -1 ? ?
12 changes: 12 additions & 0 deletions qsdsan/data/process_data/_pm2asm2d_2.tsv
@@ -0,0 +1,12 @@
X_CHL X_ALG X_CH X_LI S_CO2 S_A S_F S_O2 S_NH S_NO S_P X_N_ALG X_P_ALG S_N2 S_ALK S_I X_I X_S X_H X_AUT
aero_hydrolysis 1-f_SI ? ? ? f_SI -1
anox_hydrolysis 1-f_SI ? ? ? f_SI -1
anae_hydrolysis 1-f_SI ? ? ? f_SI -1
hetero_growth_S_F (-1)/Y_H 1-1/Y_H ? ? ? 1
hetero_growth_S_A (-1)/Y_H 1-1/Y_H ? ? ? 1
denitri_S_F (-1)/Y_H ? (Y_H-1)/(20/7*Y_H) ? (1-Y_H)/(20/7*Y_H) ? 1
denitri_S_A (-1)/Y_H ? (Y_H-1)/(20/7*Y_H) ? (1-Y_H)/(20/7*Y_H) ? 1
ferment 1 -1 ? ? ?
hetero_lysis ? ? ? f_XI_H 1-f_XI_H -1
auto_aero_growth (Y_A-32/7)/Y_A ? 1/Y_A ? ? 1
auto_lysis ? ? ? f_XI_AUT 1-f_XI_AUT -1