diff --git a/release-1.2/404.html b/release-1.2/404.html index f0430aa215..1431b7945e 100644 --- a/release-1.2/404.html +++ b/release-1.2/404.html @@ -14,8 +14,9 @@ + - + @@ -23,15 +24,18 @@ - + - + + + + @@ -66,9 +70,6 @@
Apache License 2.0
ppsci.arch
Arch
- Bases: Layer
Layer
+ Bases: Layer
Base class for Network.
ppsci/arch/base.py
28 + + Source code in ppsci/arch/base.py + 28 29 30 31 @@ -2600,9 +3397,9 @@ ] ) - - + + @@ -2616,9 +3413,10 @@ - - num_params: int + + num_params: int + property @@ -2626,134 +3424,140 @@ ¶ - - + + Return number of parameters within network. - Returns: - - + + Returns: + + + +Name Type + Description + + + -Name Type - Description +int + int + + + + Number of parameters. + + - - - -int - int - - - - Number of parameters. - - - - - - + + + + + + - concat_to_tensor(data_dict, keys, axis=-1) + concat_to_tensor(data_dict, keys, axis=-1) ¶ - - + + Concatenate tensors from dict in the order of given keys. -Parameters: - - - - Name - Type - Description - Default - - - - - data_dict - - Dict[str, Tensor] - - - - Dict contains tensor. - - - - required - - - - keys - - Tuple[str, ...] - - - - Keys tensor fetched from. - - - - required - - - - axis - - int - - - - Axis concatenate at. Defaults to -1. - - - - -1 - - - - - - - Returns: - - - - Type - Description - - - - - - Tuple[Tensor, ...] - - - - Tuple[paddle.Tensor, ...]: Concatenated tensor. - - - - - - - Source code in ppsci/arch/base.py - 63 + Parameters: + + + + Name + Type + Description + Default + + + + + data_dict + + Dict[str, Tensor] + + + + Dict contains tensor. + + + + required + + + + keys + + Tuple[str, ...] + + + + Keys tensor fetched from. + + + + required + + + + axis + + int + + + + Axis concatenate at. Defaults to -1. + + + + -1 + + + + + + + + Returns: + + + + Type + Description + + + + + + Tuple[Tensor, ...] + + + + Tuple[paddle.Tensor, ...]: Concatenated tensor. + + + + + + + + Source code in ppsci/arch/base.py + 63 64 65 66 @@ -2787,56 +3591,60 @@ data = [data_dict[key] for key in keys] return paddle.concat(data, axis) - - + + + + + - register_input_transform(transform) + register_input_transform(transform) ¶ - - + + Register input transform. 
-Parameters: - - - - Name - Type - Description - Default - - - - - transform - - Callable[[Dict[str, Tensor]], Dict[str, Tensor]] - - - - Input transform of network, receive a single tensor dict and return a single tensor dict. - - - - required - - - - - - Source code in ppsci/arch/base.py - 99 + Parameters: + + + + Name + Type + Description + Default + + + + + transform + + Callable[[Dict[str, Tensor]], Dict[str, Tensor]] + + + + Input transform of network, receive a single tensor dict and return a single tensor dict. + + + + required + + + + + + + Source code in ppsci/arch/base.py + 99 100 101 102 @@ -2858,57 +3666,61 @@ """ self._input_transform = transform - - + + + + + - register_output_transform(transform) + register_output_transform(transform) ¶ - - + + Register output transform. -Parameters: - - - - Name - Type - Description - Default - - - - - transform - - Callable[[Dict[str, Tensor], Dict[str, Tensor]], Dict[str, Tensor]] - - - - Output transform of network, receive two single tensor dict(raw input + + Parameters: + + + + Name + Type + Description + Default + + + + + transform + + Callable[[Dict[str, Tensor], Dict[str, Tensor]], Dict[str, Tensor]] + + + + Output transform of network, receive two single tensor dict(raw input and raw output) and return a single tensor dict(transformed output). - - - - required - - - - + + + + required + + + + - - Source code in ppsci/arch/base.py - 111 + + Source code in ppsci/arch/base.py + 111 112 113 114 @@ -2938,107 +3750,112 @@ """ self._output_transform = transform - - + + + + + - split_to_dict(data_tensor, keys, axis=-1) + split_to_dict(data_tensor, keys, axis=-1) ¶ - - + + Split tensor and wrap into a dict by given keys. -Parameters: - - - - Name - Type - Description - Default - - - - - data_tensor - - Tensor - - - - Tensor to be split. - - - - required - - - - keys - - Tuple[str, ...] - - - - Keys tensor mapping to. - - - - required - - - - axis - - int - - - - Axis split at. Defaults to -1. 
- - - - -1 - - - - - - - Returns: - - - - Type - Description - - - - - - Dict[str, Tensor] - - - - Dict[str, paddle.Tensor]: Dict contains tensor. - - - - - - - Source code in ppsci/arch/base.py - 81 + Parameters: + + + + Name + Type + Description + Default + + + + + data_tensor + + Tensor + + + + Tensor to be split. + + + + required + + + + keys + + Tuple[str, ...] + + + + Keys tensor mapping to. + + + + required + + + + axis + + int + + + + Axis split at. Defaults to -1. + + + + -1 + + + + + + + + Returns: + + + + Type + Description + + + + + + Dict[str, Tensor] + + + + Dict[str, paddle.Tensor]: Dict contains tensor. + + + + + + + + Source code in ppsci/arch/base.py + 81 82 83 84 @@ -3072,8 +3889,8 @@ data = paddle.split(data_tensor, len(keys), axis=axis) return {key: data[i] for i, key in enumerate(keys)} - - + + @@ -3081,7 +3898,7 @@ - + @@ -3089,175 +3906,178 @@ + - AMGNet + AMGNet ¶ - - - Bases: Layer - + + + Bases: Layer + A Multi-scale Graph neural Network model based on Encoder-Process-Decoder structure for flow field prediction. https://doi.org/10.1080/09540091.2022.2131737 Code reference: https://github.com/baoshiaijhin/amgnet -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input", ). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("pred", ). - - - - required - - - - input_dim - - int - - - - Number of input dimension. - - - - required - - - - output_dim - - int - - - - Number of output dimension. - - - - required - - - - latent_dim - - int - - - - Number of hidden(feature) dimension. - - - - required - - - - num_layers - - int - - - - Number of layer(s). - - - - required - - - - message_passing_aggregator - - Literal['sum'] - - - - Message aggregator method in graph. + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input", ). 
+ + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("pred", ). + + + + required + + + + input_dim + + int + + + + Number of input dimension. + + + + required + + + + output_dim + + int + + + + Number of output dimension. + + + + required + + + + latent_dim + + int + + + + Number of hidden(feature) dimension. + + + + required + + + + num_layers + + int + + + + Number of layer(s). + + + + required + + + + message_passing_aggregator + + Literal['sum'] + + + + Message aggregator method in graph. Only "sum" available now. - - - - required - - - - message_passing_steps - - int - - - - Message passing steps in graph. - - - - required - - - - speed - - str - - - - Whether use vanilla method or fast method for graph_connectivity + + + + required + + + + message_passing_steps + + int + + + + Message passing steps in graph. + + + + required + + + + speed + + str + + + + Whether use vanilla method or fast method for graph_connectivity computation. - - - - required - - - - + + + + required + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> model = ppsci.arch.AMGNet(("input", ), ("pred", ), 5, 3, 64, 2) - - Source code in ppsci/arch/amgnet.py - 559 + + Source code in ppsci/arch/amgnet.py + 559 560 561 562 @@ -3431,9 +4251,9 @@ return node_features - - + + @@ -3449,7 +4269,7 @@ - + @@ -3457,171 +4277,174 @@ + - MLP + MLP ¶ - - - Bases: Arch - + + + Bases: Arch + Multi layer perceptron network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("x", "y", "z"). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("u", "v", "w"). - - - - required - - - - num_layers - - int - - - - Number of hidden layers. - - - - required - - - - hidden_size - - Union[int, Tuple[int, ...]] - - - - Number of hidden size. 
+ + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("x", "y", "z"). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("u", "v", "w"). + + + + required + + + + num_layers + + int + + + + Number of hidden layers. + + + + required + + + + hidden_size + + Union[int, Tuple[int, ...]] + + + + Number of hidden size. An integer for all layers, or list of integer specify each layer's size. - - - - required - - - - activation - - str - - - - Name of activation function. Defaults to "tanh". - - - - 'tanh' - - - - skip_connection - - bool - - - - Whether to use skip connection. Defaults to False. - - - - False - - - - weight_norm - - bool - - - - Whether to apply weight norm on parameter(s). Defaults to False. - - - - False - - - - input_dim - - Optional[int] - - - - Number of input's dimension. Defaults to None. - - - - None - - - - output_dim - - Optional[int] - - - - Number of output's dimension. Defaults to None. - - - - None - - - - + + + + required + + + + activation + + str + + + + Name of activation function. Defaults to "tanh". + + + + 'tanh' + + + + skip_connection + + bool + + + + Whether to use skip connection. Defaults to False. + + + + False + + + + weight_norm + + bool + + + + Whether to apply weight norm on parameter(s). Defaults to False. + + + + False + + + + input_dim + + Optional[int] + + + + Number of input's dimension. Defaults to None. + + + + None + + + + output_dim + + Optional[int] + + + + Number of output's dimension. Defaults to None. 
+ + + + None + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.MLP(("x", "y"), ("u", "v"), 5, 128) - - Source code in ppsci/arch/mlp.py - 53 + + Source code in ppsci/arch/mlp.py + 53 54 55 56 @@ -3851,9 +4674,9 @@ y = self._output_transform(x, y) return y - - + + @@ -3869,7 +4692,7 @@ - + @@ -3877,265 +4700,268 @@ + - DeepONet + DeepONet ¶ - - - Bases: Arch - + + + Bases: Arch + Deep operator network. Lu et al. Learning nonlinear operators via DeepONet based on the universal approximation theorem of operators. Nat Mach Intell, 2021. -Parameters: - - - - Name - Type - Description - Default - - - - - u_key - - str - - - - Name of function data for input function u(x). - - - - required - - - - y_key - - str - - - - Name of location data for input function G(u). - - - - required - - - - G_key - - str - - - - Output name of predicted G(u)(y). - - - - required - - - - num_loc - - int - - - - Number of sampled u(x), i.e. m in paper. - - - - required - - - - num_features - - int - - - - Number of features extracted from u(x), same for y. - - - - required - - - - branch_num_layers - - int - - - - Number of hidden layers of branch net. - - - - required - - - - trunk_num_layers - - int - - - - Number of hidden layers of trunk net. - - - - required - - - - branch_hidden_size - - Union[int, Tuple[int, ...]] - - - - Number of hidden size of branch net. + + Parameters: + + + + Name + Type + Description + Default + + + + + u_key + + str + + + + Name of function data for input function u(x). + + + + required + + + + y_key + + str + + + + Name of location data for input function G(u). + + + + required + + + + G_key + + str + + + + Output name of predicted G(u)(y). + + + + required + + + + num_loc + + int + + + + Number of sampled u(x), i.e. m in paper. + + + + required + + + + num_features + + int + + + + Number of features extracted from u(x), same for y. 
+ + + + required + + + + branch_num_layers + + int + + + + Number of hidden layers of branch net. + + + + required + + + + trunk_num_layers + + int + + + + Number of hidden layers of trunk net. + + + + required + + + + branch_hidden_size + + Union[int, Tuple[int, ...]] + + + + Number of hidden size of branch net. An integer for all layers, or list of integer specify each layer's size. - - - - required - - - - trunk_hidden_size - - Union[int, Tuple[int, ...]] - - - - Number of hidden size of trunk net. + + + + required + + + + trunk_hidden_size + + Union[int, Tuple[int, ...]] + + + + Number of hidden size of trunk net. An integer for all layers, or list of integer specify each layer's size. - - - - required - - - - branch_skip_connection - - bool - - - - Whether to use skip connection for branch net. Defaults to False. - - - - False - - - - trunk_skip_connection - - bool - - - - Whether to use skip connection for trunk net. Defaults to False. - - - - False - - - - branch_activation - - str - - - - Name of activation function. Defaults to "tanh". - - - - 'tanh' - - - - trunk_activation - - str - - - - Name of activation function. Defaults to "tanh". - - - - 'tanh' - - - - branch_weight_norm - - bool - - - - Whether to apply weight norm on parameter(s) for branch net. Defaults to False. - - - - False - - - - trunk_weight_norm - - bool - - - - Whether to apply weight norm on parameter(s) for trunk net. Defaults to False. - - - - False - - - - use_bias - - bool - - - - Whether to add bias on predicted G(u)(y). Defaults to True. - - - - True - - - - + + + + required + + + + branch_skip_connection + + bool + + + + Whether to use skip connection for branch net. Defaults to False. + + + + False + + + + trunk_skip_connection + + bool + + + + Whether to use skip connection for trunk net. Defaults to False. + + + + False + + + + branch_activation + + str + + + + Name of activation function. Defaults to "tanh". 
+ + + + 'tanh' + + + + trunk_activation + + str + + + + Name of activation function. Defaults to "tanh". + + + + 'tanh' + + + + branch_weight_norm + + bool + + + + Whether to apply weight norm on parameter(s) for branch net. Defaults to False. + + + + False + + + + trunk_weight_norm + + bool + + + + Whether to apply weight norm on parameter(s) for trunk net. Defaults to False. + + + + False + + + + use_bias + + bool + + + + Whether to add bias on predicted G(u)(y). Defaults to True. + + + + True + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> model = ppsci.arch.DeepONet( ... "u", "y", "G", ... 100, 40, @@ -4146,9 +4972,9 @@ ... ) - - Source code in ppsci/arch/deeponet.py - 28 + + Source code in ppsci/arch/deeponet.py + 28 29 30 31 @@ -4390,9 +5216,9 @@ return result_dict - - + + @@ -4408,7 +5234,7 @@ - + @@ -4416,100 +5242,103 @@ + - DeepPhyLSTM + DeepPhyLSTM ¶ - - - Bases: Arch - + + + Bases: Arch + DeepPhyLSTM init function. -Parameters: - - - - Name - Type - Description - Default - - - - - input_size - - int - - - - The input size. - - - - required - - - - output_size - - int - - - - The output size. - - - - required - - - - hidden_size - - int - - - - The hidden size. Defaults to 100. - - - - 100 - - - - model_type - - int - - - - The model type, value is 2 or 3, 2 indicates having two sub-models, 3 indicates having three submodels. Defaults to 2. - - - - 2 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_size + + int + + + + The input size. + + + + required + + + + output_size + + int + + + + The output size. + + + + required + + + + hidden_size + + int + + + + The hidden size. Defaults to 100. + + + + 100 + + + + model_type + + int + + + + The model type, value is 2 or 3, 2 indicates having two sub-models, 3 indicates having three submodels. Defaults to 2. 
+ + + + 2 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.DeepPhyLSTM(1, 1, 100) - - Source code in ppsci/arch/phylstm.py - 21 + + Source code in ppsci/arch/phylstm.py + 21 22 23 24 @@ -4803,9 +5632,9 @@ "g_dot_pred_c": g_dot_pred_c, } - - + + @@ -4821,7 +5650,7 @@ - + @@ -4829,156 +5658,159 @@ + - LorenzEmbedding + LorenzEmbedding ¶ - - - Bases: Arch - + + + Bases: Arch + Embedding Koopman model for the Lorenz ODE system. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("states",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Output keys, such as ("pred_states", "recover_states"). - - - - required - - - - mean - - Optional[Tuple[float, ...]] - - - - Mean of training dataset. Defaults to None. - - - - None - - - - std - - Optional[Tuple[float, ...]] - - - - Standard Deviation of training dataset. Defaults to None. - - - - None - - - - input_size - - int - - - - Size of input data. Defaults to 3. - - - - 3 - - - - hidden_size - - int - - - - Number of hidden size. Defaults to 500. - - - - 500 - - - - embed_size - - int - - - - Number of embedding size. Defaults to 32. - - - - 32 - - - - drop - - float - - - - Probability of dropout the units. Defaults to 0.0. - - - - 0.0 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("states",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Output keys, such as ("pred_states", "recover_states"). + + + + required + + + + mean + + Optional[Tuple[float, ...]] + + + + Mean of training dataset. Defaults to None. + + + + None + + + + std + + Optional[Tuple[float, ...]] + + + + Standard Deviation of training dataset. Defaults to None. + + + + None + + + + input_size + + int + + + + Size of input data. Defaults to 3. + + + + 3 + + + + hidden_size + + int + + + + Number of hidden size. 
Defaults to 500. + + + + 500 + + + + embed_size + + int + + + + Number of embedding size. Defaults to 32. + + + + 32 + + + + drop + + float + + + + Probability of dropout the units. Defaults to 0.0. + + + + 0.0 + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> model = ppsci.arch.LorenzEmbedding(("x", "y"), ("u", "v")) - - Source code in ppsci/arch/embedding_koopman.py - 36 + + Source code in ppsci/arch/embedding_koopman.py + 36 37 38 39 @@ -5298,9 +6130,9 @@ y = self._output_transform(x, y) return y - - + + @@ -5316,7 +6148,7 @@ - + @@ -5324,156 +6156,159 @@ + - RosslerEmbedding + RosslerEmbedding ¶ - - - Bases: LorenzEmbedding - + + + Bases: LorenzEmbedding + Embedding Koopman model for the Rossler ODE system. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("states",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Output keys, such as ("pred_states", "recover_states"). - - - - required - - - - mean - - Optional[Tuple[float, ...]] - - - - Mean of training dataset. Defaults to None. - - - - None - - - - std - - Optional[Tuple[float, ...]] - - - - Standard Deviation of training dataset. Defaults to None. - - - - None - - - - input_size - - int - - - - Size of input data. Defaults to 3. - - - - 3 - - - - hidden_size - - int - - - - Number of hidden size. Defaults to 500. - - - - 500 - - - - embed_size - - int - - - - Number of embedding size. Defaults to 32. - - - - 32 - - - - drop - - float - - - - Probability of dropout the units. Defaults to 0.0. - - - - 0.0 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("states",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Output keys, such as ("pred_states", "recover_states"). + + + + required + + + + mean + + Optional[Tuple[float, ...]] + + + + Mean of training dataset. Defaults to None. 
+ + + + None + + + + std + + Optional[Tuple[float, ...]] + + + + Standard Deviation of training dataset. Defaults to None. + + + + None + + + + input_size + + int + + + + Size of input data. Defaults to 3. + + + + 3 + + + + hidden_size + + int + + + + Number of hidden size. Defaults to 500. + + + + 500 + + + + embed_size + + int + + + + Number of embedding size. Defaults to 32. + + + + 32 + + + + drop + + float + + + + Probability of dropout the units. Defaults to 0.0. + + + + 0.0 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.RosslerEmbedding(("x", "y"), ("u", "v")) - - Source code in ppsci/arch/embedding_koopman.py - 198 + + Source code in ppsci/arch/embedding_koopman.py + 198 199 200 201 @@ -5551,9 +6386,9 @@ drop, ) - - + + @@ -5569,7 +6404,7 @@ - + @@ -5577,156 +6412,159 @@ + - CylinderEmbedding + CylinderEmbedding ¶ - - - Bases: Arch - + + + Bases: Arch + Embedding Koopman model for the Cylinder system. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("states", "visc"). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Output keys, such as ("pred_states", "recover_states"). - - - - required - - - - mean - - Optional[Tuple[float, ...]] - - - - Mean of training dataset. Defaults to None. - - - - None - - - - std - - Optional[Tuple[float, ...]] - - - - Standard Deviation of training dataset. Defaults to None. - - - - None - - - - embed_size - - int - - - - Number of embedding size. Defaults to 128. - - - - 128 - - - - encoder_channels - - Optional[Tuple[int, ...]] - - - - Number of channels in encoder network. Defaults to None. - - - - None - - - - decoder_channels - - Optional[Tuple[int, ...]] - - - - Number of channels in decoder network. Defaults to None. - - - - None - - - - drop - - float - - - - Probability of dropout the units. Defaults to 0.0. 
- - - - 0.0 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("states", "visc"). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Output keys, such as ("pred_states", "recover_states"). + + + + required + + + + mean + + Optional[Tuple[float, ...]] + + + + Mean of training dataset. Defaults to None. + + + + None + + + + std + + Optional[Tuple[float, ...]] + + + + Standard Deviation of training dataset. Defaults to None. + + + + None + + + + embed_size + + int + + + + Number of embedding size. Defaults to 128. + + + + 128 + + + + encoder_channels + + Optional[Tuple[int, ...]] + + + + Number of channels in encoder network. Defaults to None. + + + + None + + + + decoder_channels + + Optional[Tuple[int, ...]] + + + + Number of channels in decoder network. Defaults to None. + + + + None + + + + drop + + float + + + + Probability of dropout the units. Defaults to 0.0. + + + + 0.0 + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> model = ppsci.arch.CylinderEmbedding(("x", "y"), ("u", "v")) - - Source code in ppsci/arch/embedding_koopman.py - 239 + + Source code in ppsci/arch/embedding_koopman.py + 239 240 241 242 @@ -6250,9 +7088,9 @@ y = self._output_transform(x, y) return y - - + + @@ -6268,7 +7106,7 @@ - + @@ -6276,155 +7114,158 @@ + - Generator + Generator ¶ - - - Bases: Arch - + + + Bases: Arch + Generator Net of GAN. Attention, the net using a kind of variant of ResBlock which is unique to "tempoGAN" example but not an open source network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input1", "input2"). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output1", "output2"). - - - - required - - - - in_channel - - int - - - - Number of input channels of the first conv layer. 
- - - - required - - - - out_channels_tuple - - Tuple[Tuple[int, ...], ...] - - - - Number of output channels of all conv layers, + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input1", "input2"). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output1", "output2"). + + + + required + + + + in_channel + + int + + + + Number of input channels of the first conv layer. + + + + required + + + + out_channels_tuple + + Tuple[Tuple[int, ...], ...] + + + + Number of output channels of all conv layers, such as [[out_res0_conv0, out_res0_conv1], [out_res1_conv0, out_res1_conv1]] - - - - required - - - - kernel_sizes_tuple - - Tuple[Tuple[int, ...], ...] - - - - Number of kernel_size of all conv layers, + + + + required + + + + kernel_sizes_tuple + + Tuple[Tuple[int, ...], ...] + + + + Number of kernel_size of all conv layers, such as [[kernel_size_res0_conv0, kernel_size_res0_conv1], [kernel_size_res1_conv0, kernel_size_res1_conv1]] - - - - required - - - - strides_tuple - - Tuple[Tuple[int, ...], ...] - - - - Number of stride of all conv layers, + + + + required + + + + strides_tuple + + Tuple[Tuple[int, ...], ...] + + + + Number of stride of all conv layers, such as [[stride_res0_conv0, stride_res0_conv1], [stride_res1_conv0, stride_res1_conv1]] - - - - required - - - - use_bns_tuple - - Tuple[Tuple[bool, ...], ...] - - - - Whether to use the batch_norm layer after each conv layer. - - - - required - - - - acts_tuple - - Tuple[Tuple[str, ...], ...] - - - - Whether to use the activation layer after each conv layer. If so, witch activation to use, + + + + required + + + + use_bns_tuple + + Tuple[Tuple[bool, ...], ...] + + + + Whether to use the batch_norm layer after each conv layer. + + + + required + + + + acts_tuple + + Tuple[Tuple[str, ...], ...] + + + + Whether to use the activation layer after each conv layer. 
If so, witch activation to use, such as [[act_res0_conv0, act_res0_conv1], [act_res1_conv0, act_res1_conv1]] - - - - required - - - - + + + + required + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> in_channel = 1 >>> rb_channel0 = (2, 8, 8) >>> rb_channel1 = (128, 128, 128) @@ -6438,9 +7279,9 @@ >>> model = ppsci.arch.Generator(("in",), ("out",), in_channel, out_channels_tuple, kernel_sizes_tuple, strides_tuple, use_bns_tuple, acts_tuple) - - Source code in ppsci/arch/gan.py - 154 + + Source code in ppsci/arch/gan.py + 154 155 156 157 @@ -6628,9 +7469,9 @@ y = self._output_transform(x, y) return y - - + + @@ -6646,7 +7487,7 @@ - + @@ -6654,169 +7495,172 @@ + - Discriminator + Discriminator ¶ - - - Bases: Arch - + + + Bases: Arch + Discriminator Net of GAN. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input1", "input2"). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output1", "output2"). - - - - required - - - - in_channel - - int - - - - Number of input channels of the first conv layer. - - - - required - - - - out_channels - - Tuple[int, ...] - - - - Number of output channels of all conv layers, + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input1", "input2"). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output1", "output2"). + + + + required + + + + in_channel + + int + + + + Number of input channels of the first conv layer. + + + + required + + + + out_channels + + Tuple[int, ...] + + + + Number of output channels of all conv layers, such as (out_conv0, out_conv1, out_conv2). - - - - required - - - - fc_channel - - int - - - - Number of input features of linear layer. 
Number of output features of the layer + + + + required + + + + fc_channel + + int + + + + Number of input features of linear layer. Number of output features of the layer is set to 1 in this Net to construct a fully_connected layer. - - - - required - - - - kernel_sizes - - Tuple[int, ...] - - - - Number of kernel_size of all conv layers, + + + + required + + + + kernel_sizes + + Tuple[int, ...] + + + + Number of kernel_size of all conv layers, such as (kernel_size_conv0, kernel_size_conv1, kernel_size_conv2). - - - - required - - - - strides - - Tuple[int, ...] - - - - Number of stride of all conv layers, + + + + required + + + + strides + + Tuple[int, ...] + + + + Number of stride of all conv layers, such as (stride_conv0, stride_conv1, stride_conv2). - - - - required - - - - use_bns - - Tuple[bool, ...] - - - - Whether to use the batch_norm layer after each conv layer. - - - - required - - - - acts - - Tuple[str, ...] - - - - Whether to use the activation layer after each conv layer. If so, witch activation to use, + + + + required + + + + use_bns + + Tuple[bool, ...] + + + + Whether to use the batch_norm layer after each conv layer. + + + + required + + + + acts + + Tuple[str, ...] + + + + Whether to use the activation layer after each conv layer. If so, witch activation to use, such as (act_conv0, act_conv1, act_conv2). 
- - - - required - - - - + + + + required + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> in_channel = 2 >>> in_channel_tempo = 3 >>> out_channels = (32, 64, 128, 256) @@ -6829,9 +7673,9 @@ >>> model = ppsci.arch.Discriminator(("in_1","in_2"), output_keys_disc, in_channel, out_channels, fc_channel, kernel_sizes, strides, use_bns, acts) - - Source code in ppsci/arch/gan.py - 250 + + Source code in ppsci/arch/gan.py + 250 251 252 253 @@ -7085,9 +7929,9 @@ return {keys[0]: data_list[0]} return {key: data_list[i] for i, key in enumerate(keys)} - - + + @@ -7099,17 +7943,20 @@ + + + - split_to_dict(data_list, keys) + split_to_dict(data_list, keys) ¶ - - + + Overwrite of split_to_dict() method belongs to Class base.Arch. Reason for overwriting is there is no concat_to_tensor() method called in "tempoGAN" example. That is because input in "tempoGAN" example is not in a regular format, but a format like: @@ -7119,74 +7966,76 @@ } -Parameters: - - - - Name - Type - Description - Default - - - - - data_list - - List[Tensor] - - - - The data to be split. It should be a list of tensor(s), but not a paddle.Tensor. - - - - required - - - - keys - - Tuple[str, ...] - - - - Keys of outputs. - - - - required - - - - - - - Returns: - - - - Type - Description - - - - - - Dict[str, Tensor] - - - - Dict[str, paddle.Tensor]: Dict with split data. - - - - - - - Source code in ppsci/arch/gan.py - 355 + Parameters: + + + + Name + Type + Description + Default + + + + + data_list + + List[Tensor] + + + + The data to be split. It should be a list of tensor(s), but not a paddle.Tensor. + + + + required + + + + keys + + Tuple[str, ...] + + + + Keys of outputs. + + + + required + + + + + + + + Returns: + + + + Type + Description + + + + + + Dict[str, Tensor] + + + + Dict[str, paddle.Tensor]: Dict with split data. 
+ + + + + + + + Source code in ppsci/arch/gan.py + 355 356 357 358 @@ -7230,8 +8079,8 @@ return {keys[0]: data_list[0]} return {key: data_list[i] for i, key in enumerate(keys)} - - + + @@ -7239,7 +8088,7 @@ - + @@ -7247,184 +8096,187 @@ + - PhysformerGPT2 + PhysformerGPT2 ¶ - - - Bases: Arch - + + + Bases: Arch + Transformer decoder model for modeling physics. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("embeds",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Output keys, such as ("pred_embeds",). - - - - required - - - - num_layers - - int - - - - Number of transformer layers. - - - - required - - - - num_ctx - - int - - - - Context length of block. - - - - required - - - - embed_size - - int - - - - The number of embedding size. - - - - required - - - - num_heads - - int - - - - The number of heads in multi-head attention. - - - - required - - - - embd_pdrop - - float - - - - The dropout probability used on embedding features. Defaults to 0.0. - - - - 0.0 - - - - attn_pdrop - - float - - - - The dropout probability used on attention weights. Defaults to 0.0. - - - - 0.0 - - - - resid_pdrop - - float - - - - The dropout probability used on block outputs. Defaults to 0.0. - - - - 0.0 - - - - initializer_range - - float - - - - Initializer range of linear layer. Defaults to 0.05. - - - - 0.05 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("embeds",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Output keys, such as ("pred_embeds",). + + + + required + + + + num_layers + + int + + + + Number of transformer layers. + + + + required + + + + num_ctx + + int + + + + Context length of block. + + + + required + + + + embed_size + + int + + + + The number of embedding size. 
+ + + + required + + + + num_heads + + int + + + + The number of heads in multi-head attention. + + + + required + + + + embd_pdrop + + float + + + + The dropout probability used on embedding features. Defaults to 0.0. + + + + 0.0 + + + + attn_pdrop + + float + + + + The dropout probability used on attention weights. Defaults to 0.0. + + + + 0.0 + + + + resid_pdrop + + float + + + + The dropout probability used on block outputs. Defaults to 0.0. + + + + 0.0 + + + + initializer_range + + float + + + + Initializer range of linear layer. Defaults to 0.05. + + + + 0.05 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.PhysformerGPT2(("embeds", ), ("pred_embeds", ), 6, 16, 128, 4) - - Source code in ppsci/arch/physx_transformer.py - 240 + + Source code in ppsci/arch/physx_transformer.py + 240 241 242 243 @@ -7714,9 +8566,9 @@ y = self._output_transform(x, y) return y - - + + @@ -7732,7 +8584,7 @@ - + @@ -7740,60 +8592,63 @@ + - ModelList + ModelList ¶ - - - Bases: Arch - + + + Bases: Arch + ModelList layer which wrap more than one model that shares inputs. -Parameters: - - - - Name - Type - Description - Default - - - - - model_list - - Tuple[Arch, ...] - - - - Model(s) nested in tuple. - - - - required - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + model_list + + Tuple[Arch, ...] + + + + Model(s) nested in tuple. + + + + required + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> model1 = ppsci.arch.MLP(("x", "y"), ("u", "v"), 10, 128) >>> model2 = ppsci.arch.MLP(("x", "y"), ("w", "p"), 5, 128) >>> model = ppsci.arch.ModelList((model1, model2)) - - Source code in ppsci/arch/model_list.py - 24 + + Source code in ppsci/arch/model_list.py + 24 25 26 27 @@ -7873,9 +8728,9 @@ return y_all - - + + @@ -7891,7 +8746,7 @@ - + @@ -7899,254 +8754,257 @@ + - AFNONet + AFNONet ¶ - - - Bases: Arch - + + + Bases: Arch + Adaptive Fourier Neural Network. 
-Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output",). - - - - required - - - - img_size - - Tuple[int, ...] - - - - Image size. Defaults to (720, 1440). - - - - (720, 1440) - - - - patch_size - - Tuple[int, ...] - - - - Path. Defaults to (8, 8). - - - - (8, 8) - - - - in_channels - - int - - - - The input tensor channels. Defaults to 20. - - - - 20 - - - - out_channels - - int - - - - The output tensor channels. Defaults to 20. - - - - 20 - - - - embed_dim - - int - - - - The embedding dimension for PatchEmbed. Defaults to 768. - - - - 768 - - - - depth - - int - - - - Number of transformer depth. Defaults to 12. - - - - 12 - - - - mlp_ratio - - float - - - - Number of ratio used in MLP. Defaults to 4.0. - - - - 4.0 - - - - drop_rate - - float - - - - The drop ratio used in MLP. Defaults to 0.0. - - - - 0.0 - - - - drop_path_rate - - float - - - - The drop ratio used in DropPath. Defaults to 0.0. - - - - 0.0 - - - - num_blocks - - int - - - - Number of blocks. Defaults to 8. - - - - 8 - - - - sparsity_threshold - - float - - - - The value of threshold for softshrink. Defaults to 0.01. - - - - 0.01 - - - - hard_thresholding_fraction - - float - - - - The value of threshold for keep mode. Defaults to 1.0. - - - - 1.0 - - - - num_timestamps - - int - - - - Number of timestamp. Defaults to 1. - - - - 1 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output",). + + + + required + + + + img_size + + Tuple[int, ...] + + + + Image size. Defaults to (720, 1440). + + + + (720, 1440) + + + + patch_size + + Tuple[int, ...] + + + + Path. Defaults to (8, 8). 
+ + + + (8, 8) + + + + in_channels + + int + + + + The input tensor channels. Defaults to 20. + + + + 20 + + + + out_channels + + int + + + + The output tensor channels. Defaults to 20. + + + + 20 + + + + embed_dim + + int + + + + The embedding dimension for PatchEmbed. Defaults to 768. + + + + 768 + + + + depth + + int + + + + Number of transformer depth. Defaults to 12. + + + + 12 + + + + mlp_ratio + + float + + + + Number of ratio used in MLP. Defaults to 4.0. + + + + 4.0 + + + + drop_rate + + float + + + + The drop ratio used in MLP. Defaults to 0.0. + + + + 0.0 + + + + drop_path_rate + + float + + + + The drop ratio used in DropPath. Defaults to 0.0. + + + + 0.0 + + + + num_blocks + + int + + + + Number of blocks. Defaults to 8. + + + + 8 + + + + sparsity_threshold + + float + + + + The value of threshold for softshrink. Defaults to 0.01. + + + + 0.01 + + + + hard_thresholding_fraction + + float + + + + The value of threshold for keep mode. Defaults to 1.0. + + + + 1.0 + + + + num_timestamps + + int + + + + Number of timestamp. Defaults to 1. + + + + 1 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.AFNONet(("input", ), ("output", )) - - Source code in ppsci/arch/afno.py - 394 + + Source code in ppsci/arch/afno.py + 394 395 396 397 @@ -8466,9 +9324,9 @@ y = self._output_transform(x, y) return y - - + + @@ -8484,7 +9342,7 @@ - + @@ -8492,269 +9350,272 @@ + - PrecipNet + PrecipNet ¶ - - - Bases: Arch - + + + Bases: Arch + Precipitation Network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output",). - - - - required - - - - wind_model - - Arch - - - - Wind model. - - - - required - - - - img_size - - Tuple[int, ...] - - - - Image size. Defaults to (720, 1440). - - - - (720, 1440) - - - - patch_size - - Tuple[int, ...] 
- - - - Path. Defaults to (8, 8). - - - - (8, 8) - - - - in_channels - - int - - - - The input tensor channels. Defaults to 20. - - - - 20 - - - - out_channels - - int - - - - The output tensor channels. Defaults to 1. - - - - 1 - - - - embed_dim - - int - - - - The embedding dimension for PatchEmbed. Defaults to 768. - - - - 768 - - - - depth - - int - - - - Number of transformer depth. Defaults to 12. - - - - 12 - - - - mlp_ratio - - float - - - - Number of ratio used in MLP. Defaults to 4.0. - - - - 4.0 - - - - drop_rate - - float - - - - The drop ratio used in MLP. Defaults to 0.0. - - - - 0.0 - - - - drop_path_rate - - float - - - - The drop ratio used in DropPath. Defaults to 0.0. - - - - 0.0 - - - - num_blocks - - int - - - - Number of blocks. Defaults to 8. - - - - 8 - - - - sparsity_threshold - - float - - - - The value of threshold for softshrink. Defaults to 0.01. - - - - 0.01 - - - - hard_thresholding_fraction - - float - - - - The value of threshold for keep mode. Defaults to 1.0. - - - - 1.0 - - - - num_timestamps - - int - - - - Number of timestamp. Defaults to 1. - - - - 1 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output",). + + + + required + + + + wind_model + + Arch + + + + Wind model. + + + + required + + + + img_size + + Tuple[int, ...] + + + + Image size. Defaults to (720, 1440). + + + + (720, 1440) + + + + patch_size + + Tuple[int, ...] + + + + Path. Defaults to (8, 8). + + + + (8, 8) + + + + in_channels + + int + + + + The input tensor channels. Defaults to 20. + + + + 20 + + + + out_channels + + int + + + + The output tensor channels. Defaults to 1. + + + + 1 + + + + embed_dim + + int + + + + The embedding dimension for PatchEmbed. Defaults to 768. + + + + 768 + + + + depth + + int + + + + Number of transformer depth. Defaults to 12. 
+ + + + 12 + + + + mlp_ratio + + float + + + + Number of ratio used in MLP. Defaults to 4.0. + + + + 4.0 + + + + drop_rate + + float + + + + The drop ratio used in MLP. Defaults to 0.0. + + + + 0.0 + + + + drop_path_rate + + float + + + + The drop ratio used in DropPath. Defaults to 0.0. + + + + 0.0 + + + + num_blocks + + int + + + + Number of blocks. Defaults to 8. + + + + 8 + + + + sparsity_threshold + + float + + + + The value of threshold for softshrink. Defaults to 0.01. + + + + 0.01 + + + + hard_thresholding_fraction + + float + + + + The value of threshold for keep mode. Defaults to 1.0. + + + + 1.0 + + + + num_timestamps + + int + + + + Number of timestamp. Defaults to 1. + + + + 1 + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> wind_model = ppsci.arch.AFNONet(("input", ), ("output", )) >>> model = ppsci.arch.PrecipNet(("input", ), ("output", ), wind_model) - - Source code in ppsci/arch/afno.py - 556 + + Source code in ppsci/arch/afno.py + 556 557 558 559 @@ -9002,9 +9863,9 @@ y = self._output_transform(x, y) return y - - + + @@ -9020,7 +9881,7 @@ - + @@ -9028,199 +9889,202 @@ + - UNetEx + UNetEx ¶ - - - Bases: Arch - + + + Bases: Arch + U-Net Ribeiro M D, Rehman A, Ahmed S, et al. DeepCFD: Efficient steady-state laminar flow approximation with deep convolutional neural networks[J]. arXiv preprint arXiv:2004.08826, 2020. -Parameters: - - - - Name - Type - Description - Default - - - - - input_key - - str - - - - Name of function data for input. - - - - required - - - - output_key - - str - - - - Name of function data for output. - - - - required - - - - in_channel - - int - - - - Number of channels of input. - - - - required - - - - out_channel - - int - - - - Number of channels of output. - - - - required - - - - kernel_size - - int - - - - Size of kernel of convolution layer. Defaults to 3. - - - - 3 - - - - filters - - Tuple[int, ...] - - - - Number of filters. Defaults to (16, 32, 64). 
- - - - (16, 32, 64) - - - - layers - - int - - - - Number of encoders or decoders. Defaults to 3. - - - - 3 - - - - weight_norm - - bool - - - - Whether use weight normalization layer. Defaults to True. - - - - True - - - - batch_norm - - bool - - - - Whether add batch normalization layer. Defaults to True. - - - - True - - - - activation - - Type[Layer] - - - - Name of activation function. Defaults to nn.ReLU. - - - - ReLU - - - - final_activation - - Optional[Type[Layer]] - - - - Name of final activation function. Defaults to None. - - - - None - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_key + + str + + + + Name of function data for input. + + + + required + + + + output_key + + str + + + + Name of function data for output. + + + + required + + + + in_channel + + int + + + + Number of channels of input. + + + + required + + + + out_channel + + int + + + + Number of channels of output. + + + + required + + + + kernel_size + + int + + + + Size of kernel of convolution layer. Defaults to 3. + + + + 3 + + + + filters + + Tuple[int, ...] + + + + Number of filters. Defaults to (16, 32, 64). + + + + (16, 32, 64) + + + + layers + + int + + + + Number of encoders or decoders. Defaults to 3. + + + + 3 + + + + weight_norm + + bool + + + + Whether use weight normalization layer. Defaults to True. + + + + True + + + + batch_norm + + bool + + + + Whether add batch normalization layer. Defaults to True. + + + + True + + + + activation + + Type[Layer] + + + + Name of activation function. Defaults to nn.ReLU. + + + + ReLU + + + + final_activation + + Optional[Type[Layer]] + + + + Name of final activation function. Defaults to None. + + + + None + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.ppsci.arch.UNetEx("input", "output", 3, 3, (8, 16, 32, 32), 5, False, False) - - Source code in ppsci/arch/unetex.py - 176 + + Source code in
28 29 30 31 @@ -2600,9 +3397,9 @@ ] )
num_params: int
property
Return number of parameters within network.
Returns:
int
Number of parameters.
concat_to_tensor(data_dict, keys, axis=-1)
Concatenate tensors from dict in the order of given keys.
Parameters:
data_dict
Dict[str, Tensor]
Dict contains tensor.
keys
Tuple[str, ...]
Keys tensor fetched from.
axis
Axis concatenate at. Defaults to -1.
-1
Tuple[Tensor, ...]
Tuple[paddle.Tensor, ...]: Concatenated tensor.
63 + Parameters: +
63 64 65 66 @@ -2787,56 +3591,60 @@ data = [data_dict[key] for key in keys] return paddle.concat(data, axis)
register_input_transform(transform)
Register input transform.
transform
Callable[[Dict[str, Tensor]], Dict[str, Tensor]]
Input transform of network, receive a single tensor dict and return a single tensor dict.
99 + Parameters: +
99 100 101 102 @@ -2858,57 +3666,61 @@ """ self._input_transform = transform
register_output_transform(transform)
Register output transform.
Callable[[Dict[str, Tensor], Dict[str, Tensor]], Dict[str, Tensor]]
Output transform of network, receive two single tensor dict(raw input + +
Output transform of network, receive two single tensor dict(raw input and raw output) and return a single tensor dict(transformed output).
111 + + Source code in ppsci/arch/base.py + 111 112 113 114 @@ -2938,107 +3750,112 @@ """ self._output_transform = transform - -
111 112 113 114 @@ -2938,107 +3750,112 @@ """ self._output_transform = transform
split_to_dict(data_tensor, keys, axis=-1)
Split tensor and wrap into a dict by given keys.
data_tensor
Tensor
Tensor to be split.
Keys tensor mapping to.
Axis split at. Defaults to -1.
Dict[str, paddle.Tensor]: Dict contains tensor.
81 + Parameters: +
81 82 83 84 @@ -3072,8 +3889,8 @@ data = paddle.split(data_tensor, len(keys), axis=axis) return {key: data[i] for i, key in enumerate(keys)}
AMGNet
A Multi-scale Graph neural Network model based on Encoder-Process-Decoder structure for flow field prediction.
https://doi.org/10.1080/09540091.2022.2131737
Code reference: https://github.com/baoshiaijhin/amgnet
input_keys
Name of input keys, such as ("input", ).
output_keys
Name of output keys, such as ("pred", ).
input_dim
Number of input dimension.
output_dim
Number of output dimension.
latent_dim
Number of hidden(feature) dimension.
num_layers
Number of layer(s).
message_passing_aggregator
Literal['sum']
Message aggregator method in graph. + +
Message aggregator method in graph. Only "sum" available now.
message_passing_steps
Message passing steps in graph.
speed
str
Whether use vanilla method or fast method for graph_connectivity +
Whether use vanilla method or fast method for graph_connectivity computation.
Examples:
>>> import ppsci + +Examples: + >>> import ppsci >>> model = ppsci.arch.AMGNet(("input", ), ("pred", ), 5, 3, 64, 2) - - Source code in ppsci/arch/amgnet.py - 559 + + Source code in ppsci/arch/amgnet.py + 559 560 561 562 @@ -3431,9 +4251,9 @@ return node_features - - + + @@ -3449,7 +4269,7 @@ - + @@ -3457,171 +4277,174 @@ + - MLP + MLP ¶ - - - Bases: Arch - + + + Bases: Arch + Multi layer perceptron network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("x", "y", "z"). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("u", "v", "w"). - - - - required - - - - num_layers - - int - - - - Number of hidden layers. - - - - required - - - - hidden_size - - Union[int, Tuple[int, ...]] - - - - Number of hidden size. + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("x", "y", "z"). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("u", "v", "w"). + + + + required + + + + num_layers + + int + + + + Number of hidden layers. + + + + required + + + + hidden_size + + Union[int, Tuple[int, ...]] + + + + Number of hidden size. An integer for all layers, or list of integer specify each layer's size. - - - - required - - - - activation - - str - - - - Name of activation function. Defaults to "tanh". - - - - 'tanh' - - - - skip_connection - - bool - - - - Whether to use skip connection. Defaults to False. - - - - False - - - - weight_norm - - bool - - - - Whether to apply weight norm on parameter(s). Defaults to False. - - - - False - - - - input_dim - - Optional[int] - - - - Number of input's dimension. Defaults to None. - - - - None - - - - output_dim - - Optional[int] - - - - Number of output's dimension. Defaults to None. 
- - - - None - - - - + + + + required + + + + activation + + str + + + + Name of activation function. Defaults to "tanh". + + + + 'tanh' + + + + skip_connection + + bool + + + + Whether to use skip connection. Defaults to False. + + + + False + + + + weight_norm + + bool + + + + Whether to apply weight norm on parameter(s). Defaults to False. + + + + False + + + + input_dim + + Optional[int] + + + + Number of input's dimension. Defaults to None. + + + + None + + + + output_dim + + Optional[int] + + + + Number of output's dimension. Defaults to None. + + + + None + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.MLP(("x", "y"), ("u", "v"), 5, 128) - - Source code in ppsci/arch/mlp.py - 53 + + Source code in ppsci/arch/mlp.py + 53 54 55 56 @@ -3851,9 +4674,9 @@ y = self._output_transform(x, y) return y - - + + @@ -3869,7 +4692,7 @@ - + @@ -3877,265 +4700,268 @@ + - DeepONet + DeepONet ¶ - - - Bases: Arch - + + + Bases: Arch + Deep operator network. Lu et al. Learning nonlinear operators via DeepONet based on the universal approximation theorem of operators. Nat Mach Intell, 2021. -Parameters: - - - - Name - Type - Description - Default - - - - - u_key - - str - - - - Name of function data for input function u(x). - - - - required - - - - y_key - - str - - - - Name of location data for input function G(u). - - - - required - - - - G_key - - str - - - - Output name of predicted G(u)(y). - - - - required - - - - num_loc - - int - - - - Number of sampled u(x), i.e. m in paper. - - - - required - - - - num_features - - int - - - - Number of features extracted from u(x), same for y. - - - - required - - - - branch_num_layers - - int - - - - Number of hidden layers of branch net. - - - - required - - - - trunk_num_layers - - int - - - - Number of hidden layers of trunk net. - - - - required - - - - branch_hidden_size - - Union[int, Tuple[int, ...]] - - - - Number of hidden size of branch net. 
+ + Parameters: + + + + Name + Type + Description + Default + + + + + u_key + + str + + + + Name of function data for input function u(x). + + + + required + + + + y_key + + str + + + + Name of location data for input function G(u). + + + + required + + + + G_key + + str + + + + Output name of predicted G(u)(y). + + + + required + + + + num_loc + + int + + + + Number of sampled u(x), i.e. m in paper. + + + + required + + + + num_features + + int + + + + Number of features extracted from u(x), same for y. + + + + required + + + + branch_num_layers + + int + + + + Number of hidden layers of branch net. + + + + required + + + + trunk_num_layers + + int + + + + Number of hidden layers of trunk net. + + + + required + + + + branch_hidden_size + + Union[int, Tuple[int, ...]] + + + + Number of hidden size of branch net. An integer for all layers, or list of integer specify each layer's size. - - - - required - - - - trunk_hidden_size - - Union[int, Tuple[int, ...]] - - - - Number of hidden size of trunk net. + + + + required + + + + trunk_hidden_size + + Union[int, Tuple[int, ...]] + + + + Number of hidden size of trunk net. An integer for all layers, or list of integer specify each layer's size. - - - - required - - - - branch_skip_connection - - bool - - - - Whether to use skip connection for branch net. Defaults to False. - - - - False - - - - trunk_skip_connection - - bool - - - - Whether to use skip connection for trunk net. Defaults to False. - - - - False - - - - branch_activation - - str - - - - Name of activation function. Defaults to "tanh". - - - - 'tanh' - - - - trunk_activation - - str - - - - Name of activation function. Defaults to "tanh". - - - - 'tanh' - - - - branch_weight_norm - - bool - - - - Whether to apply weight norm on parameter(s) for branch net. Defaults to False. - - - - False - - - - trunk_weight_norm - - bool - - - - Whether to apply weight norm on parameter(s) for trunk net. Defaults to False. 
- - - - False - - - - use_bias - - bool - - - - Whether to add bias on predicted G(u)(y). Defaults to True. - - - - True - - - - + + + + required + + + + branch_skip_connection + + bool + + + + Whether to use skip connection for branch net. Defaults to False. + + + + False + + + + trunk_skip_connection + + bool + + + + Whether to use skip connection for trunk net. Defaults to False. + + + + False + + + + branch_activation + + str + + + + Name of activation function. Defaults to "tanh". + + + + 'tanh' + + + + trunk_activation + + str + + + + Name of activation function. Defaults to "tanh". + + + + 'tanh' + + + + branch_weight_norm + + bool + + + + Whether to apply weight norm on parameter(s) for branch net. Defaults to False. + + + + False + + + + trunk_weight_norm + + bool + + + + Whether to apply weight norm on parameter(s) for trunk net. Defaults to False. + + + + False + + + + use_bias + + bool + + + + Whether to add bias on predicted G(u)(y). Defaults to True. + + + + True + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> model = ppsci.arch.DeepONet( ... "u", "y", "G", ... 100, 40, @@ -4146,9 +4972,9 @@ ... ) - - Source code in ppsci/arch/deeponet.py - 28 + + Source code in ppsci/arch/deeponet.py + 28 29 30 31 @@ -4390,9 +5216,9 @@ return result_dict - - + + @@ -4408,7 +5234,7 @@ - + @@ -4416,100 +5242,103 @@ + - DeepPhyLSTM + DeepPhyLSTM ¶ - - - Bases: Arch - + + + Bases: Arch + DeepPhyLSTM init function. -Parameters: - - - - Name - Type - Description - Default - - - - - input_size - - int - - - - The input size. - - - - required - - - - output_size - - int - - - - The output size. - - - - required - - - - hidden_size - - int - - - - The hidden size. Defaults to 100. - - - - 100 - - - - model_type - - int - - - - The model type, value is 2 or 3, 2 indicates having two sub-models, 3 indicates having three submodels. Defaults to 2. 
- - - - 2 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_size + + int + + + + The input size. + + + + required + + + + output_size + + int + + + + The output size. + + + + required + + + + hidden_size + + int + + + + The hidden size. Defaults to 100. + + + + 100 + + + + model_type + + int + + + + The model type, value is 2 or 3, 2 indicates having two sub-models, 3 indicates having three submodels. Defaults to 2. + + + + 2 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.DeepPhyLSTM(1, 1, 100) - - Source code in ppsci/arch/phylstm.py - 21 + + Source code in ppsci/arch/phylstm.py + 21 22 23 24 @@ -4803,9 +5632,9 @@ "g_dot_pred_c": g_dot_pred_c, } - - + + @@ -4821,7 +5650,7 @@ - + @@ -4829,156 +5658,159 @@ + - LorenzEmbedding + LorenzEmbedding ¶ - - - Bases: Arch - + + + Bases: Arch + Embedding Koopman model for the Lorenz ODE system. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("states",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Output keys, such as ("pred_states", "recover_states"). - - - - required - - - - mean - - Optional[Tuple[float, ...]] - - - - Mean of training dataset. Defaults to None. - - - - None - - - - std - - Optional[Tuple[float, ...]] - - - - Standard Deviation of training dataset. Defaults to None. - - - - None - - - - input_size - - int - - - - Size of input data. Defaults to 3. - - - - 3 - - - - hidden_size - - int - - - - Number of hidden size. Defaults to 500. - - - - 500 - - - - embed_size - - int - - - - Number of embedding size. Defaults to 32. - - - - 32 - - - - drop - - float - - - - Probability of dropout the units. Defaults to 0.0. - - - - 0.0 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("states",). + + + + required + + + + output_keys + + Tuple[str, ...] 
+ + + + Output keys, such as ("pred_states", "recover_states"). + + + + required + + + + mean + + Optional[Tuple[float, ...]] + + + + Mean of training dataset. Defaults to None. + + + + None + + + + std + + Optional[Tuple[float, ...]] + + + + Standard Deviation of training dataset. Defaults to None. + + + + None + + + + input_size + + int + + + + Size of input data. Defaults to 3. + + + + 3 + + + + hidden_size + + int + + + + Number of hidden size. Defaults to 500. + + + + 500 + + + + embed_size + + int + + + + Number of embedding size. Defaults to 32. + + + + 32 + + + + drop + + float + + + + Probability of dropout the units. Defaults to 0.0. + + + + 0.0 + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> model = ppsci.arch.LorenzEmbedding(("x", "y"), ("u", "v")) - - Source code in ppsci/arch/embedding_koopman.py - 36 + + Source code in ppsci/arch/embedding_koopman.py + 36 37 38 39 @@ -5298,9 +6130,9 @@ y = self._output_transform(x, y) return y - - + + @@ -5316,7 +6148,7 @@ - + @@ -5324,156 +6156,159 @@ + - RosslerEmbedding + RosslerEmbedding ¶ - - - Bases: LorenzEmbedding - + + + Bases: LorenzEmbedding + Embedding Koopman model for the Rossler ODE system. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("states",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Output keys, such as ("pred_states", "recover_states"). - - - - required - - - - mean - - Optional[Tuple[float, ...]] - - - - Mean of training dataset. Defaults to None. - - - - None - - - - std - - Optional[Tuple[float, ...]] - - - - Standard Deviation of training dataset. Defaults to None. - - - - None - - - - input_size - - int - - - - Size of input data. Defaults to 3. - - - - 3 - - - - hidden_size - - int - - - - Number of hidden size. Defaults to 500. - - - - 500 - - - - embed_size - - int - - - - Number of embedding size. Defaults to 32. 
- - - - 32 - - - - drop - - float - - - - Probability of dropout the units. Defaults to 0.0. - - - - 0.0 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("states",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Output keys, such as ("pred_states", "recover_states"). + + + + required + + + + mean + + Optional[Tuple[float, ...]] + + + + Mean of training dataset. Defaults to None. + + + + None + + + + std + + Optional[Tuple[float, ...]] + + + + Standard Deviation of training dataset. Defaults to None. + + + + None + + + + input_size + + int + + + + Size of input data. Defaults to 3. + + + + 3 + + + + hidden_size + + int + + + + Number of hidden size. Defaults to 500. + + + + 500 + + + + embed_size + + int + + + + Number of embedding size. Defaults to 32. + + + + 32 + + + + drop + + float + + + + Probability of dropout the units. Defaults to 0.0. + + + + 0.0 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.RosslerEmbedding(("x", "y"), ("u", "v")) - - Source code in ppsci/arch/embedding_koopman.py - 198 + + Source code in ppsci/arch/embedding_koopman.py + 198 199 200 201 @@ -5551,9 +6386,9 @@ drop, ) - - + + @@ -5569,7 +6404,7 @@ - + @@ -5577,156 +6412,159 @@ + - CylinderEmbedding + CylinderEmbedding ¶ - - - Bases: Arch - + + + Bases: Arch + Embedding Koopman model for the Cylinder system. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("states", "visc"). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Output keys, such as ("pred_states", "recover_states"). - - - - required - - - - mean - - Optional[Tuple[float, ...]] - - - - Mean of training dataset. Defaults to None. - - - - None - - - - std - - Optional[Tuple[float, ...]] - - - - Standard Deviation of training dataset. Defaults to None. 
- - - - None - - - - embed_size - - int - - - - Number of embedding size. Defaults to 128. - - - - 128 - - - - encoder_channels - - Optional[Tuple[int, ...]] - - - - Number of channels in encoder network. Defaults to None. - - - - None - - - - decoder_channels - - Optional[Tuple[int, ...]] - - - - Number of channels in decoder network. Defaults to None. - - - - None - - - - drop - - float - - - - Probability of dropout the units. Defaults to 0.0. - - - - 0.0 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("states", "visc"). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Output keys, such as ("pred_states", "recover_states"). + + + + required + + + + mean + + Optional[Tuple[float, ...]] + + + + Mean of training dataset. Defaults to None. + + + + None + + + + std + + Optional[Tuple[float, ...]] + + + + Standard Deviation of training dataset. Defaults to None. + + + + None + + + + embed_size + + int + + + + Number of embedding size. Defaults to 128. + + + + 128 + + + + encoder_channels + + Optional[Tuple[int, ...]] + + + + Number of channels in encoder network. Defaults to None. + + + + None + + + + decoder_channels + + Optional[Tuple[int, ...]] + + + + Number of channels in decoder network. Defaults to None. + + + + None + + + + drop + + float + + + + Probability of dropout the units. Defaults to 0.0. + + + + 0.0 + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> model = ppsci.arch.CylinderEmbedding(("x", "y"), ("u", "v")) - - Source code in ppsci/arch/embedding_koopman.py - 239 + + Source code in ppsci/arch/embedding_koopman.py + 239 240 241 242 @@ -6250,9 +7088,9 @@ y = self._output_transform(x, y) return y - - + + @@ -6268,7 +7106,7 @@ - + @@ -6276,155 +7114,158 @@ + - Generator + Generator ¶ - - - Bases: Arch - + + + Bases: Arch + Generator Net of GAN. 
Attention, the net using a kind of variant of ResBlock which is unique to "tempoGAN" example but not an open source network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input1", "input2"). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output1", "output2"). - - - - required - - - - in_channel - - int - - - - Number of input channels of the first conv layer. - - - - required - - - - out_channels_tuple - - Tuple[Tuple[int, ...], ...] - - - - Number of output channels of all conv layers, + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input1", "input2"). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output1", "output2"). + + + + required + + + + in_channel + + int + + + + Number of input channels of the first conv layer. + + + + required + + + + out_channels_tuple + + Tuple[Tuple[int, ...], ...] + + + + Number of output channels of all conv layers, such as [[out_res0_conv0, out_res0_conv1], [out_res1_conv0, out_res1_conv1]] - - - - required - - - - kernel_sizes_tuple - - Tuple[Tuple[int, ...], ...] - - - - Number of kernel_size of all conv layers, + + + + required + + + + kernel_sizes_tuple + + Tuple[Tuple[int, ...], ...] + + + + Number of kernel_size of all conv layers, such as [[kernel_size_res0_conv0, kernel_size_res0_conv1], [kernel_size_res1_conv0, kernel_size_res1_conv1]] - - - - required - - - - strides_tuple - - Tuple[Tuple[int, ...], ...] - - - - Number of stride of all conv layers, + + + + required + + + + strides_tuple + + Tuple[Tuple[int, ...], ...] + + + + Number of stride of all conv layers, such as [[stride_res0_conv0, stride_res0_conv1], [stride_res1_conv0, stride_res1_conv1]] - - - - required - - - - use_bns_tuple - - Tuple[Tuple[bool, ...], ...] 
- - - - Whether to use the batch_norm layer after each conv layer. - - - - required - - - - acts_tuple - - Tuple[Tuple[str, ...], ...] - - - - Whether to use the activation layer after each conv layer. If so, witch activation to use, + + + + required + + + + use_bns_tuple + + Tuple[Tuple[bool, ...], ...] + + + + Whether to use the batch_norm layer after each conv layer. + + + + required + + + + acts_tuple + + Tuple[Tuple[str, ...], ...] + + + + Whether to use the activation layer after each conv layer. If so, witch activation to use, such as [[act_res0_conv0, act_res0_conv1], [act_res1_conv0, act_res1_conv1]] - - - - required - - - - + + + + required + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> in_channel = 1 >>> rb_channel0 = (2, 8, 8) >>> rb_channel1 = (128, 128, 128) @@ -6438,9 +7279,9 @@ >>> model = ppsci.arch.Generator(("in",), ("out",), in_channel, out_channels_tuple, kernel_sizes_tuple, strides_tuple, use_bns_tuple, acts_tuple) - - Source code in ppsci/arch/gan.py - 154 + + Source code in ppsci/arch/gan.py + 154 155 156 157 @@ -6628,9 +7469,9 @@ y = self._output_transform(x, y) return y - - + + @@ -6646,7 +7487,7 @@ - + @@ -6654,169 +7495,172 @@ + - Discriminator + Discriminator ¶ - - - Bases: Arch - + + + Bases: Arch + Discriminator Net of GAN. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input1", "input2"). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output1", "output2"). - - - - required - - - - in_channel - - int - - - - Number of input channels of the first conv layer. - - - - required - - - - out_channels - - Tuple[int, ...] - - - - Number of output channels of all conv layers, + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input1", "input2"). 
+ + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output1", "output2"). + + + + required + + + + in_channel + + int + + + + Number of input channels of the first conv layer. + + + + required + + + + out_channels + + Tuple[int, ...] + + + + Number of output channels of all conv layers, such as (out_conv0, out_conv1, out_conv2). - - - - required - - - - fc_channel - - int - - - - Number of input features of linear layer. Number of output features of the layer + + + + required + + + + fc_channel + + int + + + + Number of input features of linear layer. Number of output features of the layer is set to 1 in this Net to construct a fully_connected layer. - - - - required - - - - kernel_sizes - - Tuple[int, ...] - - - - Number of kernel_size of all conv layers, + + + + required + + + + kernel_sizes + + Tuple[int, ...] + + + + Number of kernel_size of all conv layers, such as (kernel_size_conv0, kernel_size_conv1, kernel_size_conv2). - - - - required - - - - strides - - Tuple[int, ...] - - - - Number of stride of all conv layers, + + + + required + + + + strides + + Tuple[int, ...] + + + + Number of stride of all conv layers, such as (stride_conv0, stride_conv1, stride_conv2). - - - - required - - - - use_bns - - Tuple[bool, ...] - - - - Whether to use the batch_norm layer after each conv layer. - - - - required - - - - acts - - Tuple[str, ...] - - - - Whether to use the activation layer after each conv layer. If so, witch activation to use, + + + + required + + + + use_bns + + Tuple[bool, ...] + + + + Whether to use the batch_norm layer after each conv layer. + + + + required + + + + acts + + Tuple[str, ...] + + + + Whether to use the activation layer after each conv layer. If so, witch activation to use, such as (act_conv0, act_conv1, act_conv2). 
- - - - required - - - - + + + + required + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> in_channel = 2 >>> in_channel_tempo = 3 >>> out_channels = (32, 64, 128, 256) @@ -6829,9 +7673,9 @@ >>> model = ppsci.arch.Discriminator(("in_1","in_2"), output_keys_disc, in_channel, out_channels, fc_channel, kernel_sizes, strides, use_bns, acts) - - Source code in ppsci/arch/gan.py - 250 + + Source code in ppsci/arch/gan.py + 250 251 252 253 @@ -7085,9 +7929,9 @@ return {keys[0]: data_list[0]} return {key: data_list[i] for i, key in enumerate(keys)} - - + + @@ -7099,17 +7943,20 @@ + + + - split_to_dict(data_list, keys) + split_to_dict(data_list, keys) ¶ - - + + Overwrite of split_to_dict() method belongs to Class base.Arch. Reason for overwriting is there is no concat_to_tensor() method called in "tempoGAN" example. That is because input in "tempoGAN" example is not in a regular format, but a format like: @@ -7119,74 +7966,76 @@ } -Parameters: - - - - Name - Type - Description - Default - - - - - data_list - - List[Tensor] - - - - The data to be split. It should be a list of tensor(s), but not a paddle.Tensor. - - - - required - - - - keys - - Tuple[str, ...] - - - - Keys of outputs. - - - - required - - - - - - - Returns: - - - - Type - Description - - - - - - Dict[str, Tensor] - - - - Dict[str, paddle.Tensor]: Dict with split data. - - - - - - - Source code in ppsci/arch/gan.py - 355 + Parameters: + + + + Name + Type + Description + Default + + + + + data_list + + List[Tensor] + + + + The data to be split. It should be a list of tensor(s), but not a paddle.Tensor. + + + + required + + + + keys + + Tuple[str, ...] + + + + Keys of outputs. + + + + required + + + + + + + + Returns: + + + + Type + Description + + + + + + Dict[str, Tensor] + + + + Dict[str, paddle.Tensor]: Dict with split data. 
+ + + + + + + + Source code in ppsci/arch/gan.py + 355 356 357 358 @@ -7230,8 +8079,8 @@ return {keys[0]: data_list[0]} return {key: data_list[i] for i, key in enumerate(keys)} - - + + @@ -7239,7 +8088,7 @@ - + @@ -7247,184 +8096,187 @@ + - PhysformerGPT2 + PhysformerGPT2 ¶ - - - Bases: Arch - + + + Bases: Arch + Transformer decoder model for modeling physics. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("embeds",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Output keys, such as ("pred_embeds",). - - - - required - - - - num_layers - - int - - - - Number of transformer layers. - - - - required - - - - num_ctx - - int - - - - Context length of block. - - - - required - - - - embed_size - - int - - - - The number of embedding size. - - - - required - - - - num_heads - - int - - - - The number of heads in multi-head attention. - - - - required - - - - embd_pdrop - - float - - - - The dropout probability used on embedding features. Defaults to 0.0. - - - - 0.0 - - - - attn_pdrop - - float - - - - The dropout probability used on attention weights. Defaults to 0.0. - - - - 0.0 - - - - resid_pdrop - - float - - - - The dropout probability used on block outputs. Defaults to 0.0. - - - - 0.0 - - - - initializer_range - - float - - - - Initializer range of linear layer. Defaults to 0.05. - - - - 0.05 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("embeds",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Output keys, such as ("pred_embeds",). + + + + required + + + + num_layers + + int + + + + Number of transformer layers. + + + + required + + + + num_ctx + + int + + + + Context length of block. + + + + required + + + + embed_size + + int + + + + The number of embedding size. 
+ + + + required + + + + num_heads + + int + + + + The number of heads in multi-head attention. + + + + required + + + + embd_pdrop + + float + + + + The dropout probability used on embedding features. Defaults to 0.0. + + + + 0.0 + + + + attn_pdrop + + float + + + + The dropout probability used on attention weights. Defaults to 0.0. + + + + 0.0 + + + + resid_pdrop + + float + + + + The dropout probability used on block outputs. Defaults to 0.0. + + + + 0.0 + + + + initializer_range + + float + + + + Initializer range of linear layer. Defaults to 0.05. + + + + 0.05 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.PhysformerGPT2(("embeds", ), ("pred_embeds", ), 6, 16, 128, 4) - - Source code in ppsci/arch/physx_transformer.py - 240 + + Source code in ppsci/arch/physx_transformer.py + 240 241 242 243 @@ -7714,9 +8566,9 @@ y = self._output_transform(x, y) return y - - + + @@ -7732,7 +8584,7 @@ - + @@ -7740,60 +8592,63 @@ + - ModelList + ModelList ¶ - - - Bases: Arch - + + + Bases: Arch + ModelList layer which wrap more than one model that shares inputs. -Parameters: - - - - Name - Type - Description - Default - - - - - model_list - - Tuple[Arch, ...] - - - - Model(s) nested in tuple. - - - - required - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + model_list + + Tuple[Arch, ...] + + + + Model(s) nested in tuple. + + + + required + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> model1 = ppsci.arch.MLP(("x", "y"), ("u", "v"), 10, 128) >>> model2 = ppsci.arch.MLP(("x", "y"), ("w", "p"), 5, 128) >>> model = ppsci.arch.ModelList((model1, model2)) - - Source code in ppsci/arch/model_list.py - 24 + + Source code in ppsci/arch/model_list.py + 24 25 26 27 @@ -7873,9 +8728,9 @@ return y_all - - + + @@ -7891,7 +8746,7 @@ - + @@ -7899,254 +8754,257 @@ + - AFNONet + AFNONet ¶ - - - Bases: Arch - + + + Bases: Arch + Adaptive Fourier Neural Network. 
-Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output",). - - - - required - - - - img_size - - Tuple[int, ...] - - - - Image size. Defaults to (720, 1440). - - - - (720, 1440) - - - - patch_size - - Tuple[int, ...] - - - - Path. Defaults to (8, 8). - - - - (8, 8) - - - - in_channels - - int - - - - The input tensor channels. Defaults to 20. - - - - 20 - - - - out_channels - - int - - - - The output tensor channels. Defaults to 20. - - - - 20 - - - - embed_dim - - int - - - - The embedding dimension for PatchEmbed. Defaults to 768. - - - - 768 - - - - depth - - int - - - - Number of transformer depth. Defaults to 12. - - - - 12 - - - - mlp_ratio - - float - - - - Number of ratio used in MLP. Defaults to 4.0. - - - - 4.0 - - - - drop_rate - - float - - - - The drop ratio used in MLP. Defaults to 0.0. - - - - 0.0 - - - - drop_path_rate - - float - - - - The drop ratio used in DropPath. Defaults to 0.0. - - - - 0.0 - - - - num_blocks - - int - - - - Number of blocks. Defaults to 8. - - - - 8 - - - - sparsity_threshold - - float - - - - The value of threshold for softshrink. Defaults to 0.01. - - - - 0.01 - - - - hard_thresholding_fraction - - float - - - - The value of threshold for keep mode. Defaults to 1.0. - - - - 1.0 - - - - num_timestamps - - int - - - - Number of timestamp. Defaults to 1. - - - - 1 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output",). + + + + required + + + + img_size + + Tuple[int, ...] + + + + Image size. Defaults to (720, 1440). + + + + (720, 1440) + + + + patch_size + + Tuple[int, ...] + + + + Path. Defaults to (8, 8). 
+ + + + (8, 8) + + + + in_channels + + int + + + + The input tensor channels. Defaults to 20. + + + + 20 + + + + out_channels + + int + + + + The output tensor channels. Defaults to 20. + + + + 20 + + + + embed_dim + + int + + + + The embedding dimension for PatchEmbed. Defaults to 768. + + + + 768 + + + + depth + + int + + + + Number of transformer depth. Defaults to 12. + + + + 12 + + + + mlp_ratio + + float + + + + Number of ratio used in MLP. Defaults to 4.0. + + + + 4.0 + + + + drop_rate + + float + + + + The drop ratio used in MLP. Defaults to 0.0. + + + + 0.0 + + + + drop_path_rate + + float + + + + The drop ratio used in DropPath. Defaults to 0.0. + + + + 0.0 + + + + num_blocks + + int + + + + Number of blocks. Defaults to 8. + + + + 8 + + + + sparsity_threshold + + float + + + + The value of threshold for softshrink. Defaults to 0.01. + + + + 0.01 + + + + hard_thresholding_fraction + + float + + + + The value of threshold for keep mode. Defaults to 1.0. + + + + 1.0 + + + + num_timestamps + + int + + + + Number of timestamp. Defaults to 1. + + + + 1 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.AFNONet(("input", ), ("output", )) - - Source code in ppsci/arch/afno.py - 394 + + Source code in ppsci/arch/afno.py + 394 395 396 397 @@ -8466,9 +9324,9 @@ y = self._output_transform(x, y) return y - - + + @@ -8484,7 +9342,7 @@ - + @@ -8492,269 +9350,272 @@ + - PrecipNet + PrecipNet ¶ - - - Bases: Arch - + + + Bases: Arch + Precipitation Network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output",). - - - - required - - - - wind_model - - Arch - - - - Wind model. - - - - required - - - - img_size - - Tuple[int, ...] - - - - Image size. Defaults to (720, 1440). - - - - (720, 1440) - - - - patch_size - - Tuple[int, ...] 
- - - - Path. Defaults to (8, 8). - - - - (8, 8) - - - - in_channels - - int - - - - The input tensor channels. Defaults to 20. - - - - 20 - - - - out_channels - - int - - - - The output tensor channels. Defaults to 1. - - - - 1 - - - - embed_dim - - int - - - - The embedding dimension for PatchEmbed. Defaults to 768. - - - - 768 - - - - depth - - int - - - - Number of transformer depth. Defaults to 12. - - - - 12 - - - - mlp_ratio - - float - - - - Number of ratio used in MLP. Defaults to 4.0. - - - - 4.0 - - - - drop_rate - - float - - - - The drop ratio used in MLP. Defaults to 0.0. - - - - 0.0 - - - - drop_path_rate - - float - - - - The drop ratio used in DropPath. Defaults to 0.0. - - - - 0.0 - - - - num_blocks - - int - - - - Number of blocks. Defaults to 8. - - - - 8 - - - - sparsity_threshold - - float - - - - The value of threshold for softshrink. Defaults to 0.01. - - - - 0.01 - - - - hard_thresholding_fraction - - float - - - - The value of threshold for keep mode. Defaults to 1.0. - - - - 1.0 - - - - num_timestamps - - int - - - - Number of timestamp. Defaults to 1. - - - - 1 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output",). + + + + required + + + + wind_model + + Arch + + + + Wind model. + + + + required + + + + img_size + + Tuple[int, ...] + + + + Image size. Defaults to (720, 1440). + + + + (720, 1440) + + + + patch_size + + Tuple[int, ...] + + + + Path. Defaults to (8, 8). + + + + (8, 8) + + + + in_channels + + int + + + + The input tensor channels. Defaults to 20. + + + + 20 + + + + out_channels + + int + + + + The output tensor channels. Defaults to 1. + + + + 1 + + + + embed_dim + + int + + + + The embedding dimension for PatchEmbed. Defaults to 768. + + + + 768 + + + + depth + + int + + + + Number of transformer depth. Defaults to 12. 
+ + + + 12 + + + + mlp_ratio + + float + + + + Number of ratio used in MLP. Defaults to 4.0. + + + + 4.0 + + + + drop_rate + + float + + + + The drop ratio used in MLP. Defaults to 0.0. + + + + 0.0 + + + + drop_path_rate + + float + + + + The drop ratio used in DropPath. Defaults to 0.0. + + + + 0.0 + + + + num_blocks + + int + + + + Number of blocks. Defaults to 8. + + + + 8 + + + + sparsity_threshold + + float + + + + The value of threshold for softshrink. Defaults to 0.01. + + + + 0.01 + + + + hard_thresholding_fraction + + float + + + + The value of threshold for keep mode. Defaults to 1.0. + + + + 1.0 + + + + num_timestamps + + int + + + + Number of timestamp. Defaults to 1. + + + + 1 + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> wind_model = ppsci.arch.AFNONet(("input", ), ("output", )) >>> model = ppsci.arch.PrecipNet(("input", ), ("output", ), wind_model) - - Source code in ppsci/arch/afno.py - 556 + + Source code in ppsci/arch/afno.py + 556 557 558 559 @@ -9002,9 +9863,9 @@ y = self._output_transform(x, y) return y - - + + @@ -9020,7 +9881,7 @@ - + @@ -9028,199 +9889,202 @@ + - UNetEx + UNetEx ¶ - - - Bases: Arch - + + + Bases: Arch + U-Net Ribeiro M D, Rehman A, Ahmed S, et al. DeepCFD: Efficient steady-state laminar flow approximation with deep convolutional neural networks[J]. arXiv preprint arXiv:2004.08826, 2020. -Parameters: - - - - Name - Type - Description - Default - - - - - input_key - - str - - - - Name of function data for input. - - - - required - - - - output_key - - str - - - - Name of function data for output. - - - - required - - - - in_channel - - int - - - - Number of channels of input. - - - - required - - - - out_channel - - int - - - - Number of channels of output. - - - - required - - - - kernel_size - - int - - - - Size of kernel of convolution layer. Defaults to 3. - - - - 3 - - - - filters - - Tuple[int, ...] - - - - Number of filters. Defaults to (16, 32, 64). 
- - - - (16, 32, 64) - - - - layers - - int - - - - Number of encoders or decoders. Defaults to 3. - - - - 3 - - - - weight_norm - - bool - - - - Whether use weight normalization layer. Defaults to True. - - - - True - - - - batch_norm - - bool - - - - Whether add batch normalization layer. Defaults to True. - - - - True - - - - activation - - Type[Layer] - - - - Name of activation function. Defaults to nn.ReLU. - - - - ReLU - - - - final_activation - - Optional[Type[Layer]] - - - - Name of final activation function. Defaults to None. - - - - None - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_key + + str + + + + Name of function data for input. + + + + required + + + + output_key + + str + + + + Name of function data for output. + + + + required + + + + in_channel + + int + + + + Number of channels of input. + + + + required + + + + out_channel + + int + + + + Number of channels of output. + + + + required + + + + kernel_size + + int + + + + Size of kernel of convolution layer. Defaults to 3. + + + + 3 + + + + filters + + Tuple[int, ...] + + + + Number of filters. Defaults to (16, 32, 64). + + + + (16, 32, 64) + + + + layers + + int + + + + Number of encoders or decoders. Defaults to 3. + + + + 3 + + + + weight_norm + + bool + + + + Whether use weight normalization layer. Defaults to True. + + + + True + + + + batch_norm + + bool + + + + Whether add batch normalization layer. Defaults to True. + + + + True + + + + activation + + Type[Layer] + + + + Name of activation function. Defaults to nn.ReLU. + + + + ReLU + + + + final_activation + + Optional[Type[Layer]] + + + + Name of final activation function. Defaults to None. + + + + None + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.ppsci.arch.UNetEx("input", "output", 3, 3, (8, 16, 32, 32), 5, False, False) - - Source code in ppsci/arch/unetex.py - 176 + + Source code in
>>> import ppsci >>> model = ppsci.arch.AMGNet(("input", ), ("pred", ), 5, 3, 64, 2)
ppsci/arch/amgnet.py
559 + + Source code in ppsci/arch/amgnet.py + 559 560 561 562 @@ -3431,9 +4251,9 @@ return node_features - - + + @@ -3449,7 +4269,7 @@ -
559 560 561 562 @@ -3431,9 +4251,9 @@ return node_features
MLP
- Bases: Arch
+ Bases: Arch
Multi layer perceptron network.
Name of input keys, such as ("x", "y", "z").
Name of output keys, such as ("u", "v", "w").
Number of hidden layers.
hidden_size
Union[int, Tuple[int, ...]]
Number of hidden size. + +
Number of hidden size. An integer for all layers, or list of integer specify each layer's size.
activation
Name of activation function. Defaults to "tanh".
'tanh'
skip_connection
bool
Whether to use skip connection. Defaults to False.
False
weight_norm
Whether to apply weight norm on parameter(s). Defaults to False.
Optional[int]
Number of input's dimension. Defaults to None.
None
Number of output's dimension. Defaults to None.
>>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.MLP(("x", "y"), ("u", "v"), 5, 128) - - Source code in ppsci/arch/mlp.py - 53 + + Source code in ppsci/arch/mlp.py + 53 54 55 56 @@ -3851,9 +4674,9 @@ y = self._output_transform(x, y) return y - - + + @@ -3869,7 +4692,7 @@ - + @@ -3877,265 +4700,268 @@ + - DeepONet + DeepONet ¶ - - - Bases: Arch - + + + Bases: Arch + Deep operator network. Lu et al. Learning nonlinear operators via DeepONet based on the universal approximation theorem of operators. Nat Mach Intell, 2021. -Parameters: - - - - Name - Type - Description - Default - - - - - u_key - - str - - - - Name of function data for input function u(x). - - - - required - - - - y_key - - str - - - - Name of location data for input function G(u). - - - - required - - - - G_key - - str - - - - Output name of predicted G(u)(y). - - - - required - - - - num_loc - - int - - - - Number of sampled u(x), i.e. m in paper. - - - - required - - - - num_features - - int - - - - Number of features extracted from u(x), same for y. - - - - required - - - - branch_num_layers - - int - - - - Number of hidden layers of branch net. - - - - required - - - - trunk_num_layers - - int - - - - Number of hidden layers of trunk net. - - - - required - - - - branch_hidden_size - - Union[int, Tuple[int, ...]] - - - - Number of hidden size of branch net. + + Parameters: + + + + Name + Type + Description + Default + + + + + u_key + + str + + + + Name of function data for input function u(x). + + + + required + + + + y_key + + str + + + + Name of location data for input function G(u). + + + + required + + + + G_key + + str + + + + Output name of predicted G(u)(y). + + + + required + + + + num_loc + + int + + + + Number of sampled u(x), i.e. m in paper. + + + + required + + + + num_features + + int + + + + Number of features extracted from u(x), same for y. + + + + required + + + + branch_num_layers + + int + + + + Number of hidden layers of branch net. 
+ + + + required + + + + trunk_num_layers + + int + + + + Number of hidden layers of trunk net. + + + + required + + + + branch_hidden_size + + Union[int, Tuple[int, ...]] + + + + Number of hidden size of branch net. An integer for all layers, or list of integer specify each layer's size. - - - - required - - - - trunk_hidden_size - - Union[int, Tuple[int, ...]] - - - - Number of hidden size of trunk net. + + + + required + + + + trunk_hidden_size + + Union[int, Tuple[int, ...]] + + + + Number of hidden size of trunk net. An integer for all layers, or list of integer specify each layer's size. - - - - required - - - - branch_skip_connection - - bool - - - - Whether to use skip connection for branch net. Defaults to False. - - - - False - - - - trunk_skip_connection - - bool - - - - Whether to use skip connection for trunk net. Defaults to False. - - - - False - - - - branch_activation - - str - - - - Name of activation function. Defaults to "tanh". - - - - 'tanh' - - - - trunk_activation - - str - - - - Name of activation function. Defaults to "tanh". - - - - 'tanh' - - - - branch_weight_norm - - bool - - - - Whether to apply weight norm on parameter(s) for branch net. Defaults to False. - - - - False - - - - trunk_weight_norm - - bool - - - - Whether to apply weight norm on parameter(s) for trunk net. Defaults to False. - - - - False - - - - use_bias - - bool - - - - Whether to add bias on predicted G(u)(y). Defaults to True. - - - - True - - - - + + + + required + + + + branch_skip_connection + + bool + + + + Whether to use skip connection for branch net. Defaults to False. + + + + False + + + + trunk_skip_connection + + bool + + + + Whether to use skip connection for trunk net. Defaults to False. + + + + False + + + + branch_activation + + str + + + + Name of activation function. Defaults to "tanh". + + + + 'tanh' + + + + trunk_activation + + str + + + + Name of activation function. Defaults to "tanh". 
+ + + + 'tanh' + + + + branch_weight_norm + + bool + + + + Whether to apply weight norm on parameter(s) for branch net. Defaults to False. + + + + False + + + + trunk_weight_norm + + bool + + + + Whether to apply weight norm on parameter(s) for trunk net. Defaults to False. + + + + False + + + + use_bias + + bool + + + + Whether to add bias on predicted G(u)(y). Defaults to True. + + + + True + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> model = ppsci.arch.DeepONet( ... "u", "y", "G", ... 100, 40, @@ -4146,9 +4972,9 @@ ... ) - - Source code in ppsci/arch/deeponet.py - 28 + + Source code in ppsci/arch/deeponet.py + 28 29 30 31 @@ -4390,9 +5216,9 @@ return result_dict - - + + @@ -4408,7 +5234,7 @@ - + @@ -4416,100 +5242,103 @@ + - DeepPhyLSTM + DeepPhyLSTM ¶ - - - Bases: Arch - + + + Bases: Arch + DeepPhyLSTM init function. -Parameters: - - - - Name - Type - Description - Default - - - - - input_size - - int - - - - The input size. - - - - required - - - - output_size - - int - - - - The output size. - - - - required - - - - hidden_size - - int - - - - The hidden size. Defaults to 100. - - - - 100 - - - - model_type - - int - - - - The model type, value is 2 or 3, 2 indicates having two sub-models, 3 indicates having three submodels. Defaults to 2. - - - - 2 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_size + + int + + + + The input size. + + + + required + + + + output_size + + int + + + + The output size. + + + + required + + + + hidden_size + + int + + + + The hidden size. Defaults to 100. + + + + 100 + + + + model_type + + int + + + + The model type, value is 2 or 3, 2 indicates having two sub-models, 3 indicates having three submodels. Defaults to 2. 
+ + + + 2 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.DeepPhyLSTM(1, 1, 100) - - Source code in ppsci/arch/phylstm.py - 21 + + Source code in ppsci/arch/phylstm.py + 21 22 23 24 @@ -4803,9 +5632,9 @@ "g_dot_pred_c": g_dot_pred_c, } - - + + @@ -4821,7 +5650,7 @@ - + @@ -4829,156 +5658,159 @@ + - LorenzEmbedding + LorenzEmbedding ¶ - - - Bases: Arch - + + + Bases: Arch + Embedding Koopman model for the Lorenz ODE system. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("states",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Output keys, such as ("pred_states", "recover_states"). - - - - required - - - - mean - - Optional[Tuple[float, ...]] - - - - Mean of training dataset. Defaults to None. - - - - None - - - - std - - Optional[Tuple[float, ...]] - - - - Standard Deviation of training dataset. Defaults to None. - - - - None - - - - input_size - - int - - - - Size of input data. Defaults to 3. - - - - 3 - - - - hidden_size - - int - - - - Number of hidden size. Defaults to 500. - - - - 500 - - - - embed_size - - int - - - - Number of embedding size. Defaults to 32. - - - - 32 - - - - drop - - float - - - - Probability of dropout the units. Defaults to 0.0. - - - - 0.0 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("states",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Output keys, such as ("pred_states", "recover_states"). + + + + required + + + + mean + + Optional[Tuple[float, ...]] + + + + Mean of training dataset. Defaults to None. + + + + None + + + + std + + Optional[Tuple[float, ...]] + + + + Standard Deviation of training dataset. Defaults to None. + + + + None + + + + input_size + + int + + + + Size of input data. Defaults to 3. + + + + 3 + + + + hidden_size + + int + + + + Number of hidden size. 
Defaults to 500. + + + + 500 + + + + embed_size + + int + + + + Number of embedding size. Defaults to 32. + + + + 32 + + + + drop + + float + + + + Probability of dropout the units. Defaults to 0.0. + + + + 0.0 + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> model = ppsci.arch.LorenzEmbedding(("x", "y"), ("u", "v")) - - Source code in ppsci/arch/embedding_koopman.py - 36 + + Source code in ppsci/arch/embedding_koopman.py + 36 37 38 39 @@ -5298,9 +6130,9 @@ y = self._output_transform(x, y) return y - - + + @@ -5316,7 +6148,7 @@ - + @@ -5324,156 +6156,159 @@ + - RosslerEmbedding + RosslerEmbedding ¶ - - - Bases: LorenzEmbedding - + + + Bases: LorenzEmbedding + Embedding Koopman model for the Rossler ODE system. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("states",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Output keys, such as ("pred_states", "recover_states"). - - - - required - - - - mean - - Optional[Tuple[float, ...]] - - - - Mean of training dataset. Defaults to None. - - - - None - - - - std - - Optional[Tuple[float, ...]] - - - - Standard Deviation of training dataset. Defaults to None. - - - - None - - - - input_size - - int - - - - Size of input data. Defaults to 3. - - - - 3 - - - - hidden_size - - int - - - - Number of hidden size. Defaults to 500. - - - - 500 - - - - embed_size - - int - - - - Number of embedding size. Defaults to 32. - - - - 32 - - - - drop - - float - - - - Probability of dropout the units. Defaults to 0.0. - - - - 0.0 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("states",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Output keys, such as ("pred_states", "recover_states"). + + + + required + + + + mean + + Optional[Tuple[float, ...]] + + + + Mean of training dataset. Defaults to None. 
+ + + + None + + + + std + + Optional[Tuple[float, ...]] + + + + Standard Deviation of training dataset. Defaults to None. + + + + None + + + + input_size + + int + + + + Size of input data. Defaults to 3. + + + + 3 + + + + hidden_size + + int + + + + Number of hidden size. Defaults to 500. + + + + 500 + + + + embed_size + + int + + + + Number of embedding size. Defaults to 32. + + + + 32 + + + + drop + + float + + + + Probability of dropout the units. Defaults to 0.0. + + + + 0.0 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.RosslerEmbedding(("x", "y"), ("u", "v")) - - Source code in ppsci/arch/embedding_koopman.py - 198 + + Source code in ppsci/arch/embedding_koopman.py + 198 199 200 201 @@ -5551,9 +6386,9 @@ drop, ) - - + + @@ -5569,7 +6404,7 @@ - + @@ -5577,156 +6412,159 @@ + - CylinderEmbedding + CylinderEmbedding ¶ - - - Bases: Arch - + + + Bases: Arch + Embedding Koopman model for the Cylinder system. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("states", "visc"). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Output keys, such as ("pred_states", "recover_states"). - - - - required - - - - mean - - Optional[Tuple[float, ...]] - - - - Mean of training dataset. Defaults to None. - - - - None - - - - std - - Optional[Tuple[float, ...]] - - - - Standard Deviation of training dataset. Defaults to None. - - - - None - - - - embed_size - - int - - - - Number of embedding size. Defaults to 128. - - - - 128 - - - - encoder_channels - - Optional[Tuple[int, ...]] - - - - Number of channels in encoder network. Defaults to None. - - - - None - - - - decoder_channels - - Optional[Tuple[int, ...]] - - - - Number of channels in decoder network. Defaults to None. - - - - None - - - - drop - - float - - - - Probability of dropout the units. Defaults to 0.0. 
- - - - 0.0 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("states", "visc"). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Output keys, such as ("pred_states", "recover_states"). + + + + required + + + + mean + + Optional[Tuple[float, ...]] + + + + Mean of training dataset. Defaults to None. + + + + None + + + + std + + Optional[Tuple[float, ...]] + + + + Standard Deviation of training dataset. Defaults to None. + + + + None + + + + embed_size + + int + + + + Number of embedding size. Defaults to 128. + + + + 128 + + + + encoder_channels + + Optional[Tuple[int, ...]] + + + + Number of channels in encoder network. Defaults to None. + + + + None + + + + decoder_channels + + Optional[Tuple[int, ...]] + + + + Number of channels in decoder network. Defaults to None. + + + + None + + + + drop + + float + + + + Probability of dropout the units. Defaults to 0.0. + + + + 0.0 + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> model = ppsci.arch.CylinderEmbedding(("x", "y"), ("u", "v")) - - Source code in ppsci/arch/embedding_koopman.py - 239 + + Source code in ppsci/arch/embedding_koopman.py + 239 240 241 242 @@ -6250,9 +7088,9 @@ y = self._output_transform(x, y) return y - - + + @@ -6268,7 +7106,7 @@ - + @@ -6276,155 +7114,158 @@ + - Generator + Generator ¶ - - - Bases: Arch - + + + Bases: Arch + Generator Net of GAN. Attention, the net using a kind of variant of ResBlock which is unique to "tempoGAN" example but not an open source network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input1", "input2"). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output1", "output2"). - - - - required - - - - in_channel - - int - - - - Number of input channels of the first conv layer. 
- - - - required - - - - out_channels_tuple - - Tuple[Tuple[int, ...], ...] - - - - Number of output channels of all conv layers, + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input1", "input2"). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output1", "output2"). + + + + required + + + + in_channel + + int + + + + Number of input channels of the first conv layer. + + + + required + + + + out_channels_tuple + + Tuple[Tuple[int, ...], ...] + + + + Number of output channels of all conv layers, such as [[out_res0_conv0, out_res0_conv1], [out_res1_conv0, out_res1_conv1]] - - - - required - - - - kernel_sizes_tuple - - Tuple[Tuple[int, ...], ...] - - - - Number of kernel_size of all conv layers, + + + + required + + + + kernel_sizes_tuple + + Tuple[Tuple[int, ...], ...] + + + + Number of kernel_size of all conv layers, such as [[kernel_size_res0_conv0, kernel_size_res0_conv1], [kernel_size_res1_conv0, kernel_size_res1_conv1]] - - - - required - - - - strides_tuple - - Tuple[Tuple[int, ...], ...] - - - - Number of stride of all conv layers, + + + + required + + + + strides_tuple + + Tuple[Tuple[int, ...], ...] + + + + Number of stride of all conv layers, such as [[stride_res0_conv0, stride_res0_conv1], [stride_res1_conv0, stride_res1_conv1]] - - - - required - - - - use_bns_tuple - - Tuple[Tuple[bool, ...], ...] - - - - Whether to use the batch_norm layer after each conv layer. - - - - required - - - - acts_tuple - - Tuple[Tuple[str, ...], ...] - - - - Whether to use the activation layer after each conv layer. If so, witch activation to use, + + + + required + + + + use_bns_tuple + + Tuple[Tuple[bool, ...], ...] + + + + Whether to use the batch_norm layer after each conv layer. + + + + required + + + + acts_tuple + + Tuple[Tuple[str, ...], ...] + + + + Whether to use the activation layer after each conv layer. 
If so, witch activation to use, such as [[act_res0_conv0, act_res0_conv1], [act_res1_conv0, act_res1_conv1]] - - - - required - - - - + + + + required + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> in_channel = 1 >>> rb_channel0 = (2, 8, 8) >>> rb_channel1 = (128, 128, 128) @@ -6438,9 +7279,9 @@ >>> model = ppsci.arch.Generator(("in",), ("out",), in_channel, out_channels_tuple, kernel_sizes_tuple, strides_tuple, use_bns_tuple, acts_tuple) - - Source code in ppsci/arch/gan.py - 154 + + Source code in ppsci/arch/gan.py + 154 155 156 157 @@ -6628,9 +7469,9 @@ y = self._output_transform(x, y) return y - - + + @@ -6646,7 +7487,7 @@ - + @@ -6654,169 +7495,172 @@ + - Discriminator + Discriminator ¶ - - - Bases: Arch - + + + Bases: Arch + Discriminator Net of GAN. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input1", "input2"). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output1", "output2"). - - - - required - - - - in_channel - - int - - - - Number of input channels of the first conv layer. - - - - required - - - - out_channels - - Tuple[int, ...] - - - - Number of output channels of all conv layers, + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input1", "input2"). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output1", "output2"). + + + + required + + + + in_channel + + int + + + + Number of input channels of the first conv layer. + + + + required + + + + out_channels + + Tuple[int, ...] + + + + Number of output channels of all conv layers, such as (out_conv0, out_conv1, out_conv2). - - - - required - - - - fc_channel - - int - - - - Number of input features of linear layer. 
Number of output features of the layer + + + + required + + + + fc_channel + + int + + + + Number of input features of linear layer. Number of output features of the layer is set to 1 in this Net to construct a fully_connected layer. - - - - required - - - - kernel_sizes - - Tuple[int, ...] - - - - Number of kernel_size of all conv layers, + + + + required + + + + kernel_sizes + + Tuple[int, ...] + + + + Number of kernel_size of all conv layers, such as (kernel_size_conv0, kernel_size_conv1, kernel_size_conv2). - - - - required - - - - strides - - Tuple[int, ...] - - - - Number of stride of all conv layers, + + + + required + + + + strides + + Tuple[int, ...] + + + + Number of stride of all conv layers, such as (stride_conv0, stride_conv1, stride_conv2). - - - - required - - - - use_bns - - Tuple[bool, ...] - - - - Whether to use the batch_norm layer after each conv layer. - - - - required - - - - acts - - Tuple[str, ...] - - - - Whether to use the activation layer after each conv layer. If so, witch activation to use, + + + + required + + + + use_bns + + Tuple[bool, ...] + + + + Whether to use the batch_norm layer after each conv layer. + + + + required + + + + acts + + Tuple[str, ...] + + + + Whether to use the activation layer after each conv layer. If so, witch activation to use, such as (act_conv0, act_conv1, act_conv2). 
- - - - required - - - - + + + + required + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> in_channel = 2 >>> in_channel_tempo = 3 >>> out_channels = (32, 64, 128, 256) @@ -6829,9 +7673,9 @@ >>> model = ppsci.arch.Discriminator(("in_1","in_2"), output_keys_disc, in_channel, out_channels, fc_channel, kernel_sizes, strides, use_bns, acts) - - Source code in ppsci/arch/gan.py - 250 + + Source code in ppsci/arch/gan.py + 250 251 252 253 @@ -7085,9 +7929,9 @@ return {keys[0]: data_list[0]} return {key: data_list[i] for i, key in enumerate(keys)} - - + + @@ -7099,17 +7943,20 @@ + + + - split_to_dict(data_list, keys) + split_to_dict(data_list, keys) ¶ - - + + Overwrite of split_to_dict() method belongs to Class base.Arch. Reason for overwriting is there is no concat_to_tensor() method called in "tempoGAN" example. That is because input in "tempoGAN" example is not in a regular format, but a format like: @@ -7119,74 +7966,76 @@ } -Parameters: - - - - Name - Type - Description - Default - - - - - data_list - - List[Tensor] - - - - The data to be split. It should be a list of tensor(s), but not a paddle.Tensor. - - - - required - - - - keys - - Tuple[str, ...] - - - - Keys of outputs. - - - - required - - - - - - - Returns: - - - - Type - Description - - - - - - Dict[str, Tensor] - - - - Dict[str, paddle.Tensor]: Dict with split data. - - - - - - - Source code in ppsci/arch/gan.py - 355 + Parameters: + + + + Name + Type + Description + Default + + + + + data_list + + List[Tensor] + + + + The data to be split. It should be a list of tensor(s), but not a paddle.Tensor. + + + + required + + + + keys + + Tuple[str, ...] + + + + Keys of outputs. + + + + required + + + + + + + + Returns: + + + + Type + Description + + + + + + Dict[str, Tensor] + + + + Dict[str, paddle.Tensor]: Dict with split data. 
+ + + + + + + + Source code in ppsci/arch/gan.py + 355 356 357 358 @@ -7230,8 +8079,8 @@ return {keys[0]: data_list[0]} return {key: data_list[i] for i, key in enumerate(keys)} - - + + @@ -7239,7 +8088,7 @@ - + @@ -7247,184 +8096,187 @@ + - PhysformerGPT2 + PhysformerGPT2 ¶ - - - Bases: Arch - + + + Bases: Arch + Transformer decoder model for modeling physics. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("embeds",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Output keys, such as ("pred_embeds",). - - - - required - - - - num_layers - - int - - - - Number of transformer layers. - - - - required - - - - num_ctx - - int - - - - Context length of block. - - - - required - - - - embed_size - - int - - - - The number of embedding size. - - - - required - - - - num_heads - - int - - - - The number of heads in multi-head attention. - - - - required - - - - embd_pdrop - - float - - - - The dropout probability used on embedding features. Defaults to 0.0. - - - - 0.0 - - - - attn_pdrop - - float - - - - The dropout probability used on attention weights. Defaults to 0.0. - - - - 0.0 - - - - resid_pdrop - - float - - - - The dropout probability used on block outputs. Defaults to 0.0. - - - - 0.0 - - - - initializer_range - - float - - - - Initializer range of linear layer. Defaults to 0.05. - - - - 0.05 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("embeds",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Output keys, such as ("pred_embeds",). + + + + required + + + + num_layers + + int + + + + Number of transformer layers. + + + + required + + + + num_ctx + + int + + + + Context length of block. + + + + required + + + + embed_size + + int + + + + The number of embedding size. 
+ + + + required + + + + num_heads + + int + + + + The number of heads in multi-head attention. + + + + required + + + + embd_pdrop + + float + + + + The dropout probability used on embedding features. Defaults to 0.0. + + + + 0.0 + + + + attn_pdrop + + float + + + + The dropout probability used on attention weights. Defaults to 0.0. + + + + 0.0 + + + + resid_pdrop + + float + + + + The dropout probability used on block outputs. Defaults to 0.0. + + + + 0.0 + + + + initializer_range + + float + + + + Initializer range of linear layer. Defaults to 0.05. + + + + 0.05 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.PhysformerGPT2(("embeds", ), ("pred_embeds", ), 6, 16, 128, 4) - - Source code in ppsci/arch/physx_transformer.py - 240 + + Source code in ppsci/arch/physx_transformer.py + 240 241 242 243 @@ -7714,9 +8566,9 @@ y = self._output_transform(x, y) return y - - + + @@ -7732,7 +8584,7 @@ - + @@ -7740,60 +8592,63 @@ + - ModelList + ModelList ¶ - - - Bases: Arch - + + + Bases: Arch + ModelList layer which wrap more than one model that shares inputs. -Parameters: - - - - Name - Type - Description - Default - - - - - model_list - - Tuple[Arch, ...] - - - - Model(s) nested in tuple. - - - - required - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + model_list + + Tuple[Arch, ...] + + + + Model(s) nested in tuple. + + + + required + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> model1 = ppsci.arch.MLP(("x", "y"), ("u", "v"), 10, 128) >>> model2 = ppsci.arch.MLP(("x", "y"), ("w", "p"), 5, 128) >>> model = ppsci.arch.ModelList((model1, model2)) - - Source code in ppsci/arch/model_list.py - 24 + + Source code in ppsci/arch/model_list.py + 24 25 26 27 @@ -7873,9 +8728,9 @@ return y_all - - + + @@ -7891,7 +8746,7 @@ - + @@ -7899,254 +8754,257 @@ + - AFNONet + AFNONet ¶ - - - Bases: Arch - + + + Bases: Arch + Adaptive Fourier Neural Network. 
-Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output",). - - - - required - - - - img_size - - Tuple[int, ...] - - - - Image size. Defaults to (720, 1440). - - - - (720, 1440) - - - - patch_size - - Tuple[int, ...] - - - - Path. Defaults to (8, 8). - - - - (8, 8) - - - - in_channels - - int - - - - The input tensor channels. Defaults to 20. - - - - 20 - - - - out_channels - - int - - - - The output tensor channels. Defaults to 20. - - - - 20 - - - - embed_dim - - int - - - - The embedding dimension for PatchEmbed. Defaults to 768. - - - - 768 - - - - depth - - int - - - - Number of transformer depth. Defaults to 12. - - - - 12 - - - - mlp_ratio - - float - - - - Number of ratio used in MLP. Defaults to 4.0. - - - - 4.0 - - - - drop_rate - - float - - - - The drop ratio used in MLP. Defaults to 0.0. - - - - 0.0 - - - - drop_path_rate - - float - - - - The drop ratio used in DropPath. Defaults to 0.0. - - - - 0.0 - - - - num_blocks - - int - - - - Number of blocks. Defaults to 8. - - - - 8 - - - - sparsity_threshold - - float - - - - The value of threshold for softshrink. Defaults to 0.01. - - - - 0.01 - - - - hard_thresholding_fraction - - float - - - - The value of threshold for keep mode. Defaults to 1.0. - - - - 1.0 - - - - num_timestamps - - int - - - - Number of timestamp. Defaults to 1. - - - - 1 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output",). + + + + required + + + + img_size + + Tuple[int, ...] + + + + Image size. Defaults to (720, 1440). + + + + (720, 1440) + + + + patch_size + + Tuple[int, ...] + + + + Path. Defaults to (8, 8). 
+ + + + (8, 8) + + + + in_channels + + int + + + + The input tensor channels. Defaults to 20. + + + + 20 + + + + out_channels + + int + + + + The output tensor channels. Defaults to 20. + + + + 20 + + + + embed_dim + + int + + + + The embedding dimension for PatchEmbed. Defaults to 768. + + + + 768 + + + + depth + + int + + + + Number of transformer depth. Defaults to 12. + + + + 12 + + + + mlp_ratio + + float + + + + Number of ratio used in MLP. Defaults to 4.0. + + + + 4.0 + + + + drop_rate + + float + + + + The drop ratio used in MLP. Defaults to 0.0. + + + + 0.0 + + + + drop_path_rate + + float + + + + The drop ratio used in DropPath. Defaults to 0.0. + + + + 0.0 + + + + num_blocks + + int + + + + Number of blocks. Defaults to 8. + + + + 8 + + + + sparsity_threshold + + float + + + + The value of threshold for softshrink. Defaults to 0.01. + + + + 0.01 + + + + hard_thresholding_fraction + + float + + + + The value of threshold for keep mode. Defaults to 1.0. + + + + 1.0 + + + + num_timestamps + + int + + + + Number of timestamp. Defaults to 1. + + + + 1 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.AFNONet(("input", ), ("output", )) - - Source code in ppsci/arch/afno.py - 394 + + Source code in ppsci/arch/afno.py + 394 395 396 397 @@ -8466,9 +9324,9 @@ y = self._output_transform(x, y) return y - - + + @@ -8484,7 +9342,7 @@ - + @@ -8492,269 +9350,272 @@ + - PrecipNet + PrecipNet ¶ - - - Bases: Arch - + + + Bases: Arch + Precipitation Network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output",). - - - - required - - - - wind_model - - Arch - - - - Wind model. - - - - required - - - - img_size - - Tuple[int, ...] - - - - Image size. Defaults to (720, 1440). - - - - (720, 1440) - - - - patch_size - - Tuple[int, ...] 
- - - - Path. Defaults to (8, 8). - - - - (8, 8) - - - - in_channels - - int - - - - The input tensor channels. Defaults to 20. - - - - 20 - - - - out_channels - - int - - - - The output tensor channels. Defaults to 1. - - - - 1 - - - - embed_dim - - int - - - - The embedding dimension for PatchEmbed. Defaults to 768. - - - - 768 - - - - depth - - int - - - - Number of transformer depth. Defaults to 12. - - - - 12 - - - - mlp_ratio - - float - - - - Number of ratio used in MLP. Defaults to 4.0. - - - - 4.0 - - - - drop_rate - - float - - - - The drop ratio used in MLP. Defaults to 0.0. - - - - 0.0 - - - - drop_path_rate - - float - - - - The drop ratio used in DropPath. Defaults to 0.0. - - - - 0.0 - - - - num_blocks - - int - - - - Number of blocks. Defaults to 8. - - - - 8 - - - - sparsity_threshold - - float - - - - The value of threshold for softshrink. Defaults to 0.01. - - - - 0.01 - - - - hard_thresholding_fraction - - float - - - - The value of threshold for keep mode. Defaults to 1.0. - - - - 1.0 - - - - num_timestamps - - int - - - - Number of timestamp. Defaults to 1. - - - - 1 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output",). + + + + required + + + + wind_model + + Arch + + + + Wind model. + + + + required + + + + img_size + + Tuple[int, ...] + + + + Image size. Defaults to (720, 1440). + + + + (720, 1440) + + + + patch_size + + Tuple[int, ...] + + + + Path. Defaults to (8, 8). + + + + (8, 8) + + + + in_channels + + int + + + + The input tensor channels. Defaults to 20. + + + + 20 + + + + out_channels + + int + + + + The output tensor channels. Defaults to 1. + + + + 1 + + + + embed_dim + + int + + + + The embedding dimension for PatchEmbed. Defaults to 768. + + + + 768 + + + + depth + + int + + + + Number of transformer depth. Defaults to 12. 
+ + + + 12 + + + + mlp_ratio + + float + + + + Number of ratio used in MLP. Defaults to 4.0. + + + + 4.0 + + + + drop_rate + + float + + + + The drop ratio used in MLP. Defaults to 0.0. + + + + 0.0 + + + + drop_path_rate + + float + + + + The drop ratio used in DropPath. Defaults to 0.0. + + + + 0.0 + + + + num_blocks + + int + + + + Number of blocks. Defaults to 8. + + + + 8 + + + + sparsity_threshold + + float + + + + The value of threshold for softshrink. Defaults to 0.01. + + + + 0.01 + + + + hard_thresholding_fraction + + float + + + + The value of threshold for keep mode. Defaults to 1.0. + + + + 1.0 + + + + num_timestamps + + int + + + + Number of timestamp. Defaults to 1. + + + + 1 + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> wind_model = ppsci.arch.AFNONet(("input", ), ("output", )) >>> model = ppsci.arch.PrecipNet(("input", ), ("output", ), wind_model) - - Source code in ppsci/arch/afno.py - 556 + + Source code in ppsci/arch/afno.py + 556 557 558 559 @@ -9002,9 +9863,9 @@ y = self._output_transform(x, y) return y - - + + @@ -9020,7 +9881,7 @@ - + @@ -9028,199 +9889,202 @@ + - UNetEx + UNetEx ¶ - - - Bases: Arch - + + + Bases: Arch + U-Net Ribeiro M D, Rehman A, Ahmed S, et al. DeepCFD: Efficient steady-state laminar flow approximation with deep convolutional neural networks[J]. arXiv preprint arXiv:2004.08826, 2020. -Parameters: - - - - Name - Type - Description - Default - - - - - input_key - - str - - - - Name of function data for input. - - - - required - - - - output_key - - str - - - - Name of function data for output. - - - - required - - - - in_channel - - int - - - - Number of channels of input. - - - - required - - - - out_channel - - int - - - - Number of channels of output. - - - - required - - - - kernel_size - - int - - - - Size of kernel of convolution layer. Defaults to 3. - - - - 3 - - - - filters - - Tuple[int, ...] - - - - Number of filters. Defaults to (16, 32, 64). 
- - - - (16, 32, 64) - - - - layers - - int - - - - Number of encoders or decoders. Defaults to 3. - - - - 3 - - - - weight_norm - - bool - - - - Whether use weight normalization layer. Defaults to True. - - - - True - - - - batch_norm - - bool - - - - Whether add batch normalization layer. Defaults to True. - - - - True - - - - activation - - Type[Layer] - - - - Name of activation function. Defaults to nn.ReLU. - - - - ReLU - - - - final_activation - - Optional[Type[Layer]] - - - - Name of final activation function. Defaults to None. - - - - None - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_key + + str + + + + Name of function data for input. + + + + required + + + + output_key + + str + + + + Name of function data for output. + + + + required + + + + in_channel + + int + + + + Number of channels of input. + + + + required + + + + out_channel + + int + + + + Number of channels of output. + + + + required + + + + kernel_size + + int + + + + Size of kernel of convolution layer. Defaults to 3. + + + + 3 + + + + filters + + Tuple[int, ...] + + + + Number of filters. Defaults to (16, 32, 64). + + + + (16, 32, 64) + + + + layers + + int + + + + Number of encoders or decoders. Defaults to 3. + + + + 3 + + + + weight_norm + + bool + + + + Whether use weight normalization layer. Defaults to True. + + + + True + + + + batch_norm + + bool + + + + Whether add batch normalization layer. Defaults to True. + + + + True + + + + activation + + Type[Layer] + + + + Name of activation function. Defaults to nn.ReLU. + + + + ReLU + + + + final_activation + + Optional[Type[Layer]] + + + + Name of final activation function. Defaults to None. + + + + None + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.ppsci.arch.UNetEx("input", "output", 3, 3, (8, 16, 32, 32), 5, False, False) - - Source code in ppsci/arch/unetex.py - 176 + + Source code in
>>> import ppsci >>> model = ppsci.arch.MLP(("x", "y"), ("u", "v"), 5, 128)
ppsci/arch/mlp.py
53 + + Source code in ppsci/arch/mlp.py + 53 54 55 56 @@ -3851,9 +4674,9 @@ y = self._output_transform(x, y) return y - - + + @@ -3869,7 +4692,7 @@ -
53 54 55 56 @@ -3851,9 +4674,9 @@ y = self._output_transform(x, y) return y
DeepONet
Deep operator network.
Lu et al. Learning nonlinear operators via DeepONet based on the universal approximation theorem of operators. Nat Mach Intell, 2021.
u_key
Name of function data for input function u(x).
y_key
Name of location data for input function G(u).
G_key
Output name of predicted G(u)(y).
num_loc
Number of sampled u(x), i.e. m in paper.
m
num_features
Number of features extracted from u(x), same for y.
branch_num_layers
Number of hidden layers of branch net.
trunk_num_layers
Number of hidden layers of trunk net.
branch_hidden_size
Number of hidden size of branch net. + +
Number of hidden size of branch net. An integer for all layers, or list of integer specify each layer's size.
trunk_hidden_size
Number of hidden size of trunk net. +
Number of hidden size of trunk net. An integer for all layers, or list of integer specify each layer's size.
branch_skip_connection
Whether to use skip connection for branch net. Defaults to False.
trunk_skip_connection
Whether to use skip connection for trunk net. Defaults to False.
branch_activation
trunk_activation
branch_weight_norm
Whether to apply weight norm on parameter(s) for branch net. Defaults to False.
trunk_weight_norm
Whether to apply weight norm on parameter(s) for trunk net. Defaults to False.
use_bias
Whether to add bias on predicted G(u)(y). Defaults to True.
True
>>> import ppsci + +Examples: + >>> import ppsci >>> model = ppsci.arch.DeepONet( ... "u", "y", "G", ... 100, 40, @@ -4146,9 +4972,9 @@ ... ) - - Source code in ppsci/arch/deeponet.py - 28 + + Source code in ppsci/arch/deeponet.py + 28 29 30 31 @@ -4390,9 +5216,9 @@ return result_dict - - + + @@ -4408,7 +5234,7 @@ - + @@ -4416,100 +5242,103 @@ + - DeepPhyLSTM + DeepPhyLSTM ¶ - - - Bases: Arch - + + + Bases: Arch + DeepPhyLSTM init function. -Parameters: - - - - Name - Type - Description - Default - - - - - input_size - - int - - - - The input size. - - - - required - - - - output_size - - int - - - - The output size. - - - - required - - - - hidden_size - - int - - - - The hidden size. Defaults to 100. - - - - 100 - - - - model_type - - int - - - - The model type, value is 2 or 3, 2 indicates having two sub-models, 3 indicates having three submodels. Defaults to 2. - - - - 2 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_size + + int + + + + The input size. + + + + required + + + + output_size + + int + + + + The output size. + + + + required + + + + hidden_size + + int + + + + The hidden size. Defaults to 100. + + + + 100 + + + + model_type + + int + + + + The model type, value is 2 or 3, 2 indicates having two sub-models, 3 indicates having three submodels. Defaults to 2. + + + + 2 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.DeepPhyLSTM(1, 1, 100) - - Source code in ppsci/arch/phylstm.py - 21 + + Source code in ppsci/arch/phylstm.py + 21 22 23 24 @@ -4803,9 +5632,9 @@ "g_dot_pred_c": g_dot_pred_c, } - - + + @@ -4821,7 +5650,7 @@ - + @@ -4829,156 +5658,159 @@ + - LorenzEmbedding + LorenzEmbedding ¶ - - - Bases: Arch - + + + Bases: Arch + Embedding Koopman model for the Lorenz ODE system. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("states",). - - - - required - - - - output_keys - - Tuple[str, ...] 
- - - - Output keys, such as ("pred_states", "recover_states"). - - - - required - - - - mean - - Optional[Tuple[float, ...]] - - - - Mean of training dataset. Defaults to None. - - - - None - - - - std - - Optional[Tuple[float, ...]] - - - - Standard Deviation of training dataset. Defaults to None. - - - - None - - - - input_size - - int - - - - Size of input data. Defaults to 3. - - - - 3 - - - - hidden_size - - int - - - - Number of hidden size. Defaults to 500. - - - - 500 - - - - embed_size - - int - - - - Number of embedding size. Defaults to 32. - - - - 32 - - - - drop - - float - - - - Probability of dropout the units. Defaults to 0.0. - - - - 0.0 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("states",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Output keys, such as ("pred_states", "recover_states"). + + + + required + + + + mean + + Optional[Tuple[float, ...]] + + + + Mean of training dataset. Defaults to None. + + + + None + + + + std + + Optional[Tuple[float, ...]] + + + + Standard Deviation of training dataset. Defaults to None. + + + + None + + + + input_size + + int + + + + Size of input data. Defaults to 3. + + + + 3 + + + + hidden_size + + int + + + + Number of hidden size. Defaults to 500. + + + + 500 + + + + embed_size + + int + + + + Number of embedding size. Defaults to 32. + + + + 32 + + + + drop + + float + + + + Probability of dropout the units. Defaults to 0.0. 
+ + + + 0.0 + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> model = ppsci.arch.LorenzEmbedding(("x", "y"), ("u", "v")) - - Source code in ppsci/arch/embedding_koopman.py - 36 + + Source code in ppsci/arch/embedding_koopman.py + 36 37 38 39 @@ -5298,9 +6130,9 @@ y = self._output_transform(x, y) return y - - + + @@ -5316,7 +6148,7 @@ - + @@ -5324,156 +6156,159 @@ + - RosslerEmbedding + RosslerEmbedding ¶ - - - Bases: LorenzEmbedding - + + + Bases: LorenzEmbedding + Embedding Koopman model for the Rossler ODE system. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("states",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Output keys, such as ("pred_states", "recover_states"). - - - - required - - - - mean - - Optional[Tuple[float, ...]] - - - - Mean of training dataset. Defaults to None. - - - - None - - - - std - - Optional[Tuple[float, ...]] - - - - Standard Deviation of training dataset. Defaults to None. - - - - None - - - - input_size - - int - - - - Size of input data. Defaults to 3. - - - - 3 - - - - hidden_size - - int - - - - Number of hidden size. Defaults to 500. - - - - 500 - - - - embed_size - - int - - - - Number of embedding size. Defaults to 32. - - - - 32 - - - - drop - - float - - - - Probability of dropout the units. Defaults to 0.0. - - - - 0.0 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("states",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Output keys, such as ("pred_states", "recover_states"). + + + + required + + + + mean + + Optional[Tuple[float, ...]] + + + + Mean of training dataset. Defaults to None. + + + + None + + + + std + + Optional[Tuple[float, ...]] + + + + Standard Deviation of training dataset. Defaults to None. + + + + None + + + + input_size + + int + + + + Size of input data. Defaults to 3. 
+ + + + 3 + + + + hidden_size + + int + + + + Number of hidden size. Defaults to 500. + + + + 500 + + + + embed_size + + int + + + + Number of embedding size. Defaults to 32. + + + + 32 + + + + drop + + float + + + + Probability of dropout the units. Defaults to 0.0. + + + + 0.0 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.RosslerEmbedding(("x", "y"), ("u", "v")) - - Source code in ppsci/arch/embedding_koopman.py - 198 + + Source code in ppsci/arch/embedding_koopman.py + 198 199 200 201 @@ -5551,9 +6386,9 @@ drop, ) - - + + @@ -5569,7 +6404,7 @@ - + @@ -5577,156 +6412,159 @@ + - CylinderEmbedding + CylinderEmbedding ¶ - - - Bases: Arch - + + + Bases: Arch + Embedding Koopman model for the Cylinder system. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("states", "visc"). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Output keys, such as ("pred_states", "recover_states"). - - - - required - - - - mean - - Optional[Tuple[float, ...]] - - - - Mean of training dataset. Defaults to None. - - - - None - - - - std - - Optional[Tuple[float, ...]] - - - - Standard Deviation of training dataset. Defaults to None. - - - - None - - - - embed_size - - int - - - - Number of embedding size. Defaults to 128. - - - - 128 - - - - encoder_channels - - Optional[Tuple[int, ...]] - - - - Number of channels in encoder network. Defaults to None. - - - - None - - - - decoder_channels - - Optional[Tuple[int, ...]] - - - - Number of channels in decoder network. Defaults to None. - - - - None - - - - drop - - float - - - - Probability of dropout the units. Defaults to 0.0. - - - - 0.0 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("states", "visc"). + + + + required + + + + output_keys + + Tuple[str, ...] 
+ + + + Output keys, such as ("pred_states", "recover_states"). + + + + required + + + + mean + + Optional[Tuple[float, ...]] + + + + Mean of training dataset. Defaults to None. + + + + None + + + + std + + Optional[Tuple[float, ...]] + + + + Standard Deviation of training dataset. Defaults to None. + + + + None + + + + embed_size + + int + + + + Number of embedding size. Defaults to 128. + + + + 128 + + + + encoder_channels + + Optional[Tuple[int, ...]] + + + + Number of channels in encoder network. Defaults to None. + + + + None + + + + decoder_channels + + Optional[Tuple[int, ...]] + + + + Number of channels in decoder network. Defaults to None. + + + + None + + + + drop + + float + + + + Probability of dropout the units. Defaults to 0.0. + + + + 0.0 + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> model = ppsci.arch.CylinderEmbedding(("x", "y"), ("u", "v")) - - Source code in ppsci/arch/embedding_koopman.py - 239 + + Source code in ppsci/arch/embedding_koopman.py + 239 240 241 242 @@ -6250,9 +7088,9 @@ y = self._output_transform(x, y) return y - - + + @@ -6268,7 +7106,7 @@ - + @@ -6276,155 +7114,158 @@ + - Generator + Generator ¶ - - - Bases: Arch - + + + Bases: Arch + Generator Net of GAN. Attention, the net using a kind of variant of ResBlock which is unique to "tempoGAN" example but not an open source network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input1", "input2"). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output1", "output2"). - - - - required - - - - in_channel - - int - - - - Number of input channels of the first conv layer. - - - - required - - - - out_channels_tuple - - Tuple[Tuple[int, ...], ...] - - - - Number of output channels of all conv layers, + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] 
+ + + + Name of input keys, such as ("input1", "input2"). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output1", "output2"). + + + + required + + + + in_channel + + int + + + + Number of input channels of the first conv layer. + + + + required + + + + out_channels_tuple + + Tuple[Tuple[int, ...], ...] + + + + Number of output channels of all conv layers, such as [[out_res0_conv0, out_res0_conv1], [out_res1_conv0, out_res1_conv1]] - - - - required - - - - kernel_sizes_tuple - - Tuple[Tuple[int, ...], ...] - - - - Number of kernel_size of all conv layers, + + + + required + + + + kernel_sizes_tuple + + Tuple[Tuple[int, ...], ...] + + + + Number of kernel_size of all conv layers, such as [[kernel_size_res0_conv0, kernel_size_res0_conv1], [kernel_size_res1_conv0, kernel_size_res1_conv1]] - - - - required - - - - strides_tuple - - Tuple[Tuple[int, ...], ...] - - - - Number of stride of all conv layers, + + + + required + + + + strides_tuple + + Tuple[Tuple[int, ...], ...] + + + + Number of stride of all conv layers, such as [[stride_res0_conv0, stride_res0_conv1], [stride_res1_conv0, stride_res1_conv1]] - - - - required - - - - use_bns_tuple - - Tuple[Tuple[bool, ...], ...] - - - - Whether to use the batch_norm layer after each conv layer. - - - - required - - - - acts_tuple - - Tuple[Tuple[str, ...], ...] - - - - Whether to use the activation layer after each conv layer. If so, witch activation to use, + + + + required + + + + use_bns_tuple + + Tuple[Tuple[bool, ...], ...] + + + + Whether to use the batch_norm layer after each conv layer. + + + + required + + + + acts_tuple + + Tuple[Tuple[str, ...], ...] + + + + Whether to use the activation layer after each conv layer. 
If so, witch activation to use, such as [[act_res0_conv0, act_res0_conv1], [act_res1_conv0, act_res1_conv1]] - - - - required - - - - + + + + required + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> in_channel = 1 >>> rb_channel0 = (2, 8, 8) >>> rb_channel1 = (128, 128, 128) @@ -6438,9 +7279,9 @@ >>> model = ppsci.arch.Generator(("in",), ("out",), in_channel, out_channels_tuple, kernel_sizes_tuple, strides_tuple, use_bns_tuple, acts_tuple) - - Source code in ppsci/arch/gan.py - 154 + + Source code in ppsci/arch/gan.py + 154 155 156 157 @@ -6628,9 +7469,9 @@ y = self._output_transform(x, y) return y - - + + @@ -6646,7 +7487,7 @@ - + @@ -6654,169 +7495,172 @@ + - Discriminator + Discriminator ¶ - - - Bases: Arch - + + + Bases: Arch + Discriminator Net of GAN. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input1", "input2"). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output1", "output2"). - - - - required - - - - in_channel - - int - - - - Number of input channels of the first conv layer. - - - - required - - - - out_channels - - Tuple[int, ...] - - - - Number of output channels of all conv layers, + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input1", "input2"). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output1", "output2"). + + + + required + + + + in_channel + + int + + + + Number of input channels of the first conv layer. + + + + required + + + + out_channels + + Tuple[int, ...] + + + + Number of output channels of all conv layers, such as (out_conv0, out_conv1, out_conv2). - - - - required - - - - fc_channel - - int - - - - Number of input features of linear layer. 
Number of output features of the layer + + + + required + + + + fc_channel + + int + + + + Number of input features of linear layer. Number of output features of the layer is set to 1 in this Net to construct a fully_connected layer. - - - - required - - - - kernel_sizes - - Tuple[int, ...] - - - - Number of kernel_size of all conv layers, + + + + required + + + + kernel_sizes + + Tuple[int, ...] + + + + Number of kernel_size of all conv layers, such as (kernel_size_conv0, kernel_size_conv1, kernel_size_conv2). - - - - required - - - - strides - - Tuple[int, ...] - - - - Number of stride of all conv layers, + + + + required + + + + strides + + Tuple[int, ...] + + + + Number of stride of all conv layers, such as (stride_conv0, stride_conv1, stride_conv2). - - - - required - - - - use_bns - - Tuple[bool, ...] - - - - Whether to use the batch_norm layer after each conv layer. - - - - required - - - - acts - - Tuple[str, ...] - - - - Whether to use the activation layer after each conv layer. If so, witch activation to use, + + + + required + + + + use_bns + + Tuple[bool, ...] + + + + Whether to use the batch_norm layer after each conv layer. + + + + required + + + + acts + + Tuple[str, ...] + + + + Whether to use the activation layer after each conv layer. If so, witch activation to use, such as (act_conv0, act_conv1, act_conv2). 
- - - - required - - - - + + + + required + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> in_channel = 2 >>> in_channel_tempo = 3 >>> out_channels = (32, 64, 128, 256) @@ -6829,9 +7673,9 @@ >>> model = ppsci.arch.Discriminator(("in_1","in_2"), output_keys_disc, in_channel, out_channels, fc_channel, kernel_sizes, strides, use_bns, acts) - - Source code in ppsci/arch/gan.py - 250 + + Source code in ppsci/arch/gan.py + 250 251 252 253 @@ -7085,9 +7929,9 @@ return {keys[0]: data_list[0]} return {key: data_list[i] for i, key in enumerate(keys)} - - + + @@ -7099,17 +7943,20 @@ + + + - split_to_dict(data_list, keys) + split_to_dict(data_list, keys) ¶ - - + + Overwrite of split_to_dict() method belongs to Class base.Arch. Reason for overwriting is there is no concat_to_tensor() method called in "tempoGAN" example. That is because input in "tempoGAN" example is not in a regular format, but a format like: @@ -7119,74 +7966,76 @@ } -Parameters: - - - - Name - Type - Description - Default - - - - - data_list - - List[Tensor] - - - - The data to be split. It should be a list of tensor(s), but not a paddle.Tensor. - - - - required - - - - keys - - Tuple[str, ...] - - - - Keys of outputs. - - - - required - - - - - - - Returns: - - - - Type - Description - - - - - - Dict[str, Tensor] - - - - Dict[str, paddle.Tensor]: Dict with split data. - - - - - - - Source code in ppsci/arch/gan.py - 355 + Parameters: + + + + Name + Type + Description + Default + + + + + data_list + + List[Tensor] + + + + The data to be split. It should be a list of tensor(s), but not a paddle.Tensor. + + + + required + + + + keys + + Tuple[str, ...] + + + + Keys of outputs. + + + + required + + + + + + + + Returns: + + + + Type + Description + + + + + + Dict[str, Tensor] + + + + Dict[str, paddle.Tensor]: Dict with split data. 
+ + + + + + + + Source code in ppsci/arch/gan.py + 355 356 357 358 @@ -7230,8 +8079,8 @@ return {keys[0]: data_list[0]} return {key: data_list[i] for i, key in enumerate(keys)} - - + + @@ -7239,7 +8088,7 @@ - + @@ -7247,184 +8096,187 @@ + - PhysformerGPT2 + PhysformerGPT2 ¶ - - - Bases: Arch - + + + Bases: Arch + Transformer decoder model for modeling physics. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("embeds",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Output keys, such as ("pred_embeds",). - - - - required - - - - num_layers - - int - - - - Number of transformer layers. - - - - required - - - - num_ctx - - int - - - - Context length of block. - - - - required - - - - embed_size - - int - - - - The number of embedding size. - - - - required - - - - num_heads - - int - - - - The number of heads in multi-head attention. - - - - required - - - - embd_pdrop - - float - - - - The dropout probability used on embedding features. Defaults to 0.0. - - - - 0.0 - - - - attn_pdrop - - float - - - - The dropout probability used on attention weights. Defaults to 0.0. - - - - 0.0 - - - - resid_pdrop - - float - - - - The dropout probability used on block outputs. Defaults to 0.0. - - - - 0.0 - - - - initializer_range - - float - - - - Initializer range of linear layer. Defaults to 0.05. - - - - 0.05 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("embeds",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Output keys, such as ("pred_embeds",). + + + + required + + + + num_layers + + int + + + + Number of transformer layers. + + + + required + + + + num_ctx + + int + + + + Context length of block. + + + + required + + + + embed_size + + int + + + + The number of embedding size. 
+ + + + required + + + + num_heads + + int + + + + The number of heads in multi-head attention. + + + + required + + + + embd_pdrop + + float + + + + The dropout probability used on embedding features. Defaults to 0.0. + + + + 0.0 + + + + attn_pdrop + + float + + + + The dropout probability used on attention weights. Defaults to 0.0. + + + + 0.0 + + + + resid_pdrop + + float + + + + The dropout probability used on block outputs. Defaults to 0.0. + + + + 0.0 + + + + initializer_range + + float + + + + Initializer range of linear layer. Defaults to 0.05. + + + + 0.05 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.PhysformerGPT2(("embeds", ), ("pred_embeds", ), 6, 16, 128, 4) - - Source code in ppsci/arch/physx_transformer.py - 240 + + Source code in ppsci/arch/physx_transformer.py + 240 241 242 243 @@ -7714,9 +8566,9 @@ y = self._output_transform(x, y) return y - - + + @@ -7732,7 +8584,7 @@ - + @@ -7740,60 +8592,63 @@ + - ModelList + ModelList ¶ - - - Bases: Arch - + + + Bases: Arch + ModelList layer which wrap more than one model that shares inputs. -Parameters: - - - - Name - Type - Description - Default - - - - - model_list - - Tuple[Arch, ...] - - - - Model(s) nested in tuple. - - - - required - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + model_list + + Tuple[Arch, ...] + + + + Model(s) nested in tuple. + + + + required + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> model1 = ppsci.arch.MLP(("x", "y"), ("u", "v"), 10, 128) >>> model2 = ppsci.arch.MLP(("x", "y"), ("w", "p"), 5, 128) >>> model = ppsci.arch.ModelList((model1, model2)) - - Source code in ppsci/arch/model_list.py - 24 + + Source code in ppsci/arch/model_list.py + 24 25 26 27 @@ -7873,9 +8728,9 @@ return y_all - - + + @@ -7891,7 +8746,7 @@ - + @@ -7899,254 +8754,257 @@ + - AFNONet + AFNONet ¶ - - - Bases: Arch - + + + Bases: Arch + Adaptive Fourier Neural Network. 
-Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output",). - - - - required - - - - img_size - - Tuple[int, ...] - - - - Image size. Defaults to (720, 1440). - - - - (720, 1440) - - - - patch_size - - Tuple[int, ...] - - - - Path. Defaults to (8, 8). - - - - (8, 8) - - - - in_channels - - int - - - - The input tensor channels. Defaults to 20. - - - - 20 - - - - out_channels - - int - - - - The output tensor channels. Defaults to 20. - - - - 20 - - - - embed_dim - - int - - - - The embedding dimension for PatchEmbed. Defaults to 768. - - - - 768 - - - - depth - - int - - - - Number of transformer depth. Defaults to 12. - - - - 12 - - - - mlp_ratio - - float - - - - Number of ratio used in MLP. Defaults to 4.0. - - - - 4.0 - - - - drop_rate - - float - - - - The drop ratio used in MLP. Defaults to 0.0. - - - - 0.0 - - - - drop_path_rate - - float - - - - The drop ratio used in DropPath. Defaults to 0.0. - - - - 0.0 - - - - num_blocks - - int - - - - Number of blocks. Defaults to 8. - - - - 8 - - - - sparsity_threshold - - float - - - - The value of threshold for softshrink. Defaults to 0.01. - - - - 0.01 - - - - hard_thresholding_fraction - - float - - - - The value of threshold for keep mode. Defaults to 1.0. - - - - 1.0 - - - - num_timestamps - - int - - - - Number of timestamp. Defaults to 1. - - - - 1 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output",). + + + + required + + + + img_size + + Tuple[int, ...] + + + + Image size. Defaults to (720, 1440). + + + + (720, 1440) + + + + patch_size + + Tuple[int, ...] + + + + Path. Defaults to (8, 8). 
+ + + + (8, 8) + + + + in_channels + + int + + + + The input tensor channels. Defaults to 20. + + + + 20 + + + + out_channels + + int + + + + The output tensor channels. Defaults to 20. + + + + 20 + + + + embed_dim + + int + + + + The embedding dimension for PatchEmbed. Defaults to 768. + + + + 768 + + + + depth + + int + + + + Number of transformer depth. Defaults to 12. + + + + 12 + + + + mlp_ratio + + float + + + + Number of ratio used in MLP. Defaults to 4.0. + + + + 4.0 + + + + drop_rate + + float + + + + The drop ratio used in MLP. Defaults to 0.0. + + + + 0.0 + + + + drop_path_rate + + float + + + + The drop ratio used in DropPath. Defaults to 0.0. + + + + 0.0 + + + + num_blocks + + int + + + + Number of blocks. Defaults to 8. + + + + 8 + + + + sparsity_threshold + + float + + + + The value of threshold for softshrink. Defaults to 0.01. + + + + 0.01 + + + + hard_thresholding_fraction + + float + + + + The value of threshold for keep mode. Defaults to 1.0. + + + + 1.0 + + + + num_timestamps + + int + + + + Number of timestamp. Defaults to 1. + + + + 1 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.AFNONet(("input", ), ("output", )) - - Source code in ppsci/arch/afno.py - 394 + + Source code in ppsci/arch/afno.py + 394 395 396 397 @@ -8466,9 +9324,9 @@ y = self._output_transform(x, y) return y - - + + @@ -8484,7 +9342,7 @@ - + @@ -8492,269 +9350,272 @@ + - PrecipNet + PrecipNet ¶ - - - Bases: Arch - + + + Bases: Arch + Precipitation Network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output",). - - - - required - - - - wind_model - - Arch - - - - Wind model. - - - - required - - - - img_size - - Tuple[int, ...] - - - - Image size. Defaults to (720, 1440). - - - - (720, 1440) - - - - patch_size - - Tuple[int, ...] 
- - - - Path. Defaults to (8, 8). - - - - (8, 8) - - - - in_channels - - int - - - - The input tensor channels. Defaults to 20. - - - - 20 - - - - out_channels - - int - - - - The output tensor channels. Defaults to 1. - - - - 1 - - - - embed_dim - - int - - - - The embedding dimension for PatchEmbed. Defaults to 768. - - - - 768 - - - - depth - - int - - - - Number of transformer depth. Defaults to 12. - - - - 12 - - - - mlp_ratio - - float - - - - Number of ratio used in MLP. Defaults to 4.0. - - - - 4.0 - - - - drop_rate - - float - - - - The drop ratio used in MLP. Defaults to 0.0. - - - - 0.0 - - - - drop_path_rate - - float - - - - The drop ratio used in DropPath. Defaults to 0.0. - - - - 0.0 - - - - num_blocks - - int - - - - Number of blocks. Defaults to 8. - - - - 8 - - - - sparsity_threshold - - float - - - - The value of threshold for softshrink. Defaults to 0.01. - - - - 0.01 - - - - hard_thresholding_fraction - - float - - - - The value of threshold for keep mode. Defaults to 1.0. - - - - 1.0 - - - - num_timestamps - - int - - - - Number of timestamp. Defaults to 1. - - - - 1 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output",). + + + + required + + + + wind_model + + Arch + + + + Wind model. + + + + required + + + + img_size + + Tuple[int, ...] + + + + Image size. Defaults to (720, 1440). + + + + (720, 1440) + + + + patch_size + + Tuple[int, ...] + + + + Path. Defaults to (8, 8). + + + + (8, 8) + + + + in_channels + + int + + + + The input tensor channels. Defaults to 20. + + + + 20 + + + + out_channels + + int + + + + The output tensor channels. Defaults to 1. + + + + 1 + + + + embed_dim + + int + + + + The embedding dimension for PatchEmbed. Defaults to 768. + + + + 768 + + + + depth + + int + + + + Number of transformer depth. Defaults to 12. 
+ + + + 12 + + + + mlp_ratio + + float + + + + Number of ratio used in MLP. Defaults to 4.0. + + + + 4.0 + + + + drop_rate + + float + + + + The drop ratio used in MLP. Defaults to 0.0. + + + + 0.0 + + + + drop_path_rate + + float + + + + The drop ratio used in DropPath. Defaults to 0.0. + + + + 0.0 + + + + num_blocks + + int + + + + Number of blocks. Defaults to 8. + + + + 8 + + + + sparsity_threshold + + float + + + + The value of threshold for softshrink. Defaults to 0.01. + + + + 0.01 + + + + hard_thresholding_fraction + + float + + + + The value of threshold for keep mode. Defaults to 1.0. + + + + 1.0 + + + + num_timestamps + + int + + + + Number of timestamp. Defaults to 1. + + + + 1 + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> wind_model = ppsci.arch.AFNONet(("input", ), ("output", )) >>> model = ppsci.arch.PrecipNet(("input", ), ("output", ), wind_model) - - Source code in ppsci/arch/afno.py - 556 + + Source code in ppsci/arch/afno.py + 556 557 558 559 @@ -9002,9 +9863,9 @@ y = self._output_transform(x, y) return y - - + + @@ -9020,7 +9881,7 @@ - + @@ -9028,199 +9889,202 @@ + - UNetEx + UNetEx ¶ - - - Bases: Arch - + + + Bases: Arch + U-Net Ribeiro M D, Rehman A, Ahmed S, et al. DeepCFD: Efficient steady-state laminar flow approximation with deep convolutional neural networks[J]. arXiv preprint arXiv:2004.08826, 2020. -Parameters: - - - - Name - Type - Description - Default - - - - - input_key - - str - - - - Name of function data for input. - - - - required - - - - output_key - - str - - - - Name of function data for output. - - - - required - - - - in_channel - - int - - - - Number of channels of input. - - - - required - - - - out_channel - - int - - - - Number of channels of output. - - - - required - - - - kernel_size - - int - - - - Size of kernel of convolution layer. Defaults to 3. - - - - 3 - - - - filters - - Tuple[int, ...] - - - - Number of filters. Defaults to (16, 32, 64). 
- - - - (16, 32, 64) - - - - layers - - int - - - - Number of encoders or decoders. Defaults to 3. - - - - 3 - - - - weight_norm - - bool - - - - Whether use weight normalization layer. Defaults to True. - - - - True - - - - batch_norm - - bool - - - - Whether add batch normalization layer. Defaults to True. - - - - True - - - - activation - - Type[Layer] - - - - Name of activation function. Defaults to nn.ReLU. - - - - ReLU - - - - final_activation - - Optional[Type[Layer]] - - - - Name of final activation function. Defaults to None. - - - - None - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_key + + str + + + + Name of function data for input. + + + + required + + + + output_key + + str + + + + Name of function data for output. + + + + required + + + + in_channel + + int + + + + Number of channels of input. + + + + required + + + + out_channel + + int + + + + Number of channels of output. + + + + required + + + + kernel_size + + int + + + + Size of kernel of convolution layer. Defaults to 3. + + + + 3 + + + + filters + + Tuple[int, ...] + + + + Number of filters. Defaults to (16, 32, 64). + + + + (16, 32, 64) + + + + layers + + int + + + + Number of encoders or decoders. Defaults to 3. + + + + 3 + + + + weight_norm + + bool + + + + Whether use weight normalization layer. Defaults to True. + + + + True + + + + batch_norm + + bool + + + + Whether add batch normalization layer. Defaults to True. + + + + True + + + + activation + + Type[Layer] + + + + Name of activation function. Defaults to nn.ReLU. + + + + ReLU + + + + final_activation + + Optional[Type[Layer]] + + + + Name of final activation function. Defaults to None. + + + + None + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.ppsci.arch.UNetEx("input", "output", 3, 3, (8, 16, 32, 32), 5, False, False) - - Source code in ppsci/arch/unetex.py - 176 + + Source code in
>>> import ppsci >>> model = ppsci.arch.DeepONet( ... "u", "y", "G", ... 100, 40, @@ -4146,9 +4972,9 @@ ... )
ppsci/arch/deeponet.py
28 + + Source code in ppsci/arch/deeponet.py + 28 29 30 31 @@ -4390,9 +5216,9 @@ return result_dict - - + + @@ -4408,7 +5234,7 @@ -
28 29 30 31 @@ -4390,9 +5216,9 @@ return result_dict
DeepPhyLSTM
DeepPhyLSTM init function.
input_size
The input size.
output_size
The output size.
The hidden size. Defaults to 100.
100
model_type
The model type, value is 2 or 3, 2 indicates having two sub-models, 3 indicates having three submodels. Defaults to 2.
2
>>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.DeepPhyLSTM(1, 1, 100) - - Source code in ppsci/arch/phylstm.py - 21 + + Source code in ppsci/arch/phylstm.py + 21 22 23 24 @@ -4803,9 +5632,9 @@ "g_dot_pred_c": g_dot_pred_c, } - - + + @@ -4821,7 +5650,7 @@ - + @@ -4829,156 +5658,159 @@ + - LorenzEmbedding + LorenzEmbedding ¶ - - - Bases: Arch - + + + Bases: Arch + Embedding Koopman model for the Lorenz ODE system. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("states",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Output keys, such as ("pred_states", "recover_states"). - - - - required - - - - mean - - Optional[Tuple[float, ...]] - - - - Mean of training dataset. Defaults to None. - - - - None - - - - std - - Optional[Tuple[float, ...]] - - - - Standard Deviation of training dataset. Defaults to None. - - - - None - - - - input_size - - int - - - - Size of input data. Defaults to 3. - - - - 3 - - - - hidden_size - - int - - - - Number of hidden size. Defaults to 500. - - - - 500 - - - - embed_size - - int - - - - Number of embedding size. Defaults to 32. - - - - 32 - - - - drop - - float - - - - Probability of dropout the units. Defaults to 0.0. - - - - 0.0 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("states",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Output keys, such as ("pred_states", "recover_states"). + + + + required + + + + mean + + Optional[Tuple[float, ...]] + + + + Mean of training dataset. Defaults to None. + + + + None + + + + std + + Optional[Tuple[float, ...]] + + + + Standard Deviation of training dataset. Defaults to None. + + + + None + + + + input_size + + int + + + + Size of input data. Defaults to 3. + + + + 3 + + + + hidden_size + + int + + + + Number of hidden size. Defaults to 500. 
+ + + + 500 + + + + embed_size + + int + + + + Number of embedding size. Defaults to 32. + + + + 32 + + + + drop + + float + + + + Probability of dropout the units. Defaults to 0.0. + + + + 0.0 + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> model = ppsci.arch.LorenzEmbedding(("x", "y"), ("u", "v")) - - Source code in ppsci/arch/embedding_koopman.py - 36 + + Source code in ppsci/arch/embedding_koopman.py + 36 37 38 39 @@ -5298,9 +6130,9 @@ y = self._output_transform(x, y) return y - - + + @@ -5316,7 +6148,7 @@ - + @@ -5324,156 +6156,159 @@ + - RosslerEmbedding + RosslerEmbedding ¶ - - - Bases: LorenzEmbedding - + + + Bases: LorenzEmbedding + Embedding Koopman model for the Rossler ODE system. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("states",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Output keys, such as ("pred_states", "recover_states"). - - - - required - - - - mean - - Optional[Tuple[float, ...]] - - - - Mean of training dataset. Defaults to None. - - - - None - - - - std - - Optional[Tuple[float, ...]] - - - - Standard Deviation of training dataset. Defaults to None. - - - - None - - - - input_size - - int - - - - Size of input data. Defaults to 3. - - - - 3 - - - - hidden_size - - int - - - - Number of hidden size. Defaults to 500. - - - - 500 - - - - embed_size - - int - - - - Number of embedding size. Defaults to 32. - - - - 32 - - - - drop - - float - - - - Probability of dropout the units. Defaults to 0.0. - - - - 0.0 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("states",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Output keys, such as ("pred_states", "recover_states"). + + + + required + + + + mean + + Optional[Tuple[float, ...]] + + + + Mean of training dataset. Defaults to None. 
+ + + + None + + + + std + + Optional[Tuple[float, ...]] + + + + Standard Deviation of training dataset. Defaults to None. + + + + None + + + + input_size + + int + + + + Size of input data. Defaults to 3. + + + + 3 + + + + hidden_size + + int + + + + Number of hidden size. Defaults to 500. + + + + 500 + + + + embed_size + + int + + + + Number of embedding size. Defaults to 32. + + + + 32 + + + + drop + + float + + + + Probability of dropout the units. Defaults to 0.0. + + + + 0.0 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.RosslerEmbedding(("x", "y"), ("u", "v")) - - Source code in ppsci/arch/embedding_koopman.py - 198 + + Source code in ppsci/arch/embedding_koopman.py + 198 199 200 201 @@ -5551,9 +6386,9 @@ drop, ) - - + + @@ -5569,7 +6404,7 @@ - + @@ -5577,156 +6412,159 @@ + - CylinderEmbedding + CylinderEmbedding ¶ - - - Bases: Arch - + + + Bases: Arch + Embedding Koopman model for the Cylinder system. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("states", "visc"). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Output keys, such as ("pred_states", "recover_states"). - - - - required - - - - mean - - Optional[Tuple[float, ...]] - - - - Mean of training dataset. Defaults to None. - - - - None - - - - std - - Optional[Tuple[float, ...]] - - - - Standard Deviation of training dataset. Defaults to None. - - - - None - - - - embed_size - - int - - - - Number of embedding size. Defaults to 128. - - - - 128 - - - - encoder_channels - - Optional[Tuple[int, ...]] - - - - Number of channels in encoder network. Defaults to None. - - - - None - - - - decoder_channels - - Optional[Tuple[int, ...]] - - - - Number of channels in decoder network. Defaults to None. - - - - None - - - - drop - - float - - - - Probability of dropout the units. Defaults to 0.0. 
- - - - 0.0 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("states", "visc"). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Output keys, such as ("pred_states", "recover_states"). + + + + required + + + + mean + + Optional[Tuple[float, ...]] + + + + Mean of training dataset. Defaults to None. + + + + None + + + + std + + Optional[Tuple[float, ...]] + + + + Standard Deviation of training dataset. Defaults to None. + + + + None + + + + embed_size + + int + + + + Number of embedding size. Defaults to 128. + + + + 128 + + + + encoder_channels + + Optional[Tuple[int, ...]] + + + + Number of channels in encoder network. Defaults to None. + + + + None + + + + decoder_channels + + Optional[Tuple[int, ...]] + + + + Number of channels in decoder network. Defaults to None. + + + + None + + + + drop + + float + + + + Probability of dropout the units. Defaults to 0.0. + + + + 0.0 + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> model = ppsci.arch.CylinderEmbedding(("x", "y"), ("u", "v")) - - Source code in ppsci/arch/embedding_koopman.py - 239 + + Source code in ppsci/arch/embedding_koopman.py + 239 240 241 242 @@ -6250,9 +7088,9 @@ y = self._output_transform(x, y) return y - - + + @@ -6268,7 +7106,7 @@ - + @@ -6276,155 +7114,158 @@ + - Generator + Generator ¶ - - - Bases: Arch - + + + Bases: Arch + Generator Net of GAN. Attention, the net using a kind of variant of ResBlock which is unique to "tempoGAN" example but not an open source network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input1", "input2"). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output1", "output2"). - - - - required - - - - in_channel - - int - - - - Number of input channels of the first conv layer. 
- - - - required - - - - out_channels_tuple - - Tuple[Tuple[int, ...], ...] - - - - Number of output channels of all conv layers, + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input1", "input2"). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output1", "output2"). + + + + required + + + + in_channel + + int + + + + Number of input channels of the first conv layer. + + + + required + + + + out_channels_tuple + + Tuple[Tuple[int, ...], ...] + + + + Number of output channels of all conv layers, such as [[out_res0_conv0, out_res0_conv1], [out_res1_conv0, out_res1_conv1]] - - - - required - - - - kernel_sizes_tuple - - Tuple[Tuple[int, ...], ...] - - - - Number of kernel_size of all conv layers, + + + + required + + + + kernel_sizes_tuple + + Tuple[Tuple[int, ...], ...] + + + + Number of kernel_size of all conv layers, such as [[kernel_size_res0_conv0, kernel_size_res0_conv1], [kernel_size_res1_conv0, kernel_size_res1_conv1]] - - - - required - - - - strides_tuple - - Tuple[Tuple[int, ...], ...] - - - - Number of stride of all conv layers, + + + + required + + + + strides_tuple + + Tuple[Tuple[int, ...], ...] + + + + Number of stride of all conv layers, such as [[stride_res0_conv0, stride_res0_conv1], [stride_res1_conv0, stride_res1_conv1]] - - - - required - - - - use_bns_tuple - - Tuple[Tuple[bool, ...], ...] - - - - Whether to use the batch_norm layer after each conv layer. - - - - required - - - - acts_tuple - - Tuple[Tuple[str, ...], ...] - - - - Whether to use the activation layer after each conv layer. If so, witch activation to use, + + + + required + + + + use_bns_tuple + + Tuple[Tuple[bool, ...], ...] + + + + Whether to use the batch_norm layer after each conv layer. + + + + required + + + + acts_tuple + + Tuple[Tuple[str, ...], ...] + + + + Whether to use the activation layer after each conv layer. 
If so, witch activation to use, such as [[act_res0_conv0, act_res0_conv1], [act_res1_conv0, act_res1_conv1]] - - - - required - - - - + + + + required + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> in_channel = 1 >>> rb_channel0 = (2, 8, 8) >>> rb_channel1 = (128, 128, 128) @@ -6438,9 +7279,9 @@ >>> model = ppsci.arch.Generator(("in",), ("out",), in_channel, out_channels_tuple, kernel_sizes_tuple, strides_tuple, use_bns_tuple, acts_tuple) - - Source code in ppsci/arch/gan.py - 154 + + Source code in ppsci/arch/gan.py + 154 155 156 157 @@ -6628,9 +7469,9 @@ y = self._output_transform(x, y) return y - - + + @@ -6646,7 +7487,7 @@ - + @@ -6654,169 +7495,172 @@ + - Discriminator + Discriminator ¶ - - - Bases: Arch - + + + Bases: Arch + Discriminator Net of GAN. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input1", "input2"). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output1", "output2"). - - - - required - - - - in_channel - - int - - - - Number of input channels of the first conv layer. - - - - required - - - - out_channels - - Tuple[int, ...] - - - - Number of output channels of all conv layers, + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input1", "input2"). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output1", "output2"). + + + + required + + + + in_channel + + int + + + + Number of input channels of the first conv layer. + + + + required + + + + out_channels + + Tuple[int, ...] + + + + Number of output channels of all conv layers, such as (out_conv0, out_conv1, out_conv2). - - - - required - - - - fc_channel - - int - - - - Number of input features of linear layer. 
Number of output features of the layer + + + + required + + + + fc_channel + + int + + + + Number of input features of linear layer. Number of output features of the layer is set to 1 in this Net to construct a fully_connected layer. - - - - required - - - - kernel_sizes - - Tuple[int, ...] - - - - Number of kernel_size of all conv layers, + + + + required + + + + kernel_sizes + + Tuple[int, ...] + + + + Number of kernel_size of all conv layers, such as (kernel_size_conv0, kernel_size_conv1, kernel_size_conv2). - - - - required - - - - strides - - Tuple[int, ...] - - - - Number of stride of all conv layers, + + + + required + + + + strides + + Tuple[int, ...] + + + + Number of stride of all conv layers, such as (stride_conv0, stride_conv1, stride_conv2). - - - - required - - - - use_bns - - Tuple[bool, ...] - - - - Whether to use the batch_norm layer after each conv layer. - - - - required - - - - acts - - Tuple[str, ...] - - - - Whether to use the activation layer after each conv layer. If so, witch activation to use, + + + + required + + + + use_bns + + Tuple[bool, ...] + + + + Whether to use the batch_norm layer after each conv layer. + + + + required + + + + acts + + Tuple[str, ...] + + + + Whether to use the activation layer after each conv layer. If so, witch activation to use, such as (act_conv0, act_conv1, act_conv2). 
- - - - required - - - - + + + + required + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> in_channel = 2 >>> in_channel_tempo = 3 >>> out_channels = (32, 64, 128, 256) @@ -6829,9 +7673,9 @@ >>> model = ppsci.arch.Discriminator(("in_1","in_2"), output_keys_disc, in_channel, out_channels, fc_channel, kernel_sizes, strides, use_bns, acts) - - Source code in ppsci/arch/gan.py - 250 + + Source code in ppsci/arch/gan.py + 250 251 252 253 @@ -7085,9 +7929,9 @@ return {keys[0]: data_list[0]} return {key: data_list[i] for i, key in enumerate(keys)} - - + + @@ -7099,17 +7943,20 @@ + + + - split_to_dict(data_list, keys) + split_to_dict(data_list, keys) ¶ - - + + Overwrite of split_to_dict() method belongs to Class base.Arch. Reason for overwriting is there is no concat_to_tensor() method called in "tempoGAN" example. That is because input in "tempoGAN" example is not in a regular format, but a format like: @@ -7119,74 +7966,76 @@ } -Parameters: - - - - Name - Type - Description - Default - - - - - data_list - - List[Tensor] - - - - The data to be split. It should be a list of tensor(s), but not a paddle.Tensor. - - - - required - - - - keys - - Tuple[str, ...] - - - - Keys of outputs. - - - - required - - - - - - - Returns: - - - - Type - Description - - - - - - Dict[str, Tensor] - - - - Dict[str, paddle.Tensor]: Dict with split data. - - - - - - - Source code in ppsci/arch/gan.py - 355 + Parameters: + + + + Name + Type + Description + Default + + + + + data_list + + List[Tensor] + + + + The data to be split. It should be a list of tensor(s), but not a paddle.Tensor. + + + + required + + + + keys + + Tuple[str, ...] + + + + Keys of outputs. + + + + required + + + + + + + + Returns: + + + + Type + Description + + + + + + Dict[str, Tensor] + + + + Dict[str, paddle.Tensor]: Dict with split data. 
+ + + + + + + + Source code in ppsci/arch/gan.py + 355 356 357 358 @@ -7230,8 +8079,8 @@ return {keys[0]: data_list[0]} return {key: data_list[i] for i, key in enumerate(keys)} - - + + @@ -7239,7 +8088,7 @@ - + @@ -7247,184 +8096,187 @@ + - PhysformerGPT2 + PhysformerGPT2 ¶ - - - Bases: Arch - + + + Bases: Arch + Transformer decoder model for modeling physics. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("embeds",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Output keys, such as ("pred_embeds",). - - - - required - - - - num_layers - - int - - - - Number of transformer layers. - - - - required - - - - num_ctx - - int - - - - Context length of block. - - - - required - - - - embed_size - - int - - - - The number of embedding size. - - - - required - - - - num_heads - - int - - - - The number of heads in multi-head attention. - - - - required - - - - embd_pdrop - - float - - - - The dropout probability used on embedding features. Defaults to 0.0. - - - - 0.0 - - - - attn_pdrop - - float - - - - The dropout probability used on attention weights. Defaults to 0.0. - - - - 0.0 - - - - resid_pdrop - - float - - - - The dropout probability used on block outputs. Defaults to 0.0. - - - - 0.0 - - - - initializer_range - - float - - - - Initializer range of linear layer. Defaults to 0.05. - - - - 0.05 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("embeds",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Output keys, such as ("pred_embeds",). + + + + required + + + + num_layers + + int + + + + Number of transformer layers. + + + + required + + + + num_ctx + + int + + + + Context length of block. + + + + required + + + + embed_size + + int + + + + The number of embedding size. 
+ + + + required + + + + num_heads + + int + + + + The number of heads in multi-head attention. + + + + required + + + + embd_pdrop + + float + + + + The dropout probability used on embedding features. Defaults to 0.0. + + + + 0.0 + + + + attn_pdrop + + float + + + + The dropout probability used on attention weights. Defaults to 0.0. + + + + 0.0 + + + + resid_pdrop + + float + + + + The dropout probability used on block outputs. Defaults to 0.0. + + + + 0.0 + + + + initializer_range + + float + + + + Initializer range of linear layer. Defaults to 0.05. + + + + 0.05 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.PhysformerGPT2(("embeds", ), ("pred_embeds", ), 6, 16, 128, 4) - - Source code in ppsci/arch/physx_transformer.py - 240 + + Source code in ppsci/arch/physx_transformer.py + 240 241 242 243 @@ -7714,9 +8566,9 @@ y = self._output_transform(x, y) return y - - + + @@ -7732,7 +8584,7 @@ - + @@ -7740,60 +8592,63 @@ + - ModelList + ModelList ¶ - - - Bases: Arch - + + + Bases: Arch + ModelList layer which wrap more than one model that shares inputs. -Parameters: - - - - Name - Type - Description - Default - - - - - model_list - - Tuple[Arch, ...] - - - - Model(s) nested in tuple. - - - - required - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + model_list + + Tuple[Arch, ...] + + + + Model(s) nested in tuple. + + + + required + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> model1 = ppsci.arch.MLP(("x", "y"), ("u", "v"), 10, 128) >>> model2 = ppsci.arch.MLP(("x", "y"), ("w", "p"), 5, 128) >>> model = ppsci.arch.ModelList((model1, model2)) - - Source code in ppsci/arch/model_list.py - 24 + + Source code in ppsci/arch/model_list.py + 24 25 26 27 @@ -7873,9 +8728,9 @@ return y_all - - + + @@ -7891,7 +8746,7 @@ - + @@ -7899,254 +8754,257 @@ + - AFNONet + AFNONet ¶ - - - Bases: Arch - + + + Bases: Arch + Adaptive Fourier Neural Network. 
-Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output",). - - - - required - - - - img_size - - Tuple[int, ...] - - - - Image size. Defaults to (720, 1440). - - - - (720, 1440) - - - - patch_size - - Tuple[int, ...] - - - - Path. Defaults to (8, 8). - - - - (8, 8) - - - - in_channels - - int - - - - The input tensor channels. Defaults to 20. - - - - 20 - - - - out_channels - - int - - - - The output tensor channels. Defaults to 20. - - - - 20 - - - - embed_dim - - int - - - - The embedding dimension for PatchEmbed. Defaults to 768. - - - - 768 - - - - depth - - int - - - - Number of transformer depth. Defaults to 12. - - - - 12 - - - - mlp_ratio - - float - - - - Number of ratio used in MLP. Defaults to 4.0. - - - - 4.0 - - - - drop_rate - - float - - - - The drop ratio used in MLP. Defaults to 0.0. - - - - 0.0 - - - - drop_path_rate - - float - - - - The drop ratio used in DropPath. Defaults to 0.0. - - - - 0.0 - - - - num_blocks - - int - - - - Number of blocks. Defaults to 8. - - - - 8 - - - - sparsity_threshold - - float - - - - The value of threshold for softshrink. Defaults to 0.01. - - - - 0.01 - - - - hard_thresholding_fraction - - float - - - - The value of threshold for keep mode. Defaults to 1.0. - - - - 1.0 - - - - num_timestamps - - int - - - - Number of timestamp. Defaults to 1. - - - - 1 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output",). + + + + required + + + + img_size + + Tuple[int, ...] + + + + Image size. Defaults to (720, 1440). + + + + (720, 1440) + + + + patch_size + + Tuple[int, ...] + + + + Path. Defaults to (8, 8). 
+ + + + (8, 8) + + + + in_channels + + int + + + + The input tensor channels. Defaults to 20. + + + + 20 + + + + out_channels + + int + + + + The output tensor channels. Defaults to 20. + + + + 20 + + + + embed_dim + + int + + + + The embedding dimension for PatchEmbed. Defaults to 768. + + + + 768 + + + + depth + + int + + + + Number of transformer depth. Defaults to 12. + + + + 12 + + + + mlp_ratio + + float + + + + Number of ratio used in MLP. Defaults to 4.0. + + + + 4.0 + + + + drop_rate + + float + + + + The drop ratio used in MLP. Defaults to 0.0. + + + + 0.0 + + + + drop_path_rate + + float + + + + The drop ratio used in DropPath. Defaults to 0.0. + + + + 0.0 + + + + num_blocks + + int + + + + Number of blocks. Defaults to 8. + + + + 8 + + + + sparsity_threshold + + float + + + + The value of threshold for softshrink. Defaults to 0.01. + + + + 0.01 + + + + hard_thresholding_fraction + + float + + + + The value of threshold for keep mode. Defaults to 1.0. + + + + 1.0 + + + + num_timestamps + + int + + + + Number of timestamp. Defaults to 1. + + + + 1 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.AFNONet(("input", ), ("output", )) - - Source code in ppsci/arch/afno.py - 394 + + Source code in ppsci/arch/afno.py + 394 395 396 397 @@ -8466,9 +9324,9 @@ y = self._output_transform(x, y) return y - - + + @@ -8484,7 +9342,7 @@ - + @@ -8492,269 +9350,272 @@ + - PrecipNet + PrecipNet ¶ - - - Bases: Arch - + + + Bases: Arch + Precipitation Network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output",). - - - - required - - - - wind_model - - Arch - - - - Wind model. - - - - required - - - - img_size - - Tuple[int, ...] - - - - Image size. Defaults to (720, 1440). - - - - (720, 1440) - - - - patch_size - - Tuple[int, ...] 
- - - - Path. Defaults to (8, 8). - - - - (8, 8) - - - - in_channels - - int - - - - The input tensor channels. Defaults to 20. - - - - 20 - - - - out_channels - - int - - - - The output tensor channels. Defaults to 1. - - - - 1 - - - - embed_dim - - int - - - - The embedding dimension for PatchEmbed. Defaults to 768. - - - - 768 - - - - depth - - int - - - - Number of transformer depth. Defaults to 12. - - - - 12 - - - - mlp_ratio - - float - - - - Number of ratio used in MLP. Defaults to 4.0. - - - - 4.0 - - - - drop_rate - - float - - - - The drop ratio used in MLP. Defaults to 0.0. - - - - 0.0 - - - - drop_path_rate - - float - - - - The drop ratio used in DropPath. Defaults to 0.0. - - - - 0.0 - - - - num_blocks - - int - - - - Number of blocks. Defaults to 8. - - - - 8 - - - - sparsity_threshold - - float - - - - The value of threshold for softshrink. Defaults to 0.01. - - - - 0.01 - - - - hard_thresholding_fraction - - float - - - - The value of threshold for keep mode. Defaults to 1.0. - - - - 1.0 - - - - num_timestamps - - int - - - - Number of timestamp. Defaults to 1. - - - - 1 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output",). + + + + required + + + + wind_model + + Arch + + + + Wind model. + + + + required + + + + img_size + + Tuple[int, ...] + + + + Image size. Defaults to (720, 1440). + + + + (720, 1440) + + + + patch_size + + Tuple[int, ...] + + + + Path. Defaults to (8, 8). + + + + (8, 8) + + + + in_channels + + int + + + + The input tensor channels. Defaults to 20. + + + + 20 + + + + out_channels + + int + + + + The output tensor channels. Defaults to 1. + + + + 1 + + + + embed_dim + + int + + + + The embedding dimension for PatchEmbed. Defaults to 768. + + + + 768 + + + + depth + + int + + + + Number of transformer depth. Defaults to 12. 
+ + + + 12 + + + + mlp_ratio + + float + + + + Number of ratio used in MLP. Defaults to 4.0. + + + + 4.0 + + + + drop_rate + + float + + + + The drop ratio used in MLP. Defaults to 0.0. + + + + 0.0 + + + + drop_path_rate + + float + + + + The drop ratio used in DropPath. Defaults to 0.0. + + + + 0.0 + + + + num_blocks + + int + + + + Number of blocks. Defaults to 8. + + + + 8 + + + + sparsity_threshold + + float + + + + The value of threshold for softshrink. Defaults to 0.01. + + + + 0.01 + + + + hard_thresholding_fraction + + float + + + + The value of threshold for keep mode. Defaults to 1.0. + + + + 1.0 + + + + num_timestamps + + int + + + + Number of timestamp. Defaults to 1. + + + + 1 + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> wind_model = ppsci.arch.AFNONet(("input", ), ("output", )) >>> model = ppsci.arch.PrecipNet(("input", ), ("output", ), wind_model) - - Source code in ppsci/arch/afno.py - 556 + + Source code in ppsci/arch/afno.py + 556 557 558 559 @@ -9002,9 +9863,9 @@ y = self._output_transform(x, y) return y - - + + @@ -9020,7 +9881,7 @@ - + @@ -9028,199 +9889,202 @@ + - UNetEx + UNetEx ¶ - - - Bases: Arch - + + + Bases: Arch + U-Net Ribeiro M D, Rehman A, Ahmed S, et al. DeepCFD: Efficient steady-state laminar flow approximation with deep convolutional neural networks[J]. arXiv preprint arXiv:2004.08826, 2020. -Parameters: - - - - Name - Type - Description - Default - - - - - input_key - - str - - - - Name of function data for input. - - - - required - - - - output_key - - str - - - - Name of function data for output. - - - - required - - - - in_channel - - int - - - - Number of channels of input. - - - - required - - - - out_channel - - int - - - - Number of channels of output. - - - - required - - - - kernel_size - - int - - - - Size of kernel of convolution layer. Defaults to 3. - - - - 3 - - - - filters - - Tuple[int, ...] - - - - Number of filters. Defaults to (16, 32, 64). 
- - - - (16, 32, 64) - - - - layers - - int - - - - Number of encoders or decoders. Defaults to 3. - - - - 3 - - - - weight_norm - - bool - - - - Whether use weight normalization layer. Defaults to True. - - - - True - - - - batch_norm - - bool - - - - Whether add batch normalization layer. Defaults to True. - - - - True - - - - activation - - Type[Layer] - - - - Name of activation function. Defaults to nn.ReLU. - - - - ReLU - - - - final_activation - - Optional[Type[Layer]] - - - - Name of final activation function. Defaults to None. - - - - None - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_key + + str + + + + Name of function data for input. + + + + required + + + + output_key + + str + + + + Name of function data for output. + + + + required + + + + in_channel + + int + + + + Number of channels of input. + + + + required + + + + out_channel + + int + + + + Number of channels of output. + + + + required + + + + kernel_size + + int + + + + Size of kernel of convolution layer. Defaults to 3. + + + + 3 + + + + filters + + Tuple[int, ...] + + + + Number of filters. Defaults to (16, 32, 64). + + + + (16, 32, 64) + + + + layers + + int + + + + Number of encoders or decoders. Defaults to 3. + + + + 3 + + + + weight_norm + + bool + + + + Whether use weight normalization layer. Defaults to True. + + + + True + + + + batch_norm + + bool + + + + Whether add batch normalization layer. Defaults to True. + + + + True + + + + activation + + Type[Layer] + + + + Name of activation function. Defaults to nn.ReLU. + + + + ReLU + + + + final_activation + + Optional[Type[Layer]] + + + + Name of final activation function. Defaults to None. + + + + None + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.ppsci.arch.UNetEx("input", "output", 3, 3, (8, 16, 32, 32), 5, False, False) - - Source code in ppsci/arch/unetex.py - 176 + + Source code in
>>> import ppsci >>> model = ppsci.arch.DeepPhyLSTM(1, 1, 100)
ppsci/arch/phylstm.py
21 + + Source code in ppsci/arch/phylstm.py + 21 22 23 24 @@ -4803,9 +5632,9 @@ "g_dot_pred_c": g_dot_pred_c, } - - + + @@ -4821,7 +5650,7 @@ -
21 22 23 24 @@ -4803,9 +5632,9 @@ "g_dot_pred_c": g_dot_pred_c, }
LorenzEmbedding
Embedding Koopman model for the Lorenz ODE system.
Input keys, such as ("states",).
Output keys, such as ("pred_states", "recover_states").
mean
Optional[Tuple[float, ...]]
Mean of training dataset. Defaults to None.
std
Standard Deviation of training dataset. Defaults to None.
Size of input data. Defaults to 3.
3
Number of hidden size. Defaults to 500.
500
embed_size
Number of embedding size. Defaults to 32.
32
drop
float
Probability of dropout the units. Defaults to 0.0.
0.0
>>> import ppsci + +Examples: + >>> import ppsci >>> model = ppsci.arch.LorenzEmbedding(("x", "y"), ("u", "v")) - - Source code in ppsci/arch/embedding_koopman.py - 36 + + Source code in ppsci/arch/embedding_koopman.py + 36 37 38 39 @@ -5298,9 +6130,9 @@ y = self._output_transform(x, y) return y - - + + @@ -5316,7 +6148,7 @@ - + @@ -5324,156 +6156,159 @@ + - RosslerEmbedding + RosslerEmbedding ¶ - - - Bases: LorenzEmbedding - + + + Bases: LorenzEmbedding + Embedding Koopman model for the Rossler ODE system. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("states",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Output keys, such as ("pred_states", "recover_states"). - - - - required - - - - mean - - Optional[Tuple[float, ...]] - - - - Mean of training dataset. Defaults to None. - - - - None - - - - std - - Optional[Tuple[float, ...]] - - - - Standard Deviation of training dataset. Defaults to None. - - - - None - - - - input_size - - int - - - - Size of input data. Defaults to 3. - - - - 3 - - - - hidden_size - - int - - - - Number of hidden size. Defaults to 500. - - - - 500 - - - - embed_size - - int - - - - Number of embedding size. Defaults to 32. - - - - 32 - - - - drop - - float - - - - Probability of dropout the units. Defaults to 0.0. - - - - 0.0 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("states",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Output keys, such as ("pred_states", "recover_states"). + + + + required + + + + mean + + Optional[Tuple[float, ...]] + + + + Mean of training dataset. Defaults to None. + + + + None + + + + std + + Optional[Tuple[float, ...]] + + + + Standard Deviation of training dataset. Defaults to None. + + + + None + + + + input_size + + int + + + + Size of input data. Defaults to 3. 
+ + + + 3 + + + + hidden_size + + int + + + + Number of hidden size. Defaults to 500. + + + + 500 + + + + embed_size + + int + + + + Number of embedding size. Defaults to 32. + + + + 32 + + + + drop + + float + + + + Probability of dropout the units. Defaults to 0.0. + + + + 0.0 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.RosslerEmbedding(("x", "y"), ("u", "v")) - - Source code in ppsci/arch/embedding_koopman.py - 198 + + Source code in ppsci/arch/embedding_koopman.py + 198 199 200 201 @@ -5551,9 +6386,9 @@ drop, ) - - + + @@ -5569,7 +6404,7 @@ - + @@ -5577,156 +6412,159 @@ + - CylinderEmbedding + CylinderEmbedding ¶ - - - Bases: Arch - + + + Bases: Arch + Embedding Koopman model for the Cylinder system. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("states", "visc"). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Output keys, such as ("pred_states", "recover_states"). - - - - required - - - - mean - - Optional[Tuple[float, ...]] - - - - Mean of training dataset. Defaults to None. - - - - None - - - - std - - Optional[Tuple[float, ...]] - - - - Standard Deviation of training dataset. Defaults to None. - - - - None - - - - embed_size - - int - - - - Number of embedding size. Defaults to 128. - - - - 128 - - - - encoder_channels - - Optional[Tuple[int, ...]] - - - - Number of channels in encoder network. Defaults to None. - - - - None - - - - decoder_channels - - Optional[Tuple[int, ...]] - - - - Number of channels in decoder network. Defaults to None. - - - - None - - - - drop - - float - - - - Probability of dropout the units. Defaults to 0.0. - - - - 0.0 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("states", "visc"). + + + + required + + + + output_keys + + Tuple[str, ...] 
+ + + + Output keys, such as ("pred_states", "recover_states"). + + + + required + + + + mean + + Optional[Tuple[float, ...]] + + + + Mean of training dataset. Defaults to None. + + + + None + + + + std + + Optional[Tuple[float, ...]] + + + + Standard Deviation of training dataset. Defaults to None. + + + + None + + + + embed_size + + int + + + + Number of embedding size. Defaults to 128. + + + + 128 + + + + encoder_channels + + Optional[Tuple[int, ...]] + + + + Number of channels in encoder network. Defaults to None. + + + + None + + + + decoder_channels + + Optional[Tuple[int, ...]] + + + + Number of channels in decoder network. Defaults to None. + + + + None + + + + drop + + float + + + + Probability of dropout the units. Defaults to 0.0. + + + + 0.0 + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> model = ppsci.arch.CylinderEmbedding(("x", "y"), ("u", "v")) - - Source code in ppsci/arch/embedding_koopman.py - 239 + + Source code in ppsci/arch/embedding_koopman.py + 239 240 241 242 @@ -6250,9 +7088,9 @@ y = self._output_transform(x, y) return y - - + + @@ -6268,7 +7106,7 @@ - + @@ -6276,155 +7114,158 @@ + - Generator + Generator ¶ - - - Bases: Arch - + + + Bases: Arch + Generator Net of GAN. Attention, the net using a kind of variant of ResBlock which is unique to "tempoGAN" example but not an open source network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input1", "input2"). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output1", "output2"). - - - - required - - - - in_channel - - int - - - - Number of input channels of the first conv layer. - - - - required - - - - out_channels_tuple - - Tuple[Tuple[int, ...], ...] - - - - Number of output channels of all conv layers, + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] 
+ + + + Name of input keys, such as ("input1", "input2"). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output1", "output2"). + + + + required + + + + in_channel + + int + + + + Number of input channels of the first conv layer. + + + + required + + + + out_channels_tuple + + Tuple[Tuple[int, ...], ...] + + + + Number of output channels of all conv layers, such as [[out_res0_conv0, out_res0_conv1], [out_res1_conv0, out_res1_conv1]] - - - - required - - - - kernel_sizes_tuple - - Tuple[Tuple[int, ...], ...] - - - - Number of kernel_size of all conv layers, + + + + required + + + + kernel_sizes_tuple + + Tuple[Tuple[int, ...], ...] + + + + Number of kernel_size of all conv layers, such as [[kernel_size_res0_conv0, kernel_size_res0_conv1], [kernel_size_res1_conv0, kernel_size_res1_conv1]] - - - - required - - - - strides_tuple - - Tuple[Tuple[int, ...], ...] - - - - Number of stride of all conv layers, + + + + required + + + + strides_tuple + + Tuple[Tuple[int, ...], ...] + + + + Number of stride of all conv layers, such as [[stride_res0_conv0, stride_res0_conv1], [stride_res1_conv0, stride_res1_conv1]] - - - - required - - - - use_bns_tuple - - Tuple[Tuple[bool, ...], ...] - - - - Whether to use the batch_norm layer after each conv layer. - - - - required - - - - acts_tuple - - Tuple[Tuple[str, ...], ...] - - - - Whether to use the activation layer after each conv layer. If so, witch activation to use, + + + + required + + + + use_bns_tuple + + Tuple[Tuple[bool, ...], ...] + + + + Whether to use the batch_norm layer after each conv layer. + + + + required + + + + acts_tuple + + Tuple[Tuple[str, ...], ...] + + + + Whether to use the activation layer after each conv layer. 
If so, witch activation to use, such as [[act_res0_conv0, act_res0_conv1], [act_res1_conv0, act_res1_conv1]] - - - - required - - - - + + + + required + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> in_channel = 1 >>> rb_channel0 = (2, 8, 8) >>> rb_channel1 = (128, 128, 128) @@ -6438,9 +7279,9 @@ >>> model = ppsci.arch.Generator(("in",), ("out",), in_channel, out_channels_tuple, kernel_sizes_tuple, strides_tuple, use_bns_tuple, acts_tuple) - - Source code in ppsci/arch/gan.py - 154 + + Source code in ppsci/arch/gan.py + 154 155 156 157 @@ -6628,9 +7469,9 @@ y = self._output_transform(x, y) return y - - + + @@ -6646,7 +7487,7 @@ - + @@ -6654,169 +7495,172 @@ + - Discriminator + Discriminator ¶ - - - Bases: Arch - + + + Bases: Arch + Discriminator Net of GAN. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input1", "input2"). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output1", "output2"). - - - - required - - - - in_channel - - int - - - - Number of input channels of the first conv layer. - - - - required - - - - out_channels - - Tuple[int, ...] - - - - Number of output channels of all conv layers, + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input1", "input2"). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output1", "output2"). + + + + required + + + + in_channel + + int + + + + Number of input channels of the first conv layer. + + + + required + + + + out_channels + + Tuple[int, ...] + + + + Number of output channels of all conv layers, such as (out_conv0, out_conv1, out_conv2). - - - - required - - - - fc_channel - - int - - - - Number of input features of linear layer. 
Number of output features of the layer + + + + required + + + + fc_channel + + int + + + + Number of input features of linear layer. Number of output features of the layer is set to 1 in this Net to construct a fully_connected layer. - - - - required - - - - kernel_sizes - - Tuple[int, ...] - - - - Number of kernel_size of all conv layers, + + + + required + + + + kernel_sizes + + Tuple[int, ...] + + + + Number of kernel_size of all conv layers, such as (kernel_size_conv0, kernel_size_conv1, kernel_size_conv2). - - - - required - - - - strides - - Tuple[int, ...] - - - - Number of stride of all conv layers, + + + + required + + + + strides + + Tuple[int, ...] + + + + Number of stride of all conv layers, such as (stride_conv0, stride_conv1, stride_conv2). - - - - required - - - - use_bns - - Tuple[bool, ...] - - - - Whether to use the batch_norm layer after each conv layer. - - - - required - - - - acts - - Tuple[str, ...] - - - - Whether to use the activation layer after each conv layer. If so, witch activation to use, + + + + required + + + + use_bns + + Tuple[bool, ...] + + + + Whether to use the batch_norm layer after each conv layer. + + + + required + + + + acts + + Tuple[str, ...] + + + + Whether to use the activation layer after each conv layer. If so, witch activation to use, such as (act_conv0, act_conv1, act_conv2). 
- - - - required - - - - + + + + required + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> in_channel = 2 >>> in_channel_tempo = 3 >>> out_channels = (32, 64, 128, 256) @@ -6829,9 +7673,9 @@ >>> model = ppsci.arch.Discriminator(("in_1","in_2"), output_keys_disc, in_channel, out_channels, fc_channel, kernel_sizes, strides, use_bns, acts) - - Source code in ppsci/arch/gan.py - 250 + + Source code in ppsci/arch/gan.py + 250 251 252 253 @@ -7085,9 +7929,9 @@ return {keys[0]: data_list[0]} return {key: data_list[i] for i, key in enumerate(keys)} - - + + @@ -7099,17 +7943,20 @@ + + + - split_to_dict(data_list, keys) + split_to_dict(data_list, keys) ¶ - - + + Overwrite of split_to_dict() method belongs to Class base.Arch. Reason for overwriting is there is no concat_to_tensor() method called in "tempoGAN" example. That is because input in "tempoGAN" example is not in a regular format, but a format like: @@ -7119,74 +7966,76 @@ } -Parameters: - - - - Name - Type - Description - Default - - - - - data_list - - List[Tensor] - - - - The data to be split. It should be a list of tensor(s), but not a paddle.Tensor. - - - - required - - - - keys - - Tuple[str, ...] - - - - Keys of outputs. - - - - required - - - - - - - Returns: - - - - Type - Description - - - - - - Dict[str, Tensor] - - - - Dict[str, paddle.Tensor]: Dict with split data. - - - - - - - Source code in ppsci/arch/gan.py - 355 + Parameters: + + + + Name + Type + Description + Default + + + + + data_list + + List[Tensor] + + + + The data to be split. It should be a list of tensor(s), but not a paddle.Tensor. + + + + required + + + + keys + + Tuple[str, ...] + + + + Keys of outputs. + + + + required + + + + + + + + Returns: + + + + Type + Description + + + + + + Dict[str, Tensor] + + + + Dict[str, paddle.Tensor]: Dict with split data. 
+ + + + + + + + Source code in ppsci/arch/gan.py + 355 356 357 358 @@ -7230,8 +8079,8 @@ return {keys[0]: data_list[0]} return {key: data_list[i] for i, key in enumerate(keys)} - - + + @@ -7239,7 +8088,7 @@ - + @@ -7247,184 +8096,187 @@ + - PhysformerGPT2 + PhysformerGPT2 ¶ - - - Bases: Arch - + + + Bases: Arch + Transformer decoder model for modeling physics. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("embeds",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Output keys, such as ("pred_embeds",). - - - - required - - - - num_layers - - int - - - - Number of transformer layers. - - - - required - - - - num_ctx - - int - - - - Context length of block. - - - - required - - - - embed_size - - int - - - - The number of embedding size. - - - - required - - - - num_heads - - int - - - - The number of heads in multi-head attention. - - - - required - - - - embd_pdrop - - float - - - - The dropout probability used on embedding features. Defaults to 0.0. - - - - 0.0 - - - - attn_pdrop - - float - - - - The dropout probability used on attention weights. Defaults to 0.0. - - - - 0.0 - - - - resid_pdrop - - float - - - - The dropout probability used on block outputs. Defaults to 0.0. - - - - 0.0 - - - - initializer_range - - float - - - - Initializer range of linear layer. Defaults to 0.05. - - - - 0.05 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("embeds",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Output keys, such as ("pred_embeds",). + + + + required + + + + num_layers + + int + + + + Number of transformer layers. + + + + required + + + + num_ctx + + int + + + + Context length of block. + + + + required + + + + embed_size + + int + + + + The number of embedding size. 
+ + + + required + + + + num_heads + + int + + + + The number of heads in multi-head attention. + + + + required + + + + embd_pdrop + + float + + + + The dropout probability used on embedding features. Defaults to 0.0. + + + + 0.0 + + + + attn_pdrop + + float + + + + The dropout probability used on attention weights. Defaults to 0.0. + + + + 0.0 + + + + resid_pdrop + + float + + + + The dropout probability used on block outputs. Defaults to 0.0. + + + + 0.0 + + + + initializer_range + + float + + + + Initializer range of linear layer. Defaults to 0.05. + + + + 0.05 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.PhysformerGPT2(("embeds", ), ("pred_embeds", ), 6, 16, 128, 4) - - Source code in ppsci/arch/physx_transformer.py - 240 + + Source code in ppsci/arch/physx_transformer.py + 240 241 242 243 @@ -7714,9 +8566,9 @@ y = self._output_transform(x, y) return y - - + + @@ -7732,7 +8584,7 @@ - + @@ -7740,60 +8592,63 @@ + - ModelList + ModelList ¶ - - - Bases: Arch - + + + Bases: Arch + ModelList layer which wrap more than one model that shares inputs. -Parameters: - - - - Name - Type - Description - Default - - - - - model_list - - Tuple[Arch, ...] - - - - Model(s) nested in tuple. - - - - required - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + model_list + + Tuple[Arch, ...] + + + + Model(s) nested in tuple. + + + + required + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> model1 = ppsci.arch.MLP(("x", "y"), ("u", "v"), 10, 128) >>> model2 = ppsci.arch.MLP(("x", "y"), ("w", "p"), 5, 128) >>> model = ppsci.arch.ModelList((model1, model2)) - - Source code in ppsci/arch/model_list.py - 24 + + Source code in ppsci/arch/model_list.py + 24 25 26 27 @@ -7873,9 +8728,9 @@ return y_all - - + + @@ -7891,7 +8746,7 @@ - + @@ -7899,254 +8754,257 @@ + - AFNONet + AFNONet ¶ - - - Bases: Arch - + + + Bases: Arch + Adaptive Fourier Neural Network. 
-Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output",). - - - - required - - - - img_size - - Tuple[int, ...] - - - - Image size. Defaults to (720, 1440). - - - - (720, 1440) - - - - patch_size - - Tuple[int, ...] - - - - Path. Defaults to (8, 8). - - - - (8, 8) - - - - in_channels - - int - - - - The input tensor channels. Defaults to 20. - - - - 20 - - - - out_channels - - int - - - - The output tensor channels. Defaults to 20. - - - - 20 - - - - embed_dim - - int - - - - The embedding dimension for PatchEmbed. Defaults to 768. - - - - 768 - - - - depth - - int - - - - Number of transformer depth. Defaults to 12. - - - - 12 - - - - mlp_ratio - - float - - - - Number of ratio used in MLP. Defaults to 4.0. - - - - 4.0 - - - - drop_rate - - float - - - - The drop ratio used in MLP. Defaults to 0.0. - - - - 0.0 - - - - drop_path_rate - - float - - - - The drop ratio used in DropPath. Defaults to 0.0. - - - - 0.0 - - - - num_blocks - - int - - - - Number of blocks. Defaults to 8. - - - - 8 - - - - sparsity_threshold - - float - - - - The value of threshold for softshrink. Defaults to 0.01. - - - - 0.01 - - - - hard_thresholding_fraction - - float - - - - The value of threshold for keep mode. Defaults to 1.0. - - - - 1.0 - - - - num_timestamps - - int - - - - Number of timestamp. Defaults to 1. - - - - 1 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output",). + + + + required + + + + img_size + + Tuple[int, ...] + + + + Image size. Defaults to (720, 1440). + + + + (720, 1440) + + + + patch_size + + Tuple[int, ...] + + + + Path. Defaults to (8, 8). 
+ + + + (8, 8) + + + + in_channels + + int + + + + The input tensor channels. Defaults to 20. + + + + 20 + + + + out_channels + + int + + + + The output tensor channels. Defaults to 20. + + + + 20 + + + + embed_dim + + int + + + + The embedding dimension for PatchEmbed. Defaults to 768. + + + + 768 + + + + depth + + int + + + + Number of transformer depth. Defaults to 12. + + + + 12 + + + + mlp_ratio + + float + + + + Number of ratio used in MLP. Defaults to 4.0. + + + + 4.0 + + + + drop_rate + + float + + + + The drop ratio used in MLP. Defaults to 0.0. + + + + 0.0 + + + + drop_path_rate + + float + + + + The drop ratio used in DropPath. Defaults to 0.0. + + + + 0.0 + + + + num_blocks + + int + + + + Number of blocks. Defaults to 8. + + + + 8 + + + + sparsity_threshold + + float + + + + The value of threshold for softshrink. Defaults to 0.01. + + + + 0.01 + + + + hard_thresholding_fraction + + float + + + + The value of threshold for keep mode. Defaults to 1.0. + + + + 1.0 + + + + num_timestamps + + int + + + + Number of timestamp. Defaults to 1. + + + + 1 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.AFNONet(("input", ), ("output", )) - - Source code in ppsci/arch/afno.py - 394 + + Source code in ppsci/arch/afno.py + 394 395 396 397 @@ -8466,9 +9324,9 @@ y = self._output_transform(x, y) return y - - + + @@ -8484,7 +9342,7 @@ - + @@ -8492,269 +9350,272 @@ + - PrecipNet + PrecipNet ¶ - - - Bases: Arch - + + + Bases: Arch + Precipitation Network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output",). - - - - required - - - - wind_model - - Arch - - - - Wind model. - - - - required - - - - img_size - - Tuple[int, ...] - - - - Image size. Defaults to (720, 1440). - - - - (720, 1440) - - - - patch_size - - Tuple[int, ...] 
- - - - Path. Defaults to (8, 8). - - - - (8, 8) - - - - in_channels - - int - - - - The input tensor channels. Defaults to 20. - - - - 20 - - - - out_channels - - int - - - - The output tensor channels. Defaults to 1. - - - - 1 - - - - embed_dim - - int - - - - The embedding dimension for PatchEmbed. Defaults to 768. - - - - 768 - - - - depth - - int - - - - Number of transformer depth. Defaults to 12. - - - - 12 - - - - mlp_ratio - - float - - - - Number of ratio used in MLP. Defaults to 4.0. - - - - 4.0 - - - - drop_rate - - float - - - - The drop ratio used in MLP. Defaults to 0.0. - - - - 0.0 - - - - drop_path_rate - - float - - - - The drop ratio used in DropPath. Defaults to 0.0. - - - - 0.0 - - - - num_blocks - - int - - - - Number of blocks. Defaults to 8. - - - - 8 - - - - sparsity_threshold - - float - - - - The value of threshold for softshrink. Defaults to 0.01. - - - - 0.01 - - - - hard_thresholding_fraction - - float - - - - The value of threshold for keep mode. Defaults to 1.0. - - - - 1.0 - - - - num_timestamps - - int - - - - Number of timestamp. Defaults to 1. - - - - 1 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output",). + + + + required + + + + wind_model + + Arch + + + + Wind model. + + + + required + + + + img_size + + Tuple[int, ...] + + + + Image size. Defaults to (720, 1440). + + + + (720, 1440) + + + + patch_size + + Tuple[int, ...] + + + + Path. Defaults to (8, 8). + + + + (8, 8) + + + + in_channels + + int + + + + The input tensor channels. Defaults to 20. + + + + 20 + + + + out_channels + + int + + + + The output tensor channels. Defaults to 1. + + + + 1 + + + + embed_dim + + int + + + + The embedding dimension for PatchEmbed. Defaults to 768. + + + + 768 + + + + depth + + int + + + + Number of transformer depth. Defaults to 12. 
+ + + + 12 + + + + mlp_ratio + + float + + + + Number of ratio used in MLP. Defaults to 4.0. + + + + 4.0 + + + + drop_rate + + float + + + + The drop ratio used in MLP. Defaults to 0.0. + + + + 0.0 + + + + drop_path_rate + + float + + + + The drop ratio used in DropPath. Defaults to 0.0. + + + + 0.0 + + + + num_blocks + + int + + + + Number of blocks. Defaults to 8. + + + + 8 + + + + sparsity_threshold + + float + + + + The value of threshold for softshrink. Defaults to 0.01. + + + + 0.01 + + + + hard_thresholding_fraction + + float + + + + The value of threshold for keep mode. Defaults to 1.0. + + + + 1.0 + + + + num_timestamps + + int + + + + Number of timestamp. Defaults to 1. + + + + 1 + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> wind_model = ppsci.arch.AFNONet(("input", ), ("output", )) >>> model = ppsci.arch.PrecipNet(("input", ), ("output", ), wind_model) - - Source code in ppsci/arch/afno.py - 556 + + Source code in ppsci/arch/afno.py + 556 557 558 559 @@ -9002,9 +9863,9 @@ y = self._output_transform(x, y) return y - - + + @@ -9020,7 +9881,7 @@ - + @@ -9028,199 +9889,202 @@ + - UNetEx + UNetEx ¶ - - - Bases: Arch - + + + Bases: Arch + U-Net Ribeiro M D, Rehman A, Ahmed S, et al. DeepCFD: Efficient steady-state laminar flow approximation with deep convolutional neural networks[J]. arXiv preprint arXiv:2004.08826, 2020. -Parameters: - - - - Name - Type - Description - Default - - - - - input_key - - str - - - - Name of function data for input. - - - - required - - - - output_key - - str - - - - Name of function data for output. - - - - required - - - - in_channel - - int - - - - Number of channels of input. - - - - required - - - - out_channel - - int - - - - Number of channels of output. - - - - required - - - - kernel_size - - int - - - - Size of kernel of convolution layer. Defaults to 3. - - - - 3 - - - - filters - - Tuple[int, ...] - - - - Number of filters. Defaults to (16, 32, 64). 
- - - - (16, 32, 64) - - - - layers - - int - - - - Number of encoders or decoders. Defaults to 3. - - - - 3 - - - - weight_norm - - bool - - - - Whether use weight normalization layer. Defaults to True. - - - - True - - - - batch_norm - - bool - - - - Whether add batch normalization layer. Defaults to True. - - - - True - - - - activation - - Type[Layer] - - - - Name of activation function. Defaults to nn.ReLU. - - - - ReLU - - - - final_activation - - Optional[Type[Layer]] - - - - Name of final activation function. Defaults to None. - - - - None - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_key + + str + + + + Name of function data for input. + + + + required + + + + output_key + + str + + + + Name of function data for output. + + + + required + + + + in_channel + + int + + + + Number of channels of input. + + + + required + + + + out_channel + + int + + + + Number of channels of output. + + + + required + + + + kernel_size + + int + + + + Size of kernel of convolution layer. Defaults to 3. + + + + 3 + + + + filters + + Tuple[int, ...] + + + + Number of filters. Defaults to (16, 32, 64). + + + + (16, 32, 64) + + + + layers + + int + + + + Number of encoders or decoders. Defaults to 3. + + + + 3 + + + + weight_norm + + bool + + + + Whether use weight normalization layer. Defaults to True. + + + + True + + + + batch_norm + + bool + + + + Whether add batch normalization layer. Defaults to True. + + + + True + + + + activation + + Type[Layer] + + + + Name of activation function. Defaults to nn.ReLU. + + + + ReLU + + + + final_activation + + Optional[Type[Layer]] + + + + Name of final activation function. Defaults to None. + + + + None + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.ppsci.arch.UNetEx("input", "output", 3, 3, (8, 16, 32, 32), 5, False, False) - - Source code in ppsci/arch/unetex.py - 176 + + Source code in
>>> import ppsci >>> model = ppsci.arch.LorenzEmbedding(("x", "y"), ("u", "v"))
ppsci/arch/embedding_koopman.py
36 + + Source code in ppsci/arch/embedding_koopman.py + 36 37 38 39 @@ -5298,9 +6130,9 @@ y = self._output_transform(x, y) return y - - + + @@ -5316,7 +6148,7 @@ -
36 37 38 39 @@ -5298,9 +6130,9 @@ y = self._output_transform(x, y) return y
RosslerEmbedding
- Bases: LorenzEmbedding
+ Bases: LorenzEmbedding
Embedding Koopman model for the Rossler ODE system.
>>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.RosslerEmbedding(("x", "y"), ("u", "v")) - - Source code in ppsci/arch/embedding_koopman.py - 198 + + Source code in ppsci/arch/embedding_koopman.py + 198 199 200 201 @@ -5551,9 +6386,9 @@ drop, ) - - + + @@ -5569,7 +6404,7 @@ - + @@ -5577,156 +6412,159 @@ + - CylinderEmbedding + CylinderEmbedding ¶ - - - Bases: Arch - + + + Bases: Arch + Embedding Koopman model for the Cylinder system. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("states", "visc"). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Output keys, such as ("pred_states", "recover_states"). - - - - required - - - - mean - - Optional[Tuple[float, ...]] - - - - Mean of training dataset. Defaults to None. - - - - None - - - - std - - Optional[Tuple[float, ...]] - - - - Standard Deviation of training dataset. Defaults to None. - - - - None - - - - embed_size - - int - - - - Number of embedding size. Defaults to 128. - - - - 128 - - - - encoder_channels - - Optional[Tuple[int, ...]] - - - - Number of channels in encoder network. Defaults to None. - - - - None - - - - decoder_channels - - Optional[Tuple[int, ...]] - - - - Number of channels in decoder network. Defaults to None. - - - - None - - - - drop - - float - - - - Probability of dropout the units. Defaults to 0.0. - - - - 0.0 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("states", "visc"). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Output keys, such as ("pred_states", "recover_states"). + + + + required + + + + mean + + Optional[Tuple[float, ...]] + + + + Mean of training dataset. Defaults to None. + + + + None + + + + std + + Optional[Tuple[float, ...]] + + + + Standard Deviation of training dataset. Defaults to None. 
+ + + + None + + + + embed_size + + int + + + + Number of embedding size. Defaults to 128. + + + + 128 + + + + encoder_channels + + Optional[Tuple[int, ...]] + + + + Number of channels in encoder network. Defaults to None. + + + + None + + + + decoder_channels + + Optional[Tuple[int, ...]] + + + + Number of channels in decoder network. Defaults to None. + + + + None + + + + drop + + float + + + + Probability of dropout the units. Defaults to 0.0. + + + + 0.0 + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> model = ppsci.arch.CylinderEmbedding(("x", "y"), ("u", "v")) - - Source code in ppsci/arch/embedding_koopman.py - 239 + + Source code in ppsci/arch/embedding_koopman.py + 239 240 241 242 @@ -6250,9 +7088,9 @@ y = self._output_transform(x, y) return y - - + + @@ -6268,7 +7106,7 @@ - + @@ -6276,155 +7114,158 @@ + - Generator + Generator ¶ - - - Bases: Arch - + + + Bases: Arch + Generator Net of GAN. Attention, the net using a kind of variant of ResBlock which is unique to "tempoGAN" example but not an open source network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input1", "input2"). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output1", "output2"). - - - - required - - - - in_channel - - int - - - - Number of input channels of the first conv layer. - - - - required - - - - out_channels_tuple - - Tuple[Tuple[int, ...], ...] - - - - Number of output channels of all conv layers, + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input1", "input2"). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output1", "output2"). + + + + required + + + + in_channel + + int + + + + Number of input channels of the first conv layer. 
+ + + + required + + + + out_channels_tuple + + Tuple[Tuple[int, ...], ...] + + + + Number of output channels of all conv layers, such as [[out_res0_conv0, out_res0_conv1], [out_res1_conv0, out_res1_conv1]] - - - - required - - - - kernel_sizes_tuple - - Tuple[Tuple[int, ...], ...] - - - - Number of kernel_size of all conv layers, + + + + required + + + + kernel_sizes_tuple + + Tuple[Tuple[int, ...], ...] + + + + Number of kernel_size of all conv layers, such as [[kernel_size_res0_conv0, kernel_size_res0_conv1], [kernel_size_res1_conv0, kernel_size_res1_conv1]] - - - - required - - - - strides_tuple - - Tuple[Tuple[int, ...], ...] - - - - Number of stride of all conv layers, + + + + required + + + + strides_tuple + + Tuple[Tuple[int, ...], ...] + + + + Number of stride of all conv layers, such as [[stride_res0_conv0, stride_res0_conv1], [stride_res1_conv0, stride_res1_conv1]] - - - - required - - - - use_bns_tuple - - Tuple[Tuple[bool, ...], ...] - - - - Whether to use the batch_norm layer after each conv layer. - - - - required - - - - acts_tuple - - Tuple[Tuple[str, ...], ...] - - - - Whether to use the activation layer after each conv layer. If so, witch activation to use, + + + + required + + + + use_bns_tuple + + Tuple[Tuple[bool, ...], ...] + + + + Whether to use the batch_norm layer after each conv layer. + + + + required + + + + acts_tuple + + Tuple[Tuple[str, ...], ...] + + + + Whether to use the activation layer after each conv layer. 
If so, witch activation to use, such as [[act_res0_conv0, act_res0_conv1], [act_res1_conv0, act_res1_conv1]] - - - - required - - - - + + + + required + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> in_channel = 1 >>> rb_channel0 = (2, 8, 8) >>> rb_channel1 = (128, 128, 128) @@ -6438,9 +7279,9 @@ >>> model = ppsci.arch.Generator(("in",), ("out",), in_channel, out_channels_tuple, kernel_sizes_tuple, strides_tuple, use_bns_tuple, acts_tuple) - - Source code in ppsci/arch/gan.py - 154 + + Source code in ppsci/arch/gan.py + 154 155 156 157 @@ -6628,9 +7469,9 @@ y = self._output_transform(x, y) return y - - + + @@ -6646,7 +7487,7 @@ - + @@ -6654,169 +7495,172 @@ + - Discriminator + Discriminator ¶ - - - Bases: Arch - + + + Bases: Arch + Discriminator Net of GAN. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input1", "input2"). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output1", "output2"). - - - - required - - - - in_channel - - int - - - - Number of input channels of the first conv layer. - - - - required - - - - out_channels - - Tuple[int, ...] - - - - Number of output channels of all conv layers, + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input1", "input2"). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output1", "output2"). + + + + required + + + + in_channel + + int + + + + Number of input channels of the first conv layer. + + + + required + + + + out_channels + + Tuple[int, ...] + + + + Number of output channels of all conv layers, such as (out_conv0, out_conv1, out_conv2). - - - - required - - - - fc_channel - - int - - - - Number of input features of linear layer. 
Number of output features of the layer + + + + required + + + + fc_channel + + int + + + + Number of input features of linear layer. Number of output features of the layer is set to 1 in this Net to construct a fully_connected layer. - - - - required - - - - kernel_sizes - - Tuple[int, ...] - - - - Number of kernel_size of all conv layers, + + + + required + + + + kernel_sizes + + Tuple[int, ...] + + + + Number of kernel_size of all conv layers, such as (kernel_size_conv0, kernel_size_conv1, kernel_size_conv2). - - - - required - - - - strides - - Tuple[int, ...] - - - - Number of stride of all conv layers, + + + + required + + + + strides + + Tuple[int, ...] + + + + Number of stride of all conv layers, such as (stride_conv0, stride_conv1, stride_conv2). - - - - required - - - - use_bns - - Tuple[bool, ...] - - - - Whether to use the batch_norm layer after each conv layer. - - - - required - - - - acts - - Tuple[str, ...] - - - - Whether to use the activation layer after each conv layer. If so, witch activation to use, + + + + required + + + + use_bns + + Tuple[bool, ...] + + + + Whether to use the batch_norm layer after each conv layer. + + + + required + + + + acts + + Tuple[str, ...] + + + + Whether to use the activation layer after each conv layer. If so, witch activation to use, such as (act_conv0, act_conv1, act_conv2). 
- - - - required - - - - + + + + required + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> in_channel = 2 >>> in_channel_tempo = 3 >>> out_channels = (32, 64, 128, 256) @@ -6829,9 +7673,9 @@ >>> model = ppsci.arch.Discriminator(("in_1","in_2"), output_keys_disc, in_channel, out_channels, fc_channel, kernel_sizes, strides, use_bns, acts) - - Source code in ppsci/arch/gan.py - 250 + + Source code in ppsci/arch/gan.py + 250 251 252 253 @@ -7085,9 +7929,9 @@ return {keys[0]: data_list[0]} return {key: data_list[i] for i, key in enumerate(keys)} - - + + @@ -7099,17 +7943,20 @@ + + + - split_to_dict(data_list, keys) + split_to_dict(data_list, keys) ¶ - - + + Overwrite of split_to_dict() method belongs to Class base.Arch. Reason for overwriting is there is no concat_to_tensor() method called in "tempoGAN" example. That is because input in "tempoGAN" example is not in a regular format, but a format like: @@ -7119,74 +7966,76 @@ } -Parameters: - - - - Name - Type - Description - Default - - - - - data_list - - List[Tensor] - - - - The data to be split. It should be a list of tensor(s), but not a paddle.Tensor. - - - - required - - - - keys - - Tuple[str, ...] - - - - Keys of outputs. - - - - required - - - - - - - Returns: - - - - Type - Description - - - - - - Dict[str, Tensor] - - - - Dict[str, paddle.Tensor]: Dict with split data. - - - - - - - Source code in ppsci/arch/gan.py - 355 + Parameters: + + + + Name + Type + Description + Default + + + + + data_list + + List[Tensor] + + + + The data to be split. It should be a list of tensor(s), but not a paddle.Tensor. + + + + required + + + + keys + + Tuple[str, ...] + + + + Keys of outputs. + + + + required + + + + + + + + Returns: + + + + Type + Description + + + + + + Dict[str, Tensor] + + + + Dict[str, paddle.Tensor]: Dict with split data. 
+ + + + + + + + Source code in ppsci/arch/gan.py + 355 356 357 358 @@ -7230,8 +8079,8 @@ return {keys[0]: data_list[0]} return {key: data_list[i] for i, key in enumerate(keys)} - - + + @@ -7239,7 +8088,7 @@ - + @@ -7247,184 +8096,187 @@ + - PhysformerGPT2 + PhysformerGPT2 ¶ - - - Bases: Arch - + + + Bases: Arch + Transformer decoder model for modeling physics. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("embeds",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Output keys, such as ("pred_embeds",). - - - - required - - - - num_layers - - int - - - - Number of transformer layers. - - - - required - - - - num_ctx - - int - - - - Context length of block. - - - - required - - - - embed_size - - int - - - - The number of embedding size. - - - - required - - - - num_heads - - int - - - - The number of heads in multi-head attention. - - - - required - - - - embd_pdrop - - float - - - - The dropout probability used on embedding features. Defaults to 0.0. - - - - 0.0 - - - - attn_pdrop - - float - - - - The dropout probability used on attention weights. Defaults to 0.0. - - - - 0.0 - - - - resid_pdrop - - float - - - - The dropout probability used on block outputs. Defaults to 0.0. - - - - 0.0 - - - - initializer_range - - float - - - - Initializer range of linear layer. Defaults to 0.05. - - - - 0.05 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("embeds",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Output keys, such as ("pred_embeds",). + + + + required + + + + num_layers + + int + + + + Number of transformer layers. + + + + required + + + + num_ctx + + int + + + + Context length of block. + + + + required + + + + embed_size + + int + + + + The number of embedding size. 
+ + + + required + + + + num_heads + + int + + + + The number of heads in multi-head attention. + + + + required + + + + embd_pdrop + + float + + + + The dropout probability used on embedding features. Defaults to 0.0. + + + + 0.0 + + + + attn_pdrop + + float + + + + The dropout probability used on attention weights. Defaults to 0.0. + + + + 0.0 + + + + resid_pdrop + + float + + + + The dropout probability used on block outputs. Defaults to 0.0. + + + + 0.0 + + + + initializer_range + + float + + + + Initializer range of linear layer. Defaults to 0.05. + + + + 0.05 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.PhysformerGPT2(("embeds", ), ("pred_embeds", ), 6, 16, 128, 4) - - Source code in ppsci/arch/physx_transformer.py - 240 + + Source code in ppsci/arch/physx_transformer.py + 240 241 242 243 @@ -7714,9 +8566,9 @@ y = self._output_transform(x, y) return y - - + + @@ -7732,7 +8584,7 @@ - + @@ -7740,60 +8592,63 @@ + - ModelList + ModelList ¶ - - - Bases: Arch - + + + Bases: Arch + ModelList layer which wrap more than one model that shares inputs. -Parameters: - - - - Name - Type - Description - Default - - - - - model_list - - Tuple[Arch, ...] - - - - Model(s) nested in tuple. - - - - required - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + model_list + + Tuple[Arch, ...] + + + + Model(s) nested in tuple. + + + + required + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> model1 = ppsci.arch.MLP(("x", "y"), ("u", "v"), 10, 128) >>> model2 = ppsci.arch.MLP(("x", "y"), ("w", "p"), 5, 128) >>> model = ppsci.arch.ModelList((model1, model2)) - - Source code in ppsci/arch/model_list.py - 24 + + Source code in ppsci/arch/model_list.py + 24 25 26 27 @@ -7873,9 +8728,9 @@ return y_all - - + + @@ -7891,7 +8746,7 @@ - + @@ -7899,254 +8754,257 @@ + - AFNONet + AFNONet ¶ - - - Bases: Arch - + + + Bases: Arch + Adaptive Fourier Neural Network. 
-Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output",). - - - - required - - - - img_size - - Tuple[int, ...] - - - - Image size. Defaults to (720, 1440). - - - - (720, 1440) - - - - patch_size - - Tuple[int, ...] - - - - Path. Defaults to (8, 8). - - - - (8, 8) - - - - in_channels - - int - - - - The input tensor channels. Defaults to 20. - - - - 20 - - - - out_channels - - int - - - - The output tensor channels. Defaults to 20. - - - - 20 - - - - embed_dim - - int - - - - The embedding dimension for PatchEmbed. Defaults to 768. - - - - 768 - - - - depth - - int - - - - Number of transformer depth. Defaults to 12. - - - - 12 - - - - mlp_ratio - - float - - - - Number of ratio used in MLP. Defaults to 4.0. - - - - 4.0 - - - - drop_rate - - float - - - - The drop ratio used in MLP. Defaults to 0.0. - - - - 0.0 - - - - drop_path_rate - - float - - - - The drop ratio used in DropPath. Defaults to 0.0. - - - - 0.0 - - - - num_blocks - - int - - - - Number of blocks. Defaults to 8. - - - - 8 - - - - sparsity_threshold - - float - - - - The value of threshold for softshrink. Defaults to 0.01. - - - - 0.01 - - - - hard_thresholding_fraction - - float - - - - The value of threshold for keep mode. Defaults to 1.0. - - - - 1.0 - - - - num_timestamps - - int - - - - Number of timestamp. Defaults to 1. - - - - 1 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output",). + + + + required + + + + img_size + + Tuple[int, ...] + + + + Image size. Defaults to (720, 1440). + + + + (720, 1440) + + + + patch_size + + Tuple[int, ...] + + + + Path. Defaults to (8, 8). 
+ + + + (8, 8) + + + + in_channels + + int + + + + The input tensor channels. Defaults to 20. + + + + 20 + + + + out_channels + + int + + + + The output tensor channels. Defaults to 20. + + + + 20 + + + + embed_dim + + int + + + + The embedding dimension for PatchEmbed. Defaults to 768. + + + + 768 + + + + depth + + int + + + + Number of transformer depth. Defaults to 12. + + + + 12 + + + + mlp_ratio + + float + + + + Number of ratio used in MLP. Defaults to 4.0. + + + + 4.0 + + + + drop_rate + + float + + + + The drop ratio used in MLP. Defaults to 0.0. + + + + 0.0 + + + + drop_path_rate + + float + + + + The drop ratio used in DropPath. Defaults to 0.0. + + + + 0.0 + + + + num_blocks + + int + + + + Number of blocks. Defaults to 8. + + + + 8 + + + + sparsity_threshold + + float + + + + The value of threshold for softshrink. Defaults to 0.01. + + + + 0.01 + + + + hard_thresholding_fraction + + float + + + + The value of threshold for keep mode. Defaults to 1.0. + + + + 1.0 + + + + num_timestamps + + int + + + + Number of timestamp. Defaults to 1. + + + + 1 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.AFNONet(("input", ), ("output", )) - - Source code in ppsci/arch/afno.py - 394 + + Source code in ppsci/arch/afno.py + 394 395 396 397 @@ -8466,9 +9324,9 @@ y = self._output_transform(x, y) return y - - + + @@ -8484,7 +9342,7 @@ - + @@ -8492,269 +9350,272 @@ + - PrecipNet + PrecipNet ¶ - - - Bases: Arch - + + + Bases: Arch + Precipitation Network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output",). - - - - required - - - - wind_model - - Arch - - - - Wind model. - - - - required - - - - img_size - - Tuple[int, ...] - - - - Image size. Defaults to (720, 1440). - - - - (720, 1440) - - - - patch_size - - Tuple[int, ...] 
- - - - Path. Defaults to (8, 8). - - - - (8, 8) - - - - in_channels - - int - - - - The input tensor channels. Defaults to 20. - - - - 20 - - - - out_channels - - int - - - - The output tensor channels. Defaults to 1. - - - - 1 - - - - embed_dim - - int - - - - The embedding dimension for PatchEmbed. Defaults to 768. - - - - 768 - - - - depth - - int - - - - Number of transformer depth. Defaults to 12. - - - - 12 - - - - mlp_ratio - - float - - - - Number of ratio used in MLP. Defaults to 4.0. - - - - 4.0 - - - - drop_rate - - float - - - - The drop ratio used in MLP. Defaults to 0.0. - - - - 0.0 - - - - drop_path_rate - - float - - - - The drop ratio used in DropPath. Defaults to 0.0. - - - - 0.0 - - - - num_blocks - - int - - - - Number of blocks. Defaults to 8. - - - - 8 - - - - sparsity_threshold - - float - - - - The value of threshold for softshrink. Defaults to 0.01. - - - - 0.01 - - - - hard_thresholding_fraction - - float - - - - The value of threshold for keep mode. Defaults to 1.0. - - - - 1.0 - - - - num_timestamps - - int - - - - Number of timestamp. Defaults to 1. - - - - 1 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output",). + + + + required + + + + wind_model + + Arch + + + + Wind model. + + + + required + + + + img_size + + Tuple[int, ...] + + + + Image size. Defaults to (720, 1440). + + + + (720, 1440) + + + + patch_size + + Tuple[int, ...] + + + + Path. Defaults to (8, 8). + + + + (8, 8) + + + + in_channels + + int + + + + The input tensor channels. Defaults to 20. + + + + 20 + + + + out_channels + + int + + + + The output tensor channels. Defaults to 1. + + + + 1 + + + + embed_dim + + int + + + + The embedding dimension for PatchEmbed. Defaults to 768. + + + + 768 + + + + depth + + int + + + + Number of transformer depth. Defaults to 12. 
+ + + + 12 + + + + mlp_ratio + + float + + + + Number of ratio used in MLP. Defaults to 4.0. + + + + 4.0 + + + + drop_rate + + float + + + + The drop ratio used in MLP. Defaults to 0.0. + + + + 0.0 + + + + drop_path_rate + + float + + + + The drop ratio used in DropPath. Defaults to 0.0. + + + + 0.0 + + + + num_blocks + + int + + + + Number of blocks. Defaults to 8. + + + + 8 + + + + sparsity_threshold + + float + + + + The value of threshold for softshrink. Defaults to 0.01. + + + + 0.01 + + + + hard_thresholding_fraction + + float + + + + The value of threshold for keep mode. Defaults to 1.0. + + + + 1.0 + + + + num_timestamps + + int + + + + Number of timestamp. Defaults to 1. + + + + 1 + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> wind_model = ppsci.arch.AFNONet(("input", ), ("output", )) >>> model = ppsci.arch.PrecipNet(("input", ), ("output", ), wind_model) - - Source code in ppsci/arch/afno.py - 556 + + Source code in ppsci/arch/afno.py + 556 557 558 559 @@ -9002,9 +9863,9 @@ y = self._output_transform(x, y) return y - - + + @@ -9020,7 +9881,7 @@ - + @@ -9028,199 +9889,202 @@ + - UNetEx + UNetEx ¶ - - - Bases: Arch - + + + Bases: Arch + U-Net Ribeiro M D, Rehman A, Ahmed S, et al. DeepCFD: Efficient steady-state laminar flow approximation with deep convolutional neural networks[J]. arXiv preprint arXiv:2004.08826, 2020. -Parameters: - - - - Name - Type - Description - Default - - - - - input_key - - str - - - - Name of function data for input. - - - - required - - - - output_key - - str - - - - Name of function data for output. - - - - required - - - - in_channel - - int - - - - Number of channels of input. - - - - required - - - - out_channel - - int - - - - Number of channels of output. - - - - required - - - - kernel_size - - int - - - - Size of kernel of convolution layer. Defaults to 3. - - - - 3 - - - - filters - - Tuple[int, ...] - - - - Number of filters. Defaults to (16, 32, 64). 
- - - - (16, 32, 64) - - - - layers - - int - - - - Number of encoders or decoders. Defaults to 3. - - - - 3 - - - - weight_norm - - bool - - - - Whether use weight normalization layer. Defaults to True. - - - - True - - - - batch_norm - - bool - - - - Whether add batch normalization layer. Defaults to True. - - - - True - - - - activation - - Type[Layer] - - - - Name of activation function. Defaults to nn.ReLU. - - - - ReLU - - - - final_activation - - Optional[Type[Layer]] - - - - Name of final activation function. Defaults to None. - - - - None - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_key + + str + + + + Name of function data for input. + + + + required + + + + output_key + + str + + + + Name of function data for output. + + + + required + + + + in_channel + + int + + + + Number of channels of input. + + + + required + + + + out_channel + + int + + + + Number of channels of output. + + + + required + + + + kernel_size + + int + + + + Size of kernel of convolution layer. Defaults to 3. + + + + 3 + + + + filters + + Tuple[int, ...] + + + + Number of filters. Defaults to (16, 32, 64). + + + + (16, 32, 64) + + + + layers + + int + + + + Number of encoders or decoders. Defaults to 3. + + + + 3 + + + + weight_norm + + bool + + + + Whether use weight normalization layer. Defaults to True. + + + + True + + + + batch_norm + + bool + + + + Whether add batch normalization layer. Defaults to True. + + + + True + + + + activation + + Type[Layer] + + + + Name of activation function. Defaults to nn.ReLU. + + + + ReLU + + + + final_activation + + Optional[Type[Layer]] + + + + Name of final activation function. Defaults to None. + + + + None + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.ppsci.arch.UNetEx("input", "output", 3, 3, (8, 16, 32, 32), 5, False, False) - - Source code in ppsci/arch/unetex.py - 176 + + Source code in
>>> import ppsci >>> model = ppsci.arch.RosslerEmbedding(("x", "y"), ("u", "v"))
198 + + Source code in ppsci/arch/embedding_koopman.py + 198 199 200 201 @@ -5551,9 +6386,9 @@ drop, ) - - + + @@ -5569,7 +6404,7 @@ -
198 199 200 201 @@ -5551,9 +6386,9 @@ drop, )
CylinderEmbedding
Embedding Koopman model for the Cylinder system.
Input keys, such as ("states", "visc").
Number of embedding size. Defaults to 128.
128
encoder_channels
Optional[Tuple[int, ...]]
Number of channels in encoder network. Defaults to None.
decoder_channels
Number of channels in decoder network. Defaults to None.
>>> import ppsci + +Examples: + >>> import ppsci >>> model = ppsci.arch.CylinderEmbedding(("x", "y"), ("u", "v")) - - Source code in ppsci/arch/embedding_koopman.py - 239 + + Source code in ppsci/arch/embedding_koopman.py + 239 240 241 242 @@ -6250,9 +7088,9 @@ y = self._output_transform(x, y) return y - - + + @@ -6268,7 +7106,7 @@ - + @@ -6276,155 +7114,158 @@ + - Generator + Generator ¶ - - - Bases: Arch - + + + Bases: Arch + Generator Net of GAN. Attention, the net using a kind of variant of ResBlock which is unique to "tempoGAN" example but not an open source network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input1", "input2"). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output1", "output2"). - - - - required - - - - in_channel - - int - - - - Number of input channels of the first conv layer. - - - - required - - - - out_channels_tuple - - Tuple[Tuple[int, ...], ...] - - - - Number of output channels of all conv layers, + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input1", "input2"). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output1", "output2"). + + + + required + + + + in_channel + + int + + + + Number of input channels of the first conv layer. + + + + required + + + + out_channels_tuple + + Tuple[Tuple[int, ...], ...] + + + + Number of output channels of all conv layers, such as [[out_res0_conv0, out_res0_conv1], [out_res1_conv0, out_res1_conv1]] - - - - required - - - - kernel_sizes_tuple - - Tuple[Tuple[int, ...], ...] - - - - Number of kernel_size of all conv layers, + + + + required + + + + kernel_sizes_tuple + + Tuple[Tuple[int, ...], ...] 
+ + + + Number of kernel_size of all conv layers, such as [[kernel_size_res0_conv0, kernel_size_res0_conv1], [kernel_size_res1_conv0, kernel_size_res1_conv1]] - - - - required - - - - strides_tuple - - Tuple[Tuple[int, ...], ...] - - - - Number of stride of all conv layers, + + + + required + + + + strides_tuple + + Tuple[Tuple[int, ...], ...] + + + + Number of stride of all conv layers, such as [[stride_res0_conv0, stride_res0_conv1], [stride_res1_conv0, stride_res1_conv1]] - - - - required - - - - use_bns_tuple - - Tuple[Tuple[bool, ...], ...] - - - - Whether to use the batch_norm layer after each conv layer. - - - - required - - - - acts_tuple - - Tuple[Tuple[str, ...], ...] - - - - Whether to use the activation layer after each conv layer. If so, witch activation to use, + + + + required + + + + use_bns_tuple + + Tuple[Tuple[bool, ...], ...] + + + + Whether to use the batch_norm layer after each conv layer. + + + + required + + + + acts_tuple + + Tuple[Tuple[str, ...], ...] + + + + Whether to use the activation layer after each conv layer. If so, witch activation to use, such as [[act_res0_conv0, act_res0_conv1], [act_res1_conv0, act_res1_conv1]] - - - - required - - - - + + + + required + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> in_channel = 1 >>> rb_channel0 = (2, 8, 8) >>> rb_channel1 = (128, 128, 128) @@ -6438,9 +7279,9 @@ >>> model = ppsci.arch.Generator(("in",), ("out",), in_channel, out_channels_tuple, kernel_sizes_tuple, strides_tuple, use_bns_tuple, acts_tuple) - - Source code in ppsci/arch/gan.py - 154 + + Source code in ppsci/arch/gan.py + 154 155 156 157 @@ -6628,9 +7469,9 @@ y = self._output_transform(x, y) return y - - + + @@ -6646,7 +7487,7 @@ - + @@ -6654,169 +7495,172 @@ + - Discriminator + Discriminator ¶ - - - Bases: Arch - + + + Bases: Arch + Discriminator Net of GAN. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] 
- - - - Name of input keys, such as ("input1", "input2"). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output1", "output2"). - - - - required - - - - in_channel - - int - - - - Number of input channels of the first conv layer. - - - - required - - - - out_channels - - Tuple[int, ...] - - - - Number of output channels of all conv layers, + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input1", "input2"). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output1", "output2"). + + + + required + + + + in_channel + + int + + + + Number of input channels of the first conv layer. + + + + required + + + + out_channels + + Tuple[int, ...] + + + + Number of output channels of all conv layers, such as (out_conv0, out_conv1, out_conv2). - - - - required - - - - fc_channel - - int - - - - Number of input features of linear layer. Number of output features of the layer + + + + required + + + + fc_channel + + int + + + + Number of input features of linear layer. Number of output features of the layer is set to 1 in this Net to construct a fully_connected layer. - - - - required - - - - kernel_sizes - - Tuple[int, ...] - - - - Number of kernel_size of all conv layers, + + + + required + + + + kernel_sizes + + Tuple[int, ...] + + + + Number of kernel_size of all conv layers, such as (kernel_size_conv0, kernel_size_conv1, kernel_size_conv2). - - - - required - - - - strides - - Tuple[int, ...] - - - - Number of stride of all conv layers, + + + + required + + + + strides + + Tuple[int, ...] + + + + Number of stride of all conv layers, such as (stride_conv0, stride_conv1, stride_conv2). - - - - required - - - - use_bns - - Tuple[bool, ...] - - - - Whether to use the batch_norm layer after each conv layer. - - - - required - - - - acts - - Tuple[str, ...] 
- - - - Whether to use the activation layer after each conv layer. If so, witch activation to use, + + + + required + + + + use_bns + + Tuple[bool, ...] + + + + Whether to use the batch_norm layer after each conv layer. + + + + required + + + + acts + + Tuple[str, ...] + + + + Whether to use the activation layer after each conv layer. If so, witch activation to use, such as (act_conv0, act_conv1, act_conv2). - - - - required - - - - + + + + required + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> in_channel = 2 >>> in_channel_tempo = 3 >>> out_channels = (32, 64, 128, 256) @@ -6829,9 +7673,9 @@ >>> model = ppsci.arch.Discriminator(("in_1","in_2"), output_keys_disc, in_channel, out_channels, fc_channel, kernel_sizes, strides, use_bns, acts) - - Source code in ppsci/arch/gan.py - 250 + + Source code in ppsci/arch/gan.py + 250 251 252 253 @@ -7085,9 +7929,9 @@ return {keys[0]: data_list[0]} return {key: data_list[i] for i, key in enumerate(keys)} - - + + @@ -7099,17 +7943,20 @@ + + + - split_to_dict(data_list, keys) + split_to_dict(data_list, keys) ¶ - - + + Overwrite of split_to_dict() method belongs to Class base.Arch. Reason for overwriting is there is no concat_to_tensor() method called in "tempoGAN" example. That is because input in "tempoGAN" example is not in a regular format, but a format like: @@ -7119,74 +7966,76 @@ } -Parameters: - - - - Name - Type - Description - Default - - - - - data_list - - List[Tensor] - - - - The data to be split. It should be a list of tensor(s), but not a paddle.Tensor. - - - - required - - - - keys - - Tuple[str, ...] - - - - Keys of outputs. - - - - required - - - - - - - Returns: - - - - Type - Description - - - - - - Dict[str, Tensor] - - - - Dict[str, paddle.Tensor]: Dict with split data. - - - - - - - Source code in ppsci/arch/gan.py - 355 + Parameters: + + + + Name + Type + Description + Default + + + + + data_list + + List[Tensor] + + + + The data to be split. 
It should be a list of tensor(s), but not a paddle.Tensor. + + + + required + + + + keys + + Tuple[str, ...] + + + + Keys of outputs. + + + + required + + + + + + + + Returns: + + + + Type + Description + + + + + + Dict[str, Tensor] + + + + Dict[str, paddle.Tensor]: Dict with split data. + + + + + + + + Source code in ppsci/arch/gan.py + 355 356 357 358 @@ -7230,8 +8079,8 @@ return {keys[0]: data_list[0]} return {key: data_list[i] for i, key in enumerate(keys)} - - + + @@ -7239,7 +8088,7 @@ - + @@ -7247,184 +8096,187 @@ + - PhysformerGPT2 + PhysformerGPT2 ¶ - - - Bases: Arch - + + + Bases: Arch + Transformer decoder model for modeling physics. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("embeds",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Output keys, such as ("pred_embeds",). - - - - required - - - - num_layers - - int - - - - Number of transformer layers. - - - - required - - - - num_ctx - - int - - - - Context length of block. - - - - required - - - - embed_size - - int - - - - The number of embedding size. - - - - required - - - - num_heads - - int - - - - The number of heads in multi-head attention. - - - - required - - - - embd_pdrop - - float - - - - The dropout probability used on embedding features. Defaults to 0.0. - - - - 0.0 - - - - attn_pdrop - - float - - - - The dropout probability used on attention weights. Defaults to 0.0. - - - - 0.0 - - - - resid_pdrop - - float - - - - The dropout probability used on block outputs. Defaults to 0.0. - - - - 0.0 - - - - initializer_range - - float - - - - Initializer range of linear layer. Defaults to 0.05. - - - - 0.05 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("embeds",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Output keys, such as ("pred_embeds",). 
+ + + + required + + + + num_layers + + int + + + + Number of transformer layers. + + + + required + + + + num_ctx + + int + + + + Context length of block. + + + + required + + + + embed_size + + int + + + + The number of embedding size. + + + + required + + + + num_heads + + int + + + + The number of heads in multi-head attention. + + + + required + + + + embd_pdrop + + float + + + + The dropout probability used on embedding features. Defaults to 0.0. + + + + 0.0 + + + + attn_pdrop + + float + + + + The dropout probability used on attention weights. Defaults to 0.0. + + + + 0.0 + + + + resid_pdrop + + float + + + + The dropout probability used on block outputs. Defaults to 0.0. + + + + 0.0 + + + + initializer_range + + float + + + + Initializer range of linear layer. Defaults to 0.05. + + + + 0.05 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.PhysformerGPT2(("embeds", ), ("pred_embeds", ), 6, 16, 128, 4) - - Source code in ppsci/arch/physx_transformer.py - 240 + + Source code in ppsci/arch/physx_transformer.py + 240 241 242 243 @@ -7714,9 +8566,9 @@ y = self._output_transform(x, y) return y - - + + @@ -7732,7 +8584,7 @@ - + @@ -7740,60 +8592,63 @@ + - ModelList + ModelList ¶ - - - Bases: Arch - + + + Bases: Arch + ModelList layer which wrap more than one model that shares inputs. -Parameters: - - - - Name - Type - Description - Default - - - - - model_list - - Tuple[Arch, ...] - - - - Model(s) nested in tuple. - - - - required - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + model_list + + Tuple[Arch, ...] + + + + Model(s) nested in tuple. 
+ + + + required + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> model1 = ppsci.arch.MLP(("x", "y"), ("u", "v"), 10, 128) >>> model2 = ppsci.arch.MLP(("x", "y"), ("w", "p"), 5, 128) >>> model = ppsci.arch.ModelList((model1, model2)) - - Source code in ppsci/arch/model_list.py - 24 + + Source code in ppsci/arch/model_list.py + 24 25 26 27 @@ -7873,9 +8728,9 @@ return y_all - - + + @@ -7891,7 +8746,7 @@ - + @@ -7899,254 +8754,257 @@ + - AFNONet + AFNONet ¶ - - - Bases: Arch - + + + Bases: Arch + Adaptive Fourier Neural Network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output",). - - - - required - - - - img_size - - Tuple[int, ...] - - - - Image size. Defaults to (720, 1440). - - - - (720, 1440) - - - - patch_size - - Tuple[int, ...] - - - - Path. Defaults to (8, 8). - - - - (8, 8) - - - - in_channels - - int - - - - The input tensor channels. Defaults to 20. - - - - 20 - - - - out_channels - - int - - - - The output tensor channels. Defaults to 20. - - - - 20 - - - - embed_dim - - int - - - - The embedding dimension for PatchEmbed. Defaults to 768. - - - - 768 - - - - depth - - int - - - - Number of transformer depth. Defaults to 12. - - - - 12 - - - - mlp_ratio - - float - - - - Number of ratio used in MLP. Defaults to 4.0. - - - - 4.0 - - - - drop_rate - - float - - - - The drop ratio used in MLP. Defaults to 0.0. - - - - 0.0 - - - - drop_path_rate - - float - - - - The drop ratio used in DropPath. Defaults to 0.0. - - - - 0.0 - - - - num_blocks - - int - - - - Number of blocks. Defaults to 8. - - - - 8 - - - - sparsity_threshold - - float - - - - The value of threshold for softshrink. Defaults to 0.01. - - - - 0.01 - - - - hard_thresholding_fraction - - float - - - - The value of threshold for keep mode. Defaults to 1.0. 
- - - - 1.0 - - - - num_timestamps - - int - - - - Number of timestamp. Defaults to 1. - - - - 1 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output",). + + + + required + + + + img_size + + Tuple[int, ...] + + + + Image size. Defaults to (720, 1440). + + + + (720, 1440) + + + + patch_size + + Tuple[int, ...] + + + + Path. Defaults to (8, 8). + + + + (8, 8) + + + + in_channels + + int + + + + The input tensor channels. Defaults to 20. + + + + 20 + + + + out_channels + + int + + + + The output tensor channels. Defaults to 20. + + + + 20 + + + + embed_dim + + int + + + + The embedding dimension for PatchEmbed. Defaults to 768. + + + + 768 + + + + depth + + int + + + + Number of transformer depth. Defaults to 12. + + + + 12 + + + + mlp_ratio + + float + + + + Number of ratio used in MLP. Defaults to 4.0. + + + + 4.0 + + + + drop_rate + + float + + + + The drop ratio used in MLP. Defaults to 0.0. + + + + 0.0 + + + + drop_path_rate + + float + + + + The drop ratio used in DropPath. Defaults to 0.0. + + + + 0.0 + + + + num_blocks + + int + + + + Number of blocks. Defaults to 8. + + + + 8 + + + + sparsity_threshold + + float + + + + The value of threshold for softshrink. Defaults to 0.01. + + + + 0.01 + + + + hard_thresholding_fraction + + float + + + + The value of threshold for keep mode. Defaults to 1.0. + + + + 1.0 + + + + num_timestamps + + int + + + + Number of timestamp. Defaults to 1. 
+ + + + 1 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.AFNONet(("input", ), ("output", )) - - Source code in ppsci/arch/afno.py - 394 + + Source code in ppsci/arch/afno.py + 394 395 396 397 @@ -8466,9 +9324,9 @@ y = self._output_transform(x, y) return y - - + + @@ -8484,7 +9342,7 @@ - + @@ -8492,269 +9350,272 @@ + - PrecipNet + PrecipNet ¶ - - - Bases: Arch - + + + Bases: Arch + Precipitation Network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output",). - - - - required - - - - wind_model - - Arch - - - - Wind model. - - - - required - - - - img_size - - Tuple[int, ...] - - - - Image size. Defaults to (720, 1440). - - - - (720, 1440) - - - - patch_size - - Tuple[int, ...] - - - - Path. Defaults to (8, 8). - - - - (8, 8) - - - - in_channels - - int - - - - The input tensor channels. Defaults to 20. - - - - 20 - - - - out_channels - - int - - - - The output tensor channels. Defaults to 1. - - - - 1 - - - - embed_dim - - int - - - - The embedding dimension for PatchEmbed. Defaults to 768. - - - - 768 - - - - depth - - int - - - - Number of transformer depth. Defaults to 12. - - - - 12 - - - - mlp_ratio - - float - - - - Number of ratio used in MLP. Defaults to 4.0. - - - - 4.0 - - - - drop_rate - - float - - - - The drop ratio used in MLP. Defaults to 0.0. - - - - 0.0 - - - - drop_path_rate - - float - - - - The drop ratio used in DropPath. Defaults to 0.0. - - - - 0.0 - - - - num_blocks - - int - - - - Number of blocks. Defaults to 8. - - - - 8 - - - - sparsity_threshold - - float - - - - The value of threshold for softshrink. Defaults to 0.01. - - - - 0.01 - - - - hard_thresholding_fraction - - float - - - - The value of threshold for keep mode. Defaults to 1.0. 
- - - - 1.0 - - - - num_timestamps - - int - - - - Number of timestamp. Defaults to 1. - - - - 1 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output",). + + + + required + + + + wind_model + + Arch + + + + Wind model. + + + + required + + + + img_size + + Tuple[int, ...] + + + + Image size. Defaults to (720, 1440). + + + + (720, 1440) + + + + patch_size + + Tuple[int, ...] + + + + Path. Defaults to (8, 8). + + + + (8, 8) + + + + in_channels + + int + + + + The input tensor channels. Defaults to 20. + + + + 20 + + + + out_channels + + int + + + + The output tensor channels. Defaults to 1. + + + + 1 + + + + embed_dim + + int + + + + The embedding dimension for PatchEmbed. Defaults to 768. + + + + 768 + + + + depth + + int + + + + Number of transformer depth. Defaults to 12. + + + + 12 + + + + mlp_ratio + + float + + + + Number of ratio used in MLP. Defaults to 4.0. + + + + 4.0 + + + + drop_rate + + float + + + + The drop ratio used in MLP. Defaults to 0.0. + + + + 0.0 + + + + drop_path_rate + + float + + + + The drop ratio used in DropPath. Defaults to 0.0. + + + + 0.0 + + + + num_blocks + + int + + + + Number of blocks. Defaults to 8. + + + + 8 + + + + sparsity_threshold + + float + + + + The value of threshold for softshrink. Defaults to 0.01. + + + + 0.01 + + + + hard_thresholding_fraction + + float + + + + The value of threshold for keep mode. Defaults to 1.0. + + + + 1.0 + + + + num_timestamps + + int + + + + Number of timestamp. Defaults to 1. 
+ + + + 1 + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> wind_model = ppsci.arch.AFNONet(("input", ), ("output", )) >>> model = ppsci.arch.PrecipNet(("input", ), ("output", ), wind_model) - - Source code in ppsci/arch/afno.py - 556 + + Source code in ppsci/arch/afno.py + 556 557 558 559 @@ -9002,9 +9863,9 @@ y = self._output_transform(x, y) return y - - + + @@ -9020,7 +9881,7 @@ - + @@ -9028,199 +9889,202 @@ + - UNetEx + UNetEx ¶ - - - Bases: Arch - + + + Bases: Arch + U-Net Ribeiro M D, Rehman A, Ahmed S, et al. DeepCFD: Efficient steady-state laminar flow approximation with deep convolutional neural networks[J]. arXiv preprint arXiv:2004.08826, 2020. -Parameters: - - - - Name - Type - Description - Default - - - - - input_key - - str - - - - Name of function data for input. - - - - required - - - - output_key - - str - - - - Name of function data for output. - - - - required - - - - in_channel - - int - - - - Number of channels of input. - - - - required - - - - out_channel - - int - - - - Number of channels of output. - - - - required - - - - kernel_size - - int - - - - Size of kernel of convolution layer. Defaults to 3. - - - - 3 - - - - filters - - Tuple[int, ...] - - - - Number of filters. Defaults to (16, 32, 64). - - - - (16, 32, 64) - - - - layers - - int - - - - Number of encoders or decoders. Defaults to 3. - - - - 3 - - - - weight_norm - - bool - - - - Whether use weight normalization layer. Defaults to True. - - - - True - - - - batch_norm - - bool - - - - Whether add batch normalization layer. Defaults to True. - - - - True - - - - activation - - Type[Layer] - - - - Name of activation function. Defaults to nn.ReLU. - - - - ReLU - - - - final_activation - - Optional[Type[Layer]] - - - - Name of final activation function. Defaults to None. - - - - None - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_key + + str + + + + Name of function data for input. 
+ + + + required + + + + output_key + + str + + + + Name of function data for output. + + + + required + + + + in_channel + + int + + + + Number of channels of input. + + + + required + + + + out_channel + + int + + + + Number of channels of output. + + + + required + + + + kernel_size + + int + + + + Size of kernel of convolution layer. Defaults to 3. + + + + 3 + + + + filters + + Tuple[int, ...] + + + + Number of filters. Defaults to (16, 32, 64). + + + + (16, 32, 64) + + + + layers + + int + + + + Number of encoders or decoders. Defaults to 3. + + + + 3 + + + + weight_norm + + bool + + + + Whether use weight normalization layer. Defaults to True. + + + + True + + + + batch_norm + + bool + + + + Whether add batch normalization layer. Defaults to True. + + + + True + + + + activation + + Type[Layer] + + + + Name of activation function. Defaults to nn.ReLU. + + + + ReLU + + + + final_activation + + Optional[Type[Layer]] + + + + Name of final activation function. Defaults to None. + + + + None + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.ppsci.arch.UNetEx("input", "output", 3, 3, (8, 16, 32, 32), 5, False, False) - - Source code in ppsci/arch/unetex.py - 176 + + Source code in
>>> import ppsci >>> model = ppsci.arch.CylinderEmbedding(("x", "y"), ("u", "v"))
239 + + Source code in ppsci/arch/embedding_koopman.py + 239 240 241 242 @@ -6250,9 +7088,9 @@ y = self._output_transform(x, y) return y - - + + @@ -6268,7 +7106,7 @@ -
239 240 241 242 @@ -6250,9 +7088,9 @@ y = self._output_transform(x, y) return y
Generator
Generator Net of GAN. Attention, the net using a kind of variant of ResBlock which is unique to "tempoGAN" example but not an open source network.
Name of input keys, such as ("input1", "input2").
Name of output keys, such as ("output1", "output2").
in_channel
Number of input channels of the first conv layer.
out_channels_tuple
Tuple[Tuple[int, ...], ...]
Number of output channels of all conv layers, + +
Number of output channels of all conv layers, such as [[out_res0_conv0, out_res0_conv1], [out_res1_conv0, out_res1_conv1]]
kernel_sizes_tuple
Number of kernel_size of all conv layers, +
Number of kernel_size of all conv layers, such as [[kernel_size_res0_conv0, kernel_size_res0_conv1], [kernel_size_res1_conv0, kernel_size_res1_conv1]]
strides_tuple
Number of stride of all conv layers, +
Number of stride of all conv layers, such as [[stride_res0_conv0, stride_res0_conv1], [stride_res1_conv0, stride_res1_conv1]]
use_bns_tuple
Tuple[Tuple[bool, ...], ...]
Whether to use the batch_norm layer after each conv layer.
acts_tuple
Tuple[Tuple[str, ...], ...]
Whether to use the activation layer after each conv layer. If so, witch activation to use, +
Whether to use the activation layer after each conv layer. If so, witch activation to use, such as [[act_res0_conv0, act_res0_conv1], [act_res1_conv0, act_res1_conv1]]
>>> import ppsci +Examples: + >>> import ppsci >>> in_channel = 1 >>> rb_channel0 = (2, 8, 8) >>> rb_channel1 = (128, 128, 128) @@ -6438,9 +7279,9 @@ >>> model = ppsci.arch.Generator(("in",), ("out",), in_channel, out_channels_tuple, kernel_sizes_tuple, strides_tuple, use_bns_tuple, acts_tuple) - - Source code in ppsci/arch/gan.py - 154 + + Source code in ppsci/arch/gan.py + 154 155 156 157 @@ -6628,9 +7469,9 @@ y = self._output_transform(x, y) return y - - + + @@ -6646,7 +7487,7 @@ - + @@ -6654,169 +7495,172 @@ + - Discriminator + Discriminator ¶ - - - Bases: Arch - + + + Bases: Arch + Discriminator Net of GAN. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input1", "input2"). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output1", "output2"). - - - - required - - - - in_channel - - int - - - - Number of input channels of the first conv layer. - - - - required - - - - out_channels - - Tuple[int, ...] - - - - Number of output channels of all conv layers, + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input1", "input2"). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output1", "output2"). + + + + required + + + + in_channel + + int + + + + Number of input channels of the first conv layer. + + + + required + + + + out_channels + + Tuple[int, ...] + + + + Number of output channels of all conv layers, such as (out_conv0, out_conv1, out_conv2). - - - - required - - - - fc_channel - - int - - - - Number of input features of linear layer. Number of output features of the layer + + + + required + + + + fc_channel + + int + + + + Number of input features of linear layer. Number of output features of the layer is set to 1 in this Net to construct a fully_connected layer. 
- - - - required - - - - kernel_sizes - - Tuple[int, ...] - - - - Number of kernel_size of all conv layers, + + + + required + + + + kernel_sizes + + Tuple[int, ...] + + + + Number of kernel_size of all conv layers, such as (kernel_size_conv0, kernel_size_conv1, kernel_size_conv2). - - - - required - - - - strides - - Tuple[int, ...] - - - - Number of stride of all conv layers, + + + + required + + + + strides + + Tuple[int, ...] + + + + Number of stride of all conv layers, such as (stride_conv0, stride_conv1, stride_conv2). - - - - required - - - - use_bns - - Tuple[bool, ...] - - - - Whether to use the batch_norm layer after each conv layer. - - - - required - - - - acts - - Tuple[str, ...] - - - - Whether to use the activation layer after each conv layer. If so, witch activation to use, + + + + required + + + + use_bns + + Tuple[bool, ...] + + + + Whether to use the batch_norm layer after each conv layer. + + + + required + + + + acts + + Tuple[str, ...] + + + + Whether to use the activation layer after each conv layer. If so, witch activation to use, such as (act_conv0, act_conv1, act_conv2). - - - - required - - - - + + + + required + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> in_channel = 2 >>> in_channel_tempo = 3 >>> out_channels = (32, 64, 128, 256) @@ -6829,9 +7673,9 @@ >>> model = ppsci.arch.Discriminator(("in_1","in_2"), output_keys_disc, in_channel, out_channels, fc_channel, kernel_sizes, strides, use_bns, acts) - - Source code in ppsci/arch/gan.py - 250 + + Source code in ppsci/arch/gan.py + 250 251 252 253 @@ -7085,9 +7929,9 @@ return {keys[0]: data_list[0]} return {key: data_list[i] for i, key in enumerate(keys)} - - + + @@ -7099,17 +7943,20 @@ + + + - split_to_dict(data_list, keys) + split_to_dict(data_list, keys) ¶ - - + + Overwrite of split_to_dict() method belongs to Class base.Arch. Reason for overwriting is there is no concat_to_tensor() method called in "tempoGAN" example. 
That is because input in "tempoGAN" example is not in a regular format, but a format like: @@ -7119,74 +7966,76 @@ } -Parameters: - - - - Name - Type - Description - Default - - - - - data_list - - List[Tensor] - - - - The data to be split. It should be a list of tensor(s), but not a paddle.Tensor. - - - - required - - - - keys - - Tuple[str, ...] - - - - Keys of outputs. - - - - required - - - - - - - Returns: - - - - Type - Description - - - - - - Dict[str, Tensor] - - - - Dict[str, paddle.Tensor]: Dict with split data. - - - - - - - Source code in ppsci/arch/gan.py - 355 + Parameters: + + + + Name + Type + Description + Default + + + + + data_list + + List[Tensor] + + + + The data to be split. It should be a list of tensor(s), but not a paddle.Tensor. + + + + required + + + + keys + + Tuple[str, ...] + + + + Keys of outputs. + + + + required + + + + + + + + Returns: + + + + Type + Description + + + + + + Dict[str, Tensor] + + + + Dict[str, paddle.Tensor]: Dict with split data. + + + + + + + + Source code in ppsci/arch/gan.py + 355 356 357 358 @@ -7230,8 +8079,8 @@ return {keys[0]: data_list[0]} return {key: data_list[i] for i, key in enumerate(keys)} - - + + @@ -7239,7 +8088,7 @@ - + @@ -7247,184 +8096,187 @@ + - PhysformerGPT2 + PhysformerGPT2 ¶ - - - Bases: Arch - + + + Bases: Arch + Transformer decoder model for modeling physics. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("embeds",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Output keys, such as ("pred_embeds",). - - - - required - - - - num_layers - - int - - - - Number of transformer layers. - - - - required - - - - num_ctx - - int - - - - Context length of block. - - - - required - - - - embed_size - - int - - - - The number of embedding size. - - - - required - - - - num_heads - - int - - - - The number of heads in multi-head attention. 
- - - - required - - - - embd_pdrop - - float - - - - The dropout probability used on embedding features. Defaults to 0.0. - - - - 0.0 - - - - attn_pdrop - - float - - - - The dropout probability used on attention weights. Defaults to 0.0. - - - - 0.0 - - - - resid_pdrop - - float - - - - The dropout probability used on block outputs. Defaults to 0.0. - - - - 0.0 - - - - initializer_range - - float - - - - Initializer range of linear layer. Defaults to 0.05. - - - - 0.05 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("embeds",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Output keys, such as ("pred_embeds",). + + + + required + + + + num_layers + + int + + + + Number of transformer layers. + + + + required + + + + num_ctx + + int + + + + Context length of block. + + + + required + + + + embed_size + + int + + + + The number of embedding size. + + + + required + + + + num_heads + + int + + + + The number of heads in multi-head attention. + + + + required + + + + embd_pdrop + + float + + + + The dropout probability used on embedding features. Defaults to 0.0. + + + + 0.0 + + + + attn_pdrop + + float + + + + The dropout probability used on attention weights. Defaults to 0.0. + + + + 0.0 + + + + resid_pdrop + + float + + + + The dropout probability used on block outputs. Defaults to 0.0. + + + + 0.0 + + + + initializer_range + + float + + + + Initializer range of linear layer. Defaults to 0.05. 
+ + + + 0.05 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.PhysformerGPT2(("embeds", ), ("pred_embeds", ), 6, 16, 128, 4) - - Source code in ppsci/arch/physx_transformer.py - 240 + + Source code in ppsci/arch/physx_transformer.py + 240 241 242 243 @@ -7714,9 +8566,9 @@ y = self._output_transform(x, y) return y - - + + @@ -7732,7 +8584,7 @@ - + @@ -7740,60 +8592,63 @@ + - ModelList + ModelList ¶ - - - Bases: Arch - + + + Bases: Arch + ModelList layer which wrap more than one model that shares inputs. -Parameters: - - - - Name - Type - Description - Default - - - - - model_list - - Tuple[Arch, ...] - - - - Model(s) nested in tuple. - - - - required - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + model_list + + Tuple[Arch, ...] + + + + Model(s) nested in tuple. + + + + required + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> model1 = ppsci.arch.MLP(("x", "y"), ("u", "v"), 10, 128) >>> model2 = ppsci.arch.MLP(("x", "y"), ("w", "p"), 5, 128) >>> model = ppsci.arch.ModelList((model1, model2)) - - Source code in ppsci/arch/model_list.py - 24 + + Source code in ppsci/arch/model_list.py + 24 25 26 27 @@ -7873,9 +8728,9 @@ return y_all - - + + @@ -7891,7 +8746,7 @@ - + @@ -7899,254 +8754,257 @@ + - AFNONet + AFNONet ¶ - - - Bases: Arch - + + + Bases: Arch + Adaptive Fourier Neural Network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output",). - - - - required - - - - img_size - - Tuple[int, ...] - - - - Image size. Defaults to (720, 1440). - - - - (720, 1440) - - - - patch_size - - Tuple[int, ...] - - - - Path. Defaults to (8, 8). - - - - (8, 8) - - - - in_channels - - int - - - - The input tensor channels. Defaults to 20. 
- - - - 20 - - - - out_channels - - int - - - - The output tensor channels. Defaults to 20. - - - - 20 - - - - embed_dim - - int - - - - The embedding dimension for PatchEmbed. Defaults to 768. - - - - 768 - - - - depth - - int - - - - Number of transformer depth. Defaults to 12. - - - - 12 - - - - mlp_ratio - - float - - - - Number of ratio used in MLP. Defaults to 4.0. - - - - 4.0 - - - - drop_rate - - float - - - - The drop ratio used in MLP. Defaults to 0.0. - - - - 0.0 - - - - drop_path_rate - - float - - - - The drop ratio used in DropPath. Defaults to 0.0. - - - - 0.0 - - - - num_blocks - - int - - - - Number of blocks. Defaults to 8. - - - - 8 - - - - sparsity_threshold - - float - - - - The value of threshold for softshrink. Defaults to 0.01. - - - - 0.01 - - - - hard_thresholding_fraction - - float - - - - The value of threshold for keep mode. Defaults to 1.0. - - - - 1.0 - - - - num_timestamps - - int - - - - Number of timestamp. Defaults to 1. - - - - 1 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output",). + + + + required + + + + img_size + + Tuple[int, ...] + + + + Image size. Defaults to (720, 1440). + + + + (720, 1440) + + + + patch_size + + Tuple[int, ...] + + + + Path. Defaults to (8, 8). + + + + (8, 8) + + + + in_channels + + int + + + + The input tensor channels. Defaults to 20. + + + + 20 + + + + out_channels + + int + + + + The output tensor channels. Defaults to 20. + + + + 20 + + + + embed_dim + + int + + + + The embedding dimension for PatchEmbed. Defaults to 768. + + + + 768 + + + + depth + + int + + + + Number of transformer depth. Defaults to 12. + + + + 12 + + + + mlp_ratio + + float + + + + Number of ratio used in MLP. Defaults to 4.0. + + + + 4.0 + + + + drop_rate + + float + + + + The drop ratio used in MLP. Defaults to 0.0. 
+ + + + 0.0 + + + + drop_path_rate + + float + + + + The drop ratio used in DropPath. Defaults to 0.0. + + + + 0.0 + + + + num_blocks + + int + + + + Number of blocks. Defaults to 8. + + + + 8 + + + + sparsity_threshold + + float + + + + The value of threshold for softshrink. Defaults to 0.01. + + + + 0.01 + + + + hard_thresholding_fraction + + float + + + + The value of threshold for keep mode. Defaults to 1.0. + + + + 1.0 + + + + num_timestamps + + int + + + + Number of timestamp. Defaults to 1. + + + + 1 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.AFNONet(("input", ), ("output", )) - - Source code in ppsci/arch/afno.py - 394 + + Source code in ppsci/arch/afno.py + 394 395 396 397 @@ -8466,9 +9324,9 @@ y = self._output_transform(x, y) return y - - + + @@ -8484,7 +9342,7 @@ - + @@ -8492,269 +9350,272 @@ + - PrecipNet + PrecipNet ¶ - - - Bases: Arch - + + + Bases: Arch + Precipitation Network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output",). - - - - required - - - - wind_model - - Arch - - - - Wind model. - - - - required - - - - img_size - - Tuple[int, ...] - - - - Image size. Defaults to (720, 1440). - - - - (720, 1440) - - - - patch_size - - Tuple[int, ...] - - - - Path. Defaults to (8, 8). - - - - (8, 8) - - - - in_channels - - int - - - - The input tensor channels. Defaults to 20. - - - - 20 - - - - out_channels - - int - - - - The output tensor channels. Defaults to 1. - - - - 1 - - - - embed_dim - - int - - - - The embedding dimension for PatchEmbed. Defaults to 768. - - - - 768 - - - - depth - - int - - - - Number of transformer depth. Defaults to 12. - - - - 12 - - - - mlp_ratio - - float - - - - Number of ratio used in MLP. Defaults to 4.0. 
- - - - 4.0 - - - - drop_rate - - float - - - - The drop ratio used in MLP. Defaults to 0.0. - - - - 0.0 - - - - drop_path_rate - - float - - - - The drop ratio used in DropPath. Defaults to 0.0. - - - - 0.0 - - - - num_blocks - - int - - - - Number of blocks. Defaults to 8. - - - - 8 - - - - sparsity_threshold - - float - - - - The value of threshold for softshrink. Defaults to 0.01. - - - - 0.01 - - - - hard_thresholding_fraction - - float - - - - The value of threshold for keep mode. Defaults to 1.0. - - - - 1.0 - - - - num_timestamps - - int - - - - Number of timestamp. Defaults to 1. - - - - 1 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output",). + + + + required + + + + wind_model + + Arch + + + + Wind model. + + + + required + + + + img_size + + Tuple[int, ...] + + + + Image size. Defaults to (720, 1440). + + + + (720, 1440) + + + + patch_size + + Tuple[int, ...] + + + + Path. Defaults to (8, 8). + + + + (8, 8) + + + + in_channels + + int + + + + The input tensor channels. Defaults to 20. + + + + 20 + + + + out_channels + + int + + + + The output tensor channels. Defaults to 1. + + + + 1 + + + + embed_dim + + int + + + + The embedding dimension for PatchEmbed. Defaults to 768. + + + + 768 + + + + depth + + int + + + + Number of transformer depth. Defaults to 12. + + + + 12 + + + + mlp_ratio + + float + + + + Number of ratio used in MLP. Defaults to 4.0. + + + + 4.0 + + + + drop_rate + + float + + + + The drop ratio used in MLP. Defaults to 0.0. + + + + 0.0 + + + + drop_path_rate + + float + + + + The drop ratio used in DropPath. Defaults to 0.0. + + + + 0.0 + + + + num_blocks + + int + + + + Number of blocks. Defaults to 8. + + + + 8 + + + + sparsity_threshold + + float + + + + The value of threshold for softshrink. Defaults to 0.01. 
+ + + + 0.01 + + + + hard_thresholding_fraction + + float + + + + The value of threshold for keep mode. Defaults to 1.0. + + + + 1.0 + + + + num_timestamps + + int + + + + Number of timestamp. Defaults to 1. + + + + 1 + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> wind_model = ppsci.arch.AFNONet(("input", ), ("output", )) >>> model = ppsci.arch.PrecipNet(("input", ), ("output", ), wind_model) - - Source code in ppsci/arch/afno.py - 556 + + Source code in ppsci/arch/afno.py + 556 557 558 559 @@ -9002,9 +9863,9 @@ y = self._output_transform(x, y) return y - - + + @@ -9020,7 +9881,7 @@ - + @@ -9028,199 +9889,202 @@ + - UNetEx + UNetEx ¶ - - - Bases: Arch - + + + Bases: Arch + U-Net Ribeiro M D, Rehman A, Ahmed S, et al. DeepCFD: Efficient steady-state laminar flow approximation with deep convolutional neural networks[J]. arXiv preprint arXiv:2004.08826, 2020. -Parameters: - - - - Name - Type - Description - Default - - - - - input_key - - str - - - - Name of function data for input. - - - - required - - - - output_key - - str - - - - Name of function data for output. - - - - required - - - - in_channel - - int - - - - Number of channels of input. - - - - required - - - - out_channel - - int - - - - Number of channels of output. - - - - required - - - - kernel_size - - int - - - - Size of kernel of convolution layer. Defaults to 3. - - - - 3 - - - - filters - - Tuple[int, ...] - - - - Number of filters. Defaults to (16, 32, 64). - - - - (16, 32, 64) - - - - layers - - int - - - - Number of encoders or decoders. Defaults to 3. - - - - 3 - - - - weight_norm - - bool - - - - Whether use weight normalization layer. Defaults to True. - - - - True - - - - batch_norm - - bool - - - - Whether add batch normalization layer. Defaults to True. - - - - True - - - - activation - - Type[Layer] - - - - Name of activation function. Defaults to nn.ReLU. - - - - ReLU - - - - final_activation - - Optional[Type[Layer]] - - - - Name of final activation function. 
Defaults to None. - - - - None - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_key + + str + + + + Name of function data for input. + + + + required + + + + output_key + + str + + + + Name of function data for output. + + + + required + + + + in_channel + + int + + + + Number of channels of input. + + + + required + + + + out_channel + + int + + + + Number of channels of output. + + + + required + + + + kernel_size + + int + + + + Size of kernel of convolution layer. Defaults to 3. + + + + 3 + + + + filters + + Tuple[int, ...] + + + + Number of filters. Defaults to (16, 32, 64). + + + + (16, 32, 64) + + + + layers + + int + + + + Number of encoders or decoders. Defaults to 3. + + + + 3 + + + + weight_norm + + bool + + + + Whether use weight normalization layer. Defaults to True. + + + + True + + + + batch_norm + + bool + + + + Whether add batch normalization layer. Defaults to True. + + + + True + + + + activation + + Type[Layer] + + + + Name of activation function. Defaults to nn.ReLU. + + + + ReLU + + + + final_activation + + Optional[Type[Layer]] + + + + Name of final activation function. Defaults to None. + + + + None + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.ppsci.arch.UNetEx("input", "output", 3, 3, (8, 16, 32, 32), 5, False, False) - - Source code in ppsci/arch/unetex.py - 176 + + Source code in
>>> import ppsci >>> in_channel = 1 >>> rb_channel0 = (2, 8, 8) >>> rb_channel1 = (128, 128, 128) @@ -6438,9 +7279,9 @@ >>> model = ppsci.arch.Generator(("in",), ("out",), in_channel, out_channels_tuple, kernel_sizes_tuple, strides_tuple, use_bns_tuple, acts_tuple)
ppsci/arch/gan.py
154 + + Source code in ppsci/arch/gan.py + 154 155 156 157 @@ -6628,9 +7469,9 @@ y = self._output_transform(x, y) return y - - + + @@ -6646,7 +7487,7 @@ -
154 155 156 157 @@ -6628,9 +7469,9 @@ y = self._output_transform(x, y) return y
Discriminator
Discriminator Net of GAN.
out_channels
Tuple[int, ...]
Number of output channels of all conv layers, such as (out_conv0, out_conv1, out_conv2).
fc_channel
Number of input features of linear layer. Number of output features of the layer +
Number of input features of linear layer. Number of output features of the layer is set to 1 in this Net to construct a fully_connected layer.
kernel_sizes
Number of kernel_size of all conv layers, such as (kernel_size_conv0, kernel_size_conv1, kernel_size_conv2).
strides
Number of stride of all conv layers, such as (stride_conv0, stride_conv1, stride_conv2).
use_bns
Tuple[bool, ...]
acts
Whether to use the activation layer after each conv layer. If so, which activation to use, such as (act_conv0, act_conv1, act_conv2).
>>> import ppsci +Examples: + >>> import ppsci >>> in_channel = 2 >>> in_channel_tempo = 3 >>> out_channels = (32, 64, 128, 256) @@ -6829,9 +7673,9 @@ >>> model = ppsci.arch.Discriminator(("in_1","in_2"), output_keys_disc, in_channel, out_channels, fc_channel, kernel_sizes, strides, use_bns, acts) - - Source code in ppsci/arch/gan.py - 250 + + Source code in ppsci/arch/gan.py + 250 251 252 253 @@ -7085,9 +7929,9 @@ return {keys[0]: data_list[0]} return {key: data_list[i] for i, key in enumerate(keys)} - - + + @@ -7099,17 +7943,20 @@ + + + - split_to_dict(data_list, keys) + split_to_dict(data_list, keys) ¶ - - + + Overwrite of split_to_dict() method belongs to Class base.Arch. Reason for overwriting is there is no concat_to_tensor() method called in "tempoGAN" example. That is because input in "tempoGAN" example is not in a regular format, but a format like: @@ -7119,74 +7966,76 @@ } -Parameters: - - - - Name - Type - Description - Default - - - - - data_list - - List[Tensor] - - - - The data to be split. It should be a list of tensor(s), but not a paddle.Tensor. - - - - required - - - - keys - - Tuple[str, ...] - - - - Keys of outputs. - - - - required - - - - - - - Returns: - - - - Type - Description - - - - - - Dict[str, Tensor] - - - - Dict[str, paddle.Tensor]: Dict with split data. - - - - - - - Source code in ppsci/arch/gan.py - 355 + Parameters: + + + + Name + Type + Description + Default + + + + + data_list + + List[Tensor] + + + + The data to be split. It should be a list of tensor(s), but not a paddle.Tensor. + + + + required + + + + keys + + Tuple[str, ...] + + + + Keys of outputs. + + + + required + + + + + + + + Returns: + + + + Type + Description + + + + + + Dict[str, Tensor] + + + + Dict[str, paddle.Tensor]: Dict with split data. 
+ + + + + + + + Source code in ppsci/arch/gan.py + 355 356 357 358 @@ -7230,8 +8079,8 @@ return {keys[0]: data_list[0]} return {key: data_list[i] for i, key in enumerate(keys)} - - + + @@ -7239,7 +8088,7 @@ - + @@ -7247,184 +8096,187 @@ + - PhysformerGPT2 + PhysformerGPT2 ¶ - - - Bases: Arch - + + + Bases: Arch + Transformer decoder model for modeling physics. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("embeds",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Output keys, such as ("pred_embeds",). - - - - required - - - - num_layers - - int - - - - Number of transformer layers. - - - - required - - - - num_ctx - - int - - - - Context length of block. - - - - required - - - - embed_size - - int - - - - The number of embedding size. - - - - required - - - - num_heads - - int - - - - The number of heads in multi-head attention. - - - - required - - - - embd_pdrop - - float - - - - The dropout probability used on embedding features. Defaults to 0.0. - - - - 0.0 - - - - attn_pdrop - - float - - - - The dropout probability used on attention weights. Defaults to 0.0. - - - - 0.0 - - - - resid_pdrop - - float - - - - The dropout probability used on block outputs. Defaults to 0.0. - - - - 0.0 - - - - initializer_range - - float - - - - Initializer range of linear layer. Defaults to 0.05. - - - - 0.05 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("embeds",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Output keys, such as ("pred_embeds",). + + + + required + + + + num_layers + + int + + + + Number of transformer layers. + + + + required + + + + num_ctx + + int + + + + Context length of block. + + + + required + + + + embed_size + + int + + + + The number of embedding size. 
+ + + + required + + + + num_heads + + int + + + + The number of heads in multi-head attention. + + + + required + + + + embd_pdrop + + float + + + + The dropout probability used on embedding features. Defaults to 0.0. + + + + 0.0 + + + + attn_pdrop + + float + + + + The dropout probability used on attention weights. Defaults to 0.0. + + + + 0.0 + + + + resid_pdrop + + float + + + + The dropout probability used on block outputs. Defaults to 0.0. + + + + 0.0 + + + + initializer_range + + float + + + + Initializer range of linear layer. Defaults to 0.05. + + + + 0.05 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.PhysformerGPT2(("embeds", ), ("pred_embeds", ), 6, 16, 128, 4) - - Source code in ppsci/arch/physx_transformer.py - 240 + + Source code in ppsci/arch/physx_transformer.py + 240 241 242 243 @@ -7714,9 +8566,9 @@ y = self._output_transform(x, y) return y - - + + @@ -7732,7 +8584,7 @@ - + @@ -7740,60 +8592,63 @@ + - ModelList + ModelList ¶ - - - Bases: Arch - + + + Bases: Arch + ModelList layer which wrap more than one model that shares inputs. -Parameters: - - - - Name - Type - Description - Default - - - - - model_list - - Tuple[Arch, ...] - - - - Model(s) nested in tuple. - - - - required - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + model_list + + Tuple[Arch, ...] + + + + Model(s) nested in tuple. + + + + required + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> model1 = ppsci.arch.MLP(("x", "y"), ("u", "v"), 10, 128) >>> model2 = ppsci.arch.MLP(("x", "y"), ("w", "p"), 5, 128) >>> model = ppsci.arch.ModelList((model1, model2)) - - Source code in ppsci/arch/model_list.py - 24 + + Source code in ppsci/arch/model_list.py + 24 25 26 27 @@ -7873,9 +8728,9 @@ return y_all - - + + @@ -7891,7 +8746,7 @@ - + @@ -7899,254 +8754,257 @@ + - AFNONet + AFNONet ¶ - - - Bases: Arch - + + + Bases: Arch + Adaptive Fourier Neural Network. 
-Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output",). - - - - required - - - - img_size - - Tuple[int, ...] - - - - Image size. Defaults to (720, 1440). - - - - (720, 1440) - - - - patch_size - - Tuple[int, ...] - - - - Path. Defaults to (8, 8). - - - - (8, 8) - - - - in_channels - - int - - - - The input tensor channels. Defaults to 20. - - - - 20 - - - - out_channels - - int - - - - The output tensor channels. Defaults to 20. - - - - 20 - - - - embed_dim - - int - - - - The embedding dimension for PatchEmbed. Defaults to 768. - - - - 768 - - - - depth - - int - - - - Number of transformer depth. Defaults to 12. - - - - 12 - - - - mlp_ratio - - float - - - - Number of ratio used in MLP. Defaults to 4.0. - - - - 4.0 - - - - drop_rate - - float - - - - The drop ratio used in MLP. Defaults to 0.0. - - - - 0.0 - - - - drop_path_rate - - float - - - - The drop ratio used in DropPath. Defaults to 0.0. - - - - 0.0 - - - - num_blocks - - int - - - - Number of blocks. Defaults to 8. - - - - 8 - - - - sparsity_threshold - - float - - - - The value of threshold for softshrink. Defaults to 0.01. - - - - 0.01 - - - - hard_thresholding_fraction - - float - - - - The value of threshold for keep mode. Defaults to 1.0. - - - - 1.0 - - - - num_timestamps - - int - - - - Number of timestamp. Defaults to 1. - - - - 1 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output",). + + + + required + + + + img_size + + Tuple[int, ...] + + + + Image size. Defaults to (720, 1440). + + + + (720, 1440) + + + + patch_size + + Tuple[int, ...] + + + + Path. Defaults to (8, 8). 
+ + + + (8, 8) + + + + in_channels + + int + + + + The input tensor channels. Defaults to 20. + + + + 20 + + + + out_channels + + int + + + + The output tensor channels. Defaults to 20. + + + + 20 + + + + embed_dim + + int + + + + The embedding dimension for PatchEmbed. Defaults to 768. + + + + 768 + + + + depth + + int + + + + Number of transformer depth. Defaults to 12. + + + + 12 + + + + mlp_ratio + + float + + + + Number of ratio used in MLP. Defaults to 4.0. + + + + 4.0 + + + + drop_rate + + float + + + + The drop ratio used in MLP. Defaults to 0.0. + + + + 0.0 + + + + drop_path_rate + + float + + + + The drop ratio used in DropPath. Defaults to 0.0. + + + + 0.0 + + + + num_blocks + + int + + + + Number of blocks. Defaults to 8. + + + + 8 + + + + sparsity_threshold + + float + + + + The value of threshold for softshrink. Defaults to 0.01. + + + + 0.01 + + + + hard_thresholding_fraction + + float + + + + The value of threshold for keep mode. Defaults to 1.0. + + + + 1.0 + + + + num_timestamps + + int + + + + Number of timestamp. Defaults to 1. + + + + 1 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.AFNONet(("input", ), ("output", )) - - Source code in ppsci/arch/afno.py - 394 + + Source code in ppsci/arch/afno.py + 394 395 396 397 @@ -8466,9 +9324,9 @@ y = self._output_transform(x, y) return y - - + + @@ -8484,7 +9342,7 @@ - + @@ -8492,269 +9350,272 @@ + - PrecipNet + PrecipNet ¶ - - - Bases: Arch - + + + Bases: Arch + Precipitation Network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output",). - - - - required - - - - wind_model - - Arch - - - - Wind model. - - - - required - - - - img_size - - Tuple[int, ...] - - - - Image size. Defaults to (720, 1440). - - - - (720, 1440) - - - - patch_size - - Tuple[int, ...] 
- - - - Path. Defaults to (8, 8). - - - - (8, 8) - - - - in_channels - - int - - - - The input tensor channels. Defaults to 20. - - - - 20 - - - - out_channels - - int - - - - The output tensor channels. Defaults to 1. - - - - 1 - - - - embed_dim - - int - - - - The embedding dimension for PatchEmbed. Defaults to 768. - - - - 768 - - - - depth - - int - - - - Number of transformer depth. Defaults to 12. - - - - 12 - - - - mlp_ratio - - float - - - - Number of ratio used in MLP. Defaults to 4.0. - - - - 4.0 - - - - drop_rate - - float - - - - The drop ratio used in MLP. Defaults to 0.0. - - - - 0.0 - - - - drop_path_rate - - float - - - - The drop ratio used in DropPath. Defaults to 0.0. - - - - 0.0 - - - - num_blocks - - int - - - - Number of blocks. Defaults to 8. - - - - 8 - - - - sparsity_threshold - - float - - - - The value of threshold for softshrink. Defaults to 0.01. - - - - 0.01 - - - - hard_thresholding_fraction - - float - - - - The value of threshold for keep mode. Defaults to 1.0. - - - - 1.0 - - - - num_timestamps - - int - - - - Number of timestamp. Defaults to 1. - - - - 1 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output",). + + + + required + + + + wind_model + + Arch + + + + Wind model. + + + + required + + + + img_size + + Tuple[int, ...] + + + + Image size. Defaults to (720, 1440). + + + + (720, 1440) + + + + patch_size + + Tuple[int, ...] + + + + Path. Defaults to (8, 8). + + + + (8, 8) + + + + in_channels + + int + + + + The input tensor channels. Defaults to 20. + + + + 20 + + + + out_channels + + int + + + + The output tensor channels. Defaults to 1. + + + + 1 + + + + embed_dim + + int + + + + The embedding dimension for PatchEmbed. Defaults to 768. + + + + 768 + + + + depth + + int + + + + Number of transformer depth. Defaults to 12. 
+ + + + 12 + + + + mlp_ratio + + float + + + + Number of ratio used in MLP. Defaults to 4.0. + + + + 4.0 + + + + drop_rate + + float + + + + The drop ratio used in MLP. Defaults to 0.0. + + + + 0.0 + + + + drop_path_rate + + float + + + + The drop ratio used in DropPath. Defaults to 0.0. + + + + 0.0 + + + + num_blocks + + int + + + + Number of blocks. Defaults to 8. + + + + 8 + + + + sparsity_threshold + + float + + + + The value of threshold for softshrink. Defaults to 0.01. + + + + 0.01 + + + + hard_thresholding_fraction + + float + + + + The value of threshold for keep mode. Defaults to 1.0. + + + + 1.0 + + + + num_timestamps + + int + + + + Number of timestamp. Defaults to 1. + + + + 1 + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> wind_model = ppsci.arch.AFNONet(("input", ), ("output", )) >>> model = ppsci.arch.PrecipNet(("input", ), ("output", ), wind_model) - - Source code in ppsci/arch/afno.py - 556 + + Source code in ppsci/arch/afno.py + 556 557 558 559 @@ -9002,9 +9863,9 @@ y = self._output_transform(x, y) return y - - + + @@ -9020,7 +9881,7 @@ - + @@ -9028,199 +9889,202 @@ + - UNetEx + UNetEx ¶ - - - Bases: Arch - + + + Bases: Arch + U-Net Ribeiro M D, Rehman A, Ahmed S, et al. DeepCFD: Efficient steady-state laminar flow approximation with deep convolutional neural networks[J]. arXiv preprint arXiv:2004.08826, 2020. -Parameters: - - - - Name - Type - Description - Default - - - - - input_key - - str - - - - Name of function data for input. - - - - required - - - - output_key - - str - - - - Name of function data for output. - - - - required - - - - in_channel - - int - - - - Number of channels of input. - - - - required - - - - out_channel - - int - - - - Number of channels of output. - - - - required - - - - kernel_size - - int - - - - Size of kernel of convolution layer. Defaults to 3. - - - - 3 - - - - filters - - Tuple[int, ...] - - - - Number of filters. Defaults to (16, 32, 64). 
- - - - (16, 32, 64) - - - - layers - - int - - - - Number of encoders or decoders. Defaults to 3. - - - - 3 - - - - weight_norm - - bool - - - - Whether use weight normalization layer. Defaults to True. - - - - True - - - - batch_norm - - bool - - - - Whether add batch normalization layer. Defaults to True. - - - - True - - - - activation - - Type[Layer] - - - - Name of activation function. Defaults to nn.ReLU. - - - - ReLU - - - - final_activation - - Optional[Type[Layer]] - - - - Name of final activation function. Defaults to None. - - - - None - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_key + + str + + + + Name of function data for input. + + + + required + + + + output_key + + str + + + + Name of function data for output. + + + + required + + + + in_channel + + int + + + + Number of channels of input. + + + + required + + + + out_channel + + int + + + + Number of channels of output. + + + + required + + + + kernel_size + + int + + + + Size of kernel of convolution layer. Defaults to 3. + + + + 3 + + + + filters + + Tuple[int, ...] + + + + Number of filters. Defaults to (16, 32, 64). + + + + (16, 32, 64) + + + + layers + + int + + + + Number of encoders or decoders. Defaults to 3. + + + + 3 + + + + weight_norm + + bool + + + + Whether use weight normalization layer. Defaults to True. + + + + True + + + + batch_norm + + bool + + + + Whether add batch normalization layer. Defaults to True. + + + + True + + + + activation + + Type[Layer] + + + + Name of activation function. Defaults to nn.ReLU. + + + + ReLU + + + + final_activation + + Optional[Type[Layer]] + + + + Name of final activation function. Defaults to None. + + + + None + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.ppsci.arch.UNetEx("input", "output", 3, 3, (8, 16, 32, 32), 5, False, False) - - Source code in ppsci/arch/unetex.py - 176 + + Source code in
>>> import ppsci >>> in_channel = 2 >>> in_channel_tempo = 3 >>> out_channels = (32, 64, 128, 256) @@ -6829,9 +7673,9 @@ >>> model = ppsci.arch.Discriminator(("in_1","in_2"), output_keys_disc, in_channel, out_channels, fc_channel, kernel_sizes, strides, use_bns, acts)
250 + + Source code in ppsci/arch/gan.py + 250 251 252 253 @@ -7085,9 +7929,9 @@ return {keys[0]: data_list[0]} return {key: data_list[i] for i, key in enumerate(keys)} - - + + @@ -7099,17 +7943,20 @@ + + + - split_to_dict(data_list, keys) + split_to_dict(data_list, keys) ¶ - - + + Overwrite of split_to_dict() method belongs to Class base.Arch. Reason for overwriting is there is no concat_to_tensor() method called in "tempoGAN" example. That is because input in "tempoGAN" example is not in a regular format, but a format like: @@ -7119,74 +7966,76 @@ } -Parameters: - - - - Name - Type - Description - Default - - - - - data_list - - List[Tensor] - - - - The data to be split. It should be a list of tensor(s), but not a paddle.Tensor. - - - - required - - - - keys - - Tuple[str, ...] - - - - Keys of outputs. - - - - required - - - - - - - Returns: - - - - Type - Description - - - - - - Dict[str, Tensor] - - - - Dict[str, paddle.Tensor]: Dict with split data. - - - - - - - Source code in ppsci/arch/gan.py - 355 + Parameters: + + + + Name + Type + Description + Default + + + + + data_list + + List[Tensor] + + + + The data to be split. It should be a list of tensor(s), but not a paddle.Tensor. + + + + required + + + + keys + + Tuple[str, ...] + + + + Keys of outputs. + + + + required + + + + + + + + Returns: + + + + Type + Description + + + + + + Dict[str, Tensor] + + + + Dict[str, paddle.Tensor]: Dict with split data. + + + + + + + + Source code in ppsci/arch/gan.py + 355 356 357 358 @@ -7230,8 +8079,8 @@ return {keys[0]: data_list[0]} return {key: data_list[i] for i, key in enumerate(keys)} - - + + @@ -7239,7 +8088,7 @@ - + @@ -7247,184 +8096,187 @@ + - PhysformerGPT2 + PhysformerGPT2 ¶ - - - Bases: Arch - + + + Bases: Arch + Transformer decoder model for modeling physics. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Input keys, such as ("embeds",). - - - - required - - - - output_keys - - Tuple[str, ...] 
- - - - Output keys, such as ("pred_embeds",). - - - - required - - - - num_layers - - int - - - - Number of transformer layers. - - - - required - - - - num_ctx - - int - - - - Context length of block. - - - - required - - - - embed_size - - int - - - - The number of embedding size. - - - - required - - - - num_heads - - int - - - - The number of heads in multi-head attention. - - - - required - - - - embd_pdrop - - float - - - - The dropout probability used on embedding features. Defaults to 0.0. - - - - 0.0 - - - - attn_pdrop - - float - - - - The dropout probability used on attention weights. Defaults to 0.0. - - - - 0.0 - - - - resid_pdrop - - float - - - - The dropout probability used on block outputs. Defaults to 0.0. - - - - 0.0 - - - - initializer_range - - float - - - - Initializer range of linear layer. Defaults to 0.05. - - - - 0.05 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Input keys, such as ("embeds",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Output keys, such as ("pred_embeds",). + + + + required + + + + num_layers + + int + + + + Number of transformer layers. + + + + required + + + + num_ctx + + int + + + + Context length of block. + + + + required + + + + embed_size + + int + + + + The number of embedding size. + + + + required + + + + num_heads + + int + + + + The number of heads in multi-head attention. + + + + required + + + + embd_pdrop + + float + + + + The dropout probability used on embedding features. Defaults to 0.0. + + + + 0.0 + + + + attn_pdrop + + float + + + + The dropout probability used on attention weights. Defaults to 0.0. + + + + 0.0 + + + + resid_pdrop + + float + + + + The dropout probability used on block outputs. Defaults to 0.0. + + + + 0.0 + + + + initializer_range + + float + + + + Initializer range of linear layer. Defaults to 0.05. 
+ + + + 0.05 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.PhysformerGPT2(("embeds", ), ("pred_embeds", ), 6, 16, 128, 4) - - Source code in ppsci/arch/physx_transformer.py - 240 + + Source code in ppsci/arch/physx_transformer.py + 240 241 242 243 @@ -7714,9 +8566,9 @@ y = self._output_transform(x, y) return y - - + + @@ -7732,7 +8584,7 @@ - + @@ -7740,60 +8592,63 @@ + - ModelList + ModelList ¶ - - - Bases: Arch - + + + Bases: Arch + ModelList layer which wrap more than one model that shares inputs. -Parameters: - - - - Name - Type - Description - Default - - - - - model_list - - Tuple[Arch, ...] - - - - Model(s) nested in tuple. - - - - required - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + model_list + + Tuple[Arch, ...] + + + + Model(s) nested in tuple. + + + + required + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> model1 = ppsci.arch.MLP(("x", "y"), ("u", "v"), 10, 128) >>> model2 = ppsci.arch.MLP(("x", "y"), ("w", "p"), 5, 128) >>> model = ppsci.arch.ModelList((model1, model2)) - - Source code in ppsci/arch/model_list.py - 24 + + Source code in ppsci/arch/model_list.py + 24 25 26 27 @@ -7873,9 +8728,9 @@ return y_all - - + + @@ -7891,7 +8746,7 @@ - + @@ -7899,254 +8754,257 @@ + - AFNONet + AFNONet ¶ - - - Bases: Arch - + + + Bases: Arch + Adaptive Fourier Neural Network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output",). - - - - required - - - - img_size - - Tuple[int, ...] - - - - Image size. Defaults to (720, 1440). - - - - (720, 1440) - - - - patch_size - - Tuple[int, ...] - - - - Path. Defaults to (8, 8). - - - - (8, 8) - - - - in_channels - - int - - - - The input tensor channels. Defaults to 20. 
- - - - 20 - - - - out_channels - - int - - - - The output tensor channels. Defaults to 20. - - - - 20 - - - - embed_dim - - int - - - - The embedding dimension for PatchEmbed. Defaults to 768. - - - - 768 - - - - depth - - int - - - - Number of transformer depth. Defaults to 12. - - - - 12 - - - - mlp_ratio - - float - - - - Number of ratio used in MLP. Defaults to 4.0. - - - - 4.0 - - - - drop_rate - - float - - - - The drop ratio used in MLP. Defaults to 0.0. - - - - 0.0 - - - - drop_path_rate - - float - - - - The drop ratio used in DropPath. Defaults to 0.0. - - - - 0.0 - - - - num_blocks - - int - - - - Number of blocks. Defaults to 8. - - - - 8 - - - - sparsity_threshold - - float - - - - The value of threshold for softshrink. Defaults to 0.01. - - - - 0.01 - - - - hard_thresholding_fraction - - float - - - - The value of threshold for keep mode. Defaults to 1.0. - - - - 1.0 - - - - num_timestamps - - int - - - - Number of timestamp. Defaults to 1. - - - - 1 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output",). + + + + required + + + + img_size + + Tuple[int, ...] + + + + Image size. Defaults to (720, 1440). + + + + (720, 1440) + + + + patch_size + + Tuple[int, ...] + + + + Path. Defaults to (8, 8). + + + + (8, 8) + + + + in_channels + + int + + + + The input tensor channels. Defaults to 20. + + + + 20 + + + + out_channels + + int + + + + The output tensor channels. Defaults to 20. + + + + 20 + + + + embed_dim + + int + + + + The embedding dimension for PatchEmbed. Defaults to 768. + + + + 768 + + + + depth + + int + + + + Number of transformer depth. Defaults to 12. + + + + 12 + + + + mlp_ratio + + float + + + + Number of ratio used in MLP. Defaults to 4.0. + + + + 4.0 + + + + drop_rate + + float + + + + The drop ratio used in MLP. Defaults to 0.0. 
+ + + + 0.0 + + + + drop_path_rate + + float + + + + The drop ratio used in DropPath. Defaults to 0.0. + + + + 0.0 + + + + num_blocks + + int + + + + Number of blocks. Defaults to 8. + + + + 8 + + + + sparsity_threshold + + float + + + + The value of threshold for softshrink. Defaults to 0.01. + + + + 0.01 + + + + hard_thresholding_fraction + + float + + + + The value of threshold for keep mode. Defaults to 1.0. + + + + 1.0 + + + + num_timestamps + + int + + + + Number of timestamp. Defaults to 1. + + + + 1 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.AFNONet(("input", ), ("output", )) - - Source code in ppsci/arch/afno.py - 394 + + Source code in ppsci/arch/afno.py + 394 395 396 397 @@ -8466,9 +9324,9 @@ y = self._output_transform(x, y) return y - - + + @@ -8484,7 +9342,7 @@ - + @@ -8492,269 +9350,272 @@ + - PrecipNet + PrecipNet ¶ - - - Bases: Arch - + + + Bases: Arch + Precipitation Network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output",). - - - - required - - - - wind_model - - Arch - - - - Wind model. - - - - required - - - - img_size - - Tuple[int, ...] - - - - Image size. Defaults to (720, 1440). - - - - (720, 1440) - - - - patch_size - - Tuple[int, ...] - - - - Path. Defaults to (8, 8). - - - - (8, 8) - - - - in_channels - - int - - - - The input tensor channels. Defaults to 20. - - - - 20 - - - - out_channels - - int - - - - The output tensor channels. Defaults to 1. - - - - 1 - - - - embed_dim - - int - - - - The embedding dimension for PatchEmbed. Defaults to 768. - - - - 768 - - - - depth - - int - - - - Number of transformer depth. Defaults to 12. - - - - 12 - - - - mlp_ratio - - float - - - - Number of ratio used in MLP. Defaults to 4.0. 
- - - - 4.0 - - - - drop_rate - - float - - - - The drop ratio used in MLP. Defaults to 0.0. - - - - 0.0 - - - - drop_path_rate - - float - - - - The drop ratio used in DropPath. Defaults to 0.0. - - - - 0.0 - - - - num_blocks - - int - - - - Number of blocks. Defaults to 8. - - - - 8 - - - - sparsity_threshold - - float - - - - The value of threshold for softshrink. Defaults to 0.01. - - - - 0.01 - - - - hard_thresholding_fraction - - float - - - - The value of threshold for keep mode. Defaults to 1.0. - - - - 1.0 - - - - num_timestamps - - int - - - - Number of timestamp. Defaults to 1. - - - - 1 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output",). + + + + required + + + + wind_model + + Arch + + + + Wind model. + + + + required + + + + img_size + + Tuple[int, ...] + + + + Image size. Defaults to (720, 1440). + + + + (720, 1440) + + + + patch_size + + Tuple[int, ...] + + + + Path. Defaults to (8, 8). + + + + (8, 8) + + + + in_channels + + int + + + + The input tensor channels. Defaults to 20. + + + + 20 + + + + out_channels + + int + + + + The output tensor channels. Defaults to 1. + + + + 1 + + + + embed_dim + + int + + + + The embedding dimension for PatchEmbed. Defaults to 768. + + + + 768 + + + + depth + + int + + + + Number of transformer depth. Defaults to 12. + + + + 12 + + + + mlp_ratio + + float + + + + Number of ratio used in MLP. Defaults to 4.0. + + + + 4.0 + + + + drop_rate + + float + + + + The drop ratio used in MLP. Defaults to 0.0. + + + + 0.0 + + + + drop_path_rate + + float + + + + The drop ratio used in DropPath. Defaults to 0.0. + + + + 0.0 + + + + num_blocks + + int + + + + Number of blocks. Defaults to 8. + + + + 8 + + + + sparsity_threshold + + float + + + + The value of threshold for softshrink. Defaults to 0.01. 
+ + + + 0.01 + + + + hard_thresholding_fraction + + float + + + + The value of threshold for keep mode. Defaults to 1.0. + + + + 1.0 + + + + num_timestamps + + int + + + + Number of timestamp. Defaults to 1. + + + + 1 + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> wind_model = ppsci.arch.AFNONet(("input", ), ("output", )) >>> model = ppsci.arch.PrecipNet(("input", ), ("output", ), wind_model) - - Source code in ppsci/arch/afno.py - 556 + + Source code in ppsci/arch/afno.py + 556 557 558 559 @@ -9002,9 +9863,9 @@ y = self._output_transform(x, y) return y - - + + @@ -9020,7 +9881,7 @@ - + @@ -9028,199 +9889,202 @@ + - UNetEx + UNetEx ¶ - - - Bases: Arch - + + + Bases: Arch + U-Net Ribeiro M D, Rehman A, Ahmed S, et al. DeepCFD: Efficient steady-state laminar flow approximation with deep convolutional neural networks[J]. arXiv preprint arXiv:2004.08826, 2020. -Parameters: - - - - Name - Type - Description - Default - - - - - input_key - - str - - - - Name of function data for input. - - - - required - - - - output_key - - str - - - - Name of function data for output. - - - - required - - - - in_channel - - int - - - - Number of channels of input. - - - - required - - - - out_channel - - int - - - - Number of channels of output. - - - - required - - - - kernel_size - - int - - - - Size of kernel of convolution layer. Defaults to 3. - - - - 3 - - - - filters - - Tuple[int, ...] - - - - Number of filters. Defaults to (16, 32, 64). - - - - (16, 32, 64) - - - - layers - - int - - - - Number of encoders or decoders. Defaults to 3. - - - - 3 - - - - weight_norm - - bool - - - - Whether use weight normalization layer. Defaults to True. - - - - True - - - - batch_norm - - bool - - - - Whether add batch normalization layer. Defaults to True. - - - - True - - - - activation - - Type[Layer] - - - - Name of activation function. Defaults to nn.ReLU. - - - - ReLU - - - - final_activation - - Optional[Type[Layer]] - - - - Name of final activation function. 
Defaults to None. - - - - None - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_key + + str + + + + Name of function data for input. + + + + required + + + + output_key + + str + + + + Name of function data for output. + + + + required + + + + in_channel + + int + + + + Number of channels of input. + + + + required + + + + out_channel + + int + + + + Number of channels of output. + + + + required + + + + kernel_size + + int + + + + Size of kernel of convolution layer. Defaults to 3. + + + + 3 + + + + filters + + Tuple[int, ...] + + + + Number of filters. Defaults to (16, 32, 64). + + + + (16, 32, 64) + + + + layers + + int + + + + Number of encoders or decoders. Defaults to 3. + + + + 3 + + + + weight_norm + + bool + + + + Whether use weight normalization layer. Defaults to True. + + + + True + + + + batch_norm + + bool + + + + Whether add batch normalization layer. Defaults to True. + + + + True + + + + activation + + Type[Layer] + + + + Name of activation function. Defaults to nn.ReLU. + + + + ReLU + + + + final_activation + + Optional[Type[Layer]] + + + + Name of final activation function. Defaults to None. + + + + None + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.ppsci.arch.UNetEx("input", "output", 3, 3, (8, 16, 32, 32), 5, False, False) - - Source code in ppsci/arch/unetex.py - 176 + + Source code in
250 251 252 253 @@ -7085,9 +7929,9 @@ return {keys[0]: data_list[0]} return {key: data_list[i] for i, key in enumerate(keys)}
split_to_dict(data_list, keys)
Overwrite of split_to_dict() method belongs to Class base.Arch.
Reason for overwriting is there is no concat_to_tensor() method called in "tempoGAN" example. That is because input in "tempoGAN" example is not in a regular format, but a format like: @@ -7119,74 +7966,76 @@
data_list
List[Tensor]
The data to be split. It should be a list of tensor(s), but not a paddle.Tensor.
Keys of outputs.
Dict[str, paddle.Tensor]: Dict with split data.
355 + Parameters: +
355 356 357 358 @@ -7230,8 +8079,8 @@ return {keys[0]: data_list[0]} return {key: data_list[i] for i, key in enumerate(keys)}
PhysformerGPT2
Transformer decoder model for modeling physics.
Input keys, such as ("embeds",).
Output keys, such as ("pred_embeds",).
Number of transformer layers.
num_ctx
Context length of block.
The number of embedding size.
num_heads
The number of heads in multi-head attention.
embd_pdrop
The dropout probability used on embedding features. Defaults to 0.0.
attn_pdrop
The dropout probability used on attention weights. Defaults to 0.0.
resid_pdrop
The dropout probability used on block outputs. Defaults to 0.0.
initializer_range
Initializer range of linear layer. Defaults to 0.05.
0.05
>>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.PhysformerGPT2(("embeds", ), ("pred_embeds", ), 6, 16, 128, 4) - - Source code in ppsci/arch/physx_transformer.py - 240 + + Source code in ppsci/arch/physx_transformer.py + 240 241 242 243 @@ -7714,9 +8566,9 @@ y = self._output_transform(x, y) return y - - + + @@ -7732,7 +8584,7 @@ - + @@ -7740,60 +8592,63 @@ + - ModelList + ModelList ¶ - - - Bases: Arch - + + + Bases: Arch + ModelList layer which wrap more than one model that shares inputs. -Parameters: - - - - Name - Type - Description - Default - - - - - model_list - - Tuple[Arch, ...] - - - - Model(s) nested in tuple. - - - - required - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + model_list + + Tuple[Arch, ...] + + + + Model(s) nested in tuple. + + + + required + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> model1 = ppsci.arch.MLP(("x", "y"), ("u", "v"), 10, 128) >>> model2 = ppsci.arch.MLP(("x", "y"), ("w", "p"), 5, 128) >>> model = ppsci.arch.ModelList((model1, model2)) - - Source code in ppsci/arch/model_list.py - 24 + + Source code in ppsci/arch/model_list.py + 24 25 26 27 @@ -7873,9 +8728,9 @@ return y_all - - + + @@ -7891,7 +8746,7 @@ - + @@ -7899,254 +8754,257 @@ + - AFNONet + AFNONet ¶ - - - Bases: Arch - + + + Bases: Arch + Adaptive Fourier Neural Network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output",). - - - - required - - - - img_size - - Tuple[int, ...] - - - - Image size. Defaults to (720, 1440). - - - - (720, 1440) - - - - patch_size - - Tuple[int, ...] - - - - Path. Defaults to (8, 8). - - - - (8, 8) - - - - in_channels - - int - - - - The input tensor channels. Defaults to 20. - - - - 20 - - - - out_channels - - int - - - - The output tensor channels. Defaults to 20. 
- - - - 20 - - - - embed_dim - - int - - - - The embedding dimension for PatchEmbed. Defaults to 768. - - - - 768 - - - - depth - - int - - - - Number of transformer depth. Defaults to 12. - - - - 12 - - - - mlp_ratio - - float - - - - Number of ratio used in MLP. Defaults to 4.0. - - - - 4.0 - - - - drop_rate - - float - - - - The drop ratio used in MLP. Defaults to 0.0. - - - - 0.0 - - - - drop_path_rate - - float - - - - The drop ratio used in DropPath. Defaults to 0.0. - - - - 0.0 - - - - num_blocks - - int - - - - Number of blocks. Defaults to 8. - - - - 8 - - - - sparsity_threshold - - float - - - - The value of threshold for softshrink. Defaults to 0.01. - - - - 0.01 - - - - hard_thresholding_fraction - - float - - - - The value of threshold for keep mode. Defaults to 1.0. - - - - 1.0 - - - - num_timestamps - - int - - - - Number of timestamp. Defaults to 1. - - - - 1 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output",). + + + + required + + + + img_size + + Tuple[int, ...] + + + + Image size. Defaults to (720, 1440). + + + + (720, 1440) + + + + patch_size + + Tuple[int, ...] + + + + Path. Defaults to (8, 8). + + + + (8, 8) + + + + in_channels + + int + + + + The input tensor channels. Defaults to 20. + + + + 20 + + + + out_channels + + int + + + + The output tensor channels. Defaults to 20. + + + + 20 + + + + embed_dim + + int + + + + The embedding dimension for PatchEmbed. Defaults to 768. + + + + 768 + + + + depth + + int + + + + Number of transformer depth. Defaults to 12. + + + + 12 + + + + mlp_ratio + + float + + + + Number of ratio used in MLP. Defaults to 4.0. + + + + 4.0 + + + + drop_rate + + float + + + + The drop ratio used in MLP. Defaults to 0.0. + + + + 0.0 + + + + drop_path_rate + + float + + + + The drop ratio used in DropPath. 
Defaults to 0.0. + + + + 0.0 + + + + num_blocks + + int + + + + Number of blocks. Defaults to 8. + + + + 8 + + + + sparsity_threshold + + float + + + + The value of threshold for softshrink. Defaults to 0.01. + + + + 0.01 + + + + hard_thresholding_fraction + + float + + + + The value of threshold for keep mode. Defaults to 1.0. + + + + 1.0 + + + + num_timestamps + + int + + + + Number of timestamp. Defaults to 1. + + + + 1 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.AFNONet(("input", ), ("output", )) - - Source code in ppsci/arch/afno.py - 394 + + Source code in ppsci/arch/afno.py + 394 395 396 397 @@ -8466,9 +9324,9 @@ y = self._output_transform(x, y) return y - - + + @@ -8484,7 +9342,7 @@ - + @@ -8492,269 +9350,272 @@ + - PrecipNet + PrecipNet ¶ - - - Bases: Arch - + + + Bases: Arch + Precipitation Network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output",). - - - - required - - - - wind_model - - Arch - - - - Wind model. - - - - required - - - - img_size - - Tuple[int, ...] - - - - Image size. Defaults to (720, 1440). - - - - (720, 1440) - - - - patch_size - - Tuple[int, ...] - - - - Path. Defaults to (8, 8). - - - - (8, 8) - - - - in_channels - - int - - - - The input tensor channels. Defaults to 20. - - - - 20 - - - - out_channels - - int - - - - The output tensor channels. Defaults to 1. - - - - 1 - - - - embed_dim - - int - - - - The embedding dimension for PatchEmbed. Defaults to 768. - - - - 768 - - - - depth - - int - - - - Number of transformer depth. Defaults to 12. - - - - 12 - - - - mlp_ratio - - float - - - - Number of ratio used in MLP. Defaults to 4.0. - - - - 4.0 - - - - drop_rate - - float - - - - The drop ratio used in MLP. Defaults to 0.0. 
- - - - 0.0 - - - - drop_path_rate - - float - - - - The drop ratio used in DropPath. Defaults to 0.0. - - - - 0.0 - - - - num_blocks - - int - - - - Number of blocks. Defaults to 8. - - - - 8 - - - - sparsity_threshold - - float - - - - The value of threshold for softshrink. Defaults to 0.01. - - - - 0.01 - - - - hard_thresholding_fraction - - float - - - - The value of threshold for keep mode. Defaults to 1.0. - - - - 1.0 - - - - num_timestamps - - int - - - - Number of timestamp. Defaults to 1. - - - - 1 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output",). + + + + required + + + + wind_model + + Arch + + + + Wind model. + + + + required + + + + img_size + + Tuple[int, ...] + + + + Image size. Defaults to (720, 1440). + + + + (720, 1440) + + + + patch_size + + Tuple[int, ...] + + + + Path. Defaults to (8, 8). + + + + (8, 8) + + + + in_channels + + int + + + + The input tensor channels. Defaults to 20. + + + + 20 + + + + out_channels + + int + + + + The output tensor channels. Defaults to 1. + + + + 1 + + + + embed_dim + + int + + + + The embedding dimension for PatchEmbed. Defaults to 768. + + + + 768 + + + + depth + + int + + + + Number of transformer depth. Defaults to 12. + + + + 12 + + + + mlp_ratio + + float + + + + Number of ratio used in MLP. Defaults to 4.0. + + + + 4.0 + + + + drop_rate + + float + + + + The drop ratio used in MLP. Defaults to 0.0. + + + + 0.0 + + + + drop_path_rate + + float + + + + The drop ratio used in DropPath. Defaults to 0.0. + + + + 0.0 + + + + num_blocks + + int + + + + Number of blocks. Defaults to 8. + + + + 8 + + + + sparsity_threshold + + float + + + + The value of threshold for softshrink. Defaults to 0.01. + + + + 0.01 + + + + hard_thresholding_fraction + + float + + + + The value of threshold for keep mode. 
Defaults to 1.0. + + + + 1.0 + + + + num_timestamps + + int + + + + Number of timestamp. Defaults to 1. + + + + 1 + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> wind_model = ppsci.arch.AFNONet(("input", ), ("output", )) >>> model = ppsci.arch.PrecipNet(("input", ), ("output", ), wind_model) - - Source code in ppsci/arch/afno.py - 556 + + Source code in ppsci/arch/afno.py + 556 557 558 559 @@ -9002,9 +9863,9 @@ y = self._output_transform(x, y) return y - - + + @@ -9020,7 +9881,7 @@ - + @@ -9028,199 +9889,202 @@ + - UNetEx + UNetEx ¶ - - - Bases: Arch - + + + Bases: Arch + U-Net Ribeiro M D, Rehman A, Ahmed S, et al. DeepCFD: Efficient steady-state laminar flow approximation with deep convolutional neural networks[J]. arXiv preprint arXiv:2004.08826, 2020. -Parameters: - - - - Name - Type - Description - Default - - - - - input_key - - str - - - - Name of function data for input. - - - - required - - - - output_key - - str - - - - Name of function data for output. - - - - required - - - - in_channel - - int - - - - Number of channels of input. - - - - required - - - - out_channel - - int - - - - Number of channels of output. - - - - required - - - - kernel_size - - int - - - - Size of kernel of convolution layer. Defaults to 3. - - - - 3 - - - - filters - - Tuple[int, ...] - - - - Number of filters. Defaults to (16, 32, 64). - - - - (16, 32, 64) - - - - layers - - int - - - - Number of encoders or decoders. Defaults to 3. - - - - 3 - - - - weight_norm - - bool - - - - Whether use weight normalization layer. Defaults to True. - - - - True - - - - batch_norm - - bool - - - - Whether add batch normalization layer. Defaults to True. - - - - True - - - - activation - - Type[Layer] - - - - Name of activation function. Defaults to nn.ReLU. - - - - ReLU - - - - final_activation - - Optional[Type[Layer]] - - - - Name of final activation function. Defaults to None. 
- - - - None - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_key + + str + + + + Name of function data for input. + + + + required + + + + output_key + + str + + + + Name of function data for output. + + + + required + + + + in_channel + + int + + + + Number of channels of input. + + + + required + + + + out_channel + + int + + + + Number of channels of output. + + + + required + + + + kernel_size + + int + + + + Size of kernel of convolution layer. Defaults to 3. + + + + 3 + + + + filters + + Tuple[int, ...] + + + + Number of filters. Defaults to (16, 32, 64). + + + + (16, 32, 64) + + + + layers + + int + + + + Number of encoders or decoders. Defaults to 3. + + + + 3 + + + + weight_norm + + bool + + + + Whether use weight normalization layer. Defaults to True. + + + + True + + + + batch_norm + + bool + + + + Whether add batch normalization layer. Defaults to True. + + + + True + + + + activation + + Type[Layer] + + + + Name of activation function. Defaults to nn.ReLU. + + + + ReLU + + + + final_activation + + Optional[Type[Layer]] + + + + Name of final activation function. Defaults to None. + + + + None + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.ppsci.arch.UNetEx("input", "output", 3, 3, (8, 16, 32, 32), 5, False, False) - - Source code in ppsci/arch/unetex.py - 176 + + Source code in
>>> import ppsci >>> model = ppsci.arch.PhysformerGPT2(("embeds", ), ("pred_embeds", ), 6, 16, 128, 4)
ppsci/arch/physx_transformer.py
240 + + Source code in ppsci/arch/physx_transformer.py + 240 241 242 243 @@ -7714,9 +8566,9 @@ y = self._output_transform(x, y) return y - - + + @@ -7732,7 +8584,7 @@ -
240 241 242 243 @@ -7714,9 +8566,9 @@ y = self._output_transform(x, y) return y
ModelList
ModelList layer which wrap more than one model that shares inputs.
model_list
Tuple[Arch, ...]
Model(s) nested in tuple.
>>> import ppsci + +Examples: + >>> import ppsci >>> model1 = ppsci.arch.MLP(("x", "y"), ("u", "v"), 10, 128) >>> model2 = ppsci.arch.MLP(("x", "y"), ("w", "p"), 5, 128) >>> model = ppsci.arch.ModelList((model1, model2)) - - Source code in ppsci/arch/model_list.py - 24 + + Source code in ppsci/arch/model_list.py + 24 25 26 27 @@ -7873,9 +8728,9 @@ return y_all - - + + @@ -7891,7 +8746,7 @@ - + @@ -7899,254 +8754,257 @@ + - AFNONet + AFNONet ¶ - - - Bases: Arch - + + + Bases: Arch + Adaptive Fourier Neural Network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output",). - - - - required - - - - img_size - - Tuple[int, ...] - - - - Image size. Defaults to (720, 1440). - - - - (720, 1440) - - - - patch_size - - Tuple[int, ...] - - - - Path. Defaults to (8, 8). - - - - (8, 8) - - - - in_channels - - int - - - - The input tensor channels. Defaults to 20. - - - - 20 - - - - out_channels - - int - - - - The output tensor channels. Defaults to 20. - - - - 20 - - - - embed_dim - - int - - - - The embedding dimension for PatchEmbed. Defaults to 768. - - - - 768 - - - - depth - - int - - - - Number of transformer depth. Defaults to 12. - - - - 12 - - - - mlp_ratio - - float - - - - Number of ratio used in MLP. Defaults to 4.0. - - - - 4.0 - - - - drop_rate - - float - - - - The drop ratio used in MLP. Defaults to 0.0. - - - - 0.0 - - - - drop_path_rate - - float - - - - The drop ratio used in DropPath. Defaults to 0.0. - - - - 0.0 - - - - num_blocks - - int - - - - Number of blocks. Defaults to 8. - - - - 8 - - - - sparsity_threshold - - float - - - - The value of threshold for softshrink. Defaults to 0.01. - - - - 0.01 - - - - hard_thresholding_fraction - - float - - - - The value of threshold for keep mode. Defaults to 1.0. 
- - - - 1.0 - - - - num_timestamps - - int - - - - Number of timestamp. Defaults to 1. - - - - 1 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output",). + + + + required + + + + img_size + + Tuple[int, ...] + + + + Image size. Defaults to (720, 1440). + + + + (720, 1440) + + + + patch_size + + Tuple[int, ...] + + + + Path. Defaults to (8, 8). + + + + (8, 8) + + + + in_channels + + int + + + + The input tensor channels. Defaults to 20. + + + + 20 + + + + out_channels + + int + + + + The output tensor channels. Defaults to 20. + + + + 20 + + + + embed_dim + + int + + + + The embedding dimension for PatchEmbed. Defaults to 768. + + + + 768 + + + + depth + + int + + + + Number of transformer depth. Defaults to 12. + + + + 12 + + + + mlp_ratio + + float + + + + Number of ratio used in MLP. Defaults to 4.0. + + + + 4.0 + + + + drop_rate + + float + + + + The drop ratio used in MLP. Defaults to 0.0. + + + + 0.0 + + + + drop_path_rate + + float + + + + The drop ratio used in DropPath. Defaults to 0.0. + + + + 0.0 + + + + num_blocks + + int + + + + Number of blocks. Defaults to 8. + + + + 8 + + + + sparsity_threshold + + float + + + + The value of threshold for softshrink. Defaults to 0.01. + + + + 0.01 + + + + hard_thresholding_fraction + + float + + + + The value of threshold for keep mode. Defaults to 1.0. + + + + 1.0 + + + + num_timestamps + + int + + + + Number of timestamp. Defaults to 1. 
+ + + + 1 + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.AFNONet(("input", ), ("output", )) - - Source code in ppsci/arch/afno.py - 394 + + Source code in ppsci/arch/afno.py + 394 395 396 397 @@ -8466,9 +9324,9 @@ y = self._output_transform(x, y) return y - - + + @@ -8484,7 +9342,7 @@ - + @@ -8492,269 +9350,272 @@ + - PrecipNet + PrecipNet ¶ - - - Bases: Arch - + + + Bases: Arch + Precipitation Network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output",). - - - - required - - - - wind_model - - Arch - - - - Wind model. - - - - required - - - - img_size - - Tuple[int, ...] - - - - Image size. Defaults to (720, 1440). - - - - (720, 1440) - - - - patch_size - - Tuple[int, ...] - - - - Path. Defaults to (8, 8). - - - - (8, 8) - - - - in_channels - - int - - - - The input tensor channels. Defaults to 20. - - - - 20 - - - - out_channels - - int - - - - The output tensor channels. Defaults to 1. - - - - 1 - - - - embed_dim - - int - - - - The embedding dimension for PatchEmbed. Defaults to 768. - - - - 768 - - - - depth - - int - - - - Number of transformer depth. Defaults to 12. - - - - 12 - - - - mlp_ratio - - float - - - - Number of ratio used in MLP. Defaults to 4.0. - - - - 4.0 - - - - drop_rate - - float - - - - The drop ratio used in MLP. Defaults to 0.0. - - - - 0.0 - - - - drop_path_rate - - float - - - - The drop ratio used in DropPath. Defaults to 0.0. - - - - 0.0 - - - - num_blocks - - int - - - - Number of blocks. Defaults to 8. - - - - 8 - - - - sparsity_threshold - - float - - - - The value of threshold for softshrink. Defaults to 0.01. - - - - 0.01 - - - - hard_thresholding_fraction - - float - - - - The value of threshold for keep mode. Defaults to 1.0. 
- - - - 1.0 - - - - num_timestamps - - int - - - - Number of timestamp. Defaults to 1. - - - - 1 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output",). + + + + required + + + + wind_model + + Arch + + + + Wind model. + + + + required + + + + img_size + + Tuple[int, ...] + + + + Image size. Defaults to (720, 1440). + + + + (720, 1440) + + + + patch_size + + Tuple[int, ...] + + + + Path. Defaults to (8, 8). + + + + (8, 8) + + + + in_channels + + int + + + + The input tensor channels. Defaults to 20. + + + + 20 + + + + out_channels + + int + + + + The output tensor channels. Defaults to 1. + + + + 1 + + + + embed_dim + + int + + + + The embedding dimension for PatchEmbed. Defaults to 768. + + + + 768 + + + + depth + + int + + + + Number of transformer depth. Defaults to 12. + + + + 12 + + + + mlp_ratio + + float + + + + Number of ratio used in MLP. Defaults to 4.0. + + + + 4.0 + + + + drop_rate + + float + + + + The drop ratio used in MLP. Defaults to 0.0. + + + + 0.0 + + + + drop_path_rate + + float + + + + The drop ratio used in DropPath. Defaults to 0.0. + + + + 0.0 + + + + num_blocks + + int + + + + Number of blocks. Defaults to 8. + + + + 8 + + + + sparsity_threshold + + float + + + + The value of threshold for softshrink. Defaults to 0.01. + + + + 0.01 + + + + hard_thresholding_fraction + + float + + + + The value of threshold for keep mode. Defaults to 1.0. + + + + 1.0 + + + + num_timestamps + + int + + + + Number of timestamp. Defaults to 1. 
+ + + + 1 + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> wind_model = ppsci.arch.AFNONet(("input", ), ("output", )) >>> model = ppsci.arch.PrecipNet(("input", ), ("output", ), wind_model) - - Source code in ppsci/arch/afno.py - 556 + + Source code in ppsci/arch/afno.py + 556 557 558 559 @@ -9002,9 +9863,9 @@ y = self._output_transform(x, y) return y - - + + @@ -9020,7 +9881,7 @@ - + @@ -9028,199 +9889,202 @@ + - UNetEx + UNetEx ¶ - - - Bases: Arch - + + + Bases: Arch + U-Net Ribeiro M D, Rehman A, Ahmed S, et al. DeepCFD: Efficient steady-state laminar flow approximation with deep convolutional neural networks[J]. arXiv preprint arXiv:2004.08826, 2020. -Parameters: - - - - Name - Type - Description - Default - - - - - input_key - - str - - - - Name of function data for input. - - - - required - - - - output_key - - str - - - - Name of function data for output. - - - - required - - - - in_channel - - int - - - - Number of channels of input. - - - - required - - - - out_channel - - int - - - - Number of channels of output. - - - - required - - - - kernel_size - - int - - - - Size of kernel of convolution layer. Defaults to 3. - - - - 3 - - - - filters - - Tuple[int, ...] - - - - Number of filters. Defaults to (16, 32, 64). - - - - (16, 32, 64) - - - - layers - - int - - - - Number of encoders or decoders. Defaults to 3. - - - - 3 - - - - weight_norm - - bool - - - - Whether use weight normalization layer. Defaults to True. - - - - True - - - - batch_norm - - bool - - - - Whether add batch normalization layer. Defaults to True. - - - - True - - - - activation - - Type[Layer] - - - - Name of activation function. Defaults to nn.ReLU. - - - - ReLU - - - - final_activation - - Optional[Type[Layer]] - - - - Name of final activation function. Defaults to None. - - - - None - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_key + + str + + + + Name of function data for input. 
+ + + + required + + + + output_key + + str + + + + Name of function data for output. + + + + required + + + + in_channel + + int + + + + Number of channels of input. + + + + required + + + + out_channel + + int + + + + Number of channels of output. + + + + required + + + + kernel_size + + int + + + + Size of kernel of convolution layer. Defaults to 3. + + + + 3 + + + + filters + + Tuple[int, ...] + + + + Number of filters. Defaults to (16, 32, 64). + + + + (16, 32, 64) + + + + layers + + int + + + + Number of encoders or decoders. Defaults to 3. + + + + 3 + + + + weight_norm + + bool + + + + Whether use weight normalization layer. Defaults to True. + + + + True + + + + batch_norm + + bool + + + + Whether add batch normalization layer. Defaults to True. + + + + True + + + + activation + + Type[Layer] + + + + Name of activation function. Defaults to nn.ReLU. + + + + ReLU + + + + final_activation + + Optional[Type[Layer]] + + + + Name of final activation function. Defaults to None. + + + + None + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.ppsci.arch.UNetEx("input", "output", 3, 3, (8, 16, 32, 32), 5, False, False) - - Source code in ppsci/arch/unetex.py - 176 + + Source code in
>>> import ppsci >>> model1 = ppsci.arch.MLP(("x", "y"), ("u", "v"), 10, 128) >>> model2 = ppsci.arch.MLP(("x", "y"), ("w", "p"), 5, 128) >>> model = ppsci.arch.ModelList((model1, model2))
ppsci/arch/model_list.py
24 + + Source code in ppsci/arch/model_list.py + 24 25 26 27 @@ -7873,9 +8728,9 @@ return y_all - - + + @@ -7891,7 +8746,7 @@ -
24 25 26 27 @@ -7873,9 +8728,9 @@ return y_all
AFNONet
Adaptive Fourier Neural Network.
Name of input keys, such as ("input",).
Name of output keys, such as ("output",).
img_size
Image size. Defaults to (720, 1440).
(720, 1440)
patch_size
Path. Defaults to (8, 8).
(8, 8)
in_channels
The input tensor channels. Defaults to 20.
20
The output tensor channels. Defaults to 20.
embed_dim
The embedding dimension for PatchEmbed. Defaults to 768.
768
depth
Number of transformer depth. Defaults to 12.
12
mlp_ratio
Number of ratio used in MLP. Defaults to 4.0.
4.0
drop_rate
The drop ratio used in MLP. Defaults to 0.0.
drop_path_rate
The drop ratio used in DropPath. Defaults to 0.0.
num_blocks
Number of blocks. Defaults to 8.
8
sparsity_threshold
The value of threshold for softshrink. Defaults to 0.01.
0.01
hard_thresholding_fraction
The value of threshold for keep mode. Defaults to 1.0.
1.0
num_timestamps
Number of timestamp. Defaults to 1.
1
>>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.AFNONet(("input", ), ("output", )) - - Source code in ppsci/arch/afno.py - 394 + + Source code in ppsci/arch/afno.py + 394 395 396 397 @@ -8466,9 +9324,9 @@ y = self._output_transform(x, y) return y - - + + @@ -8484,7 +9342,7 @@ - + @@ -8492,269 +9350,272 @@ + - PrecipNet + PrecipNet ¶ - - - Bases: Arch - + + + Bases: Arch + Precipitation Network. -Parameters: - - - - Name - Type - Description - Default - - - - - input_keys - - Tuple[str, ...] - - - - Name of input keys, such as ("input",). - - - - required - - - - output_keys - - Tuple[str, ...] - - - - Name of output keys, such as ("output",). - - - - required - - - - wind_model - - Arch - - - - Wind model. - - - - required - - - - img_size - - Tuple[int, ...] - - - - Image size. Defaults to (720, 1440). - - - - (720, 1440) - - - - patch_size - - Tuple[int, ...] - - - - Path. Defaults to (8, 8). - - - - (8, 8) - - - - in_channels - - int - - - - The input tensor channels. Defaults to 20. - - - - 20 - - - - out_channels - - int - - - - The output tensor channels. Defaults to 1. - - - - 1 - - - - embed_dim - - int - - - - The embedding dimension for PatchEmbed. Defaults to 768. - - - - 768 - - - - depth - - int - - - - Number of transformer depth. Defaults to 12. - - - - 12 - - - - mlp_ratio - - float - - - - Number of ratio used in MLP. Defaults to 4.0. - - - - 4.0 - - - - drop_rate - - float - - - - The drop ratio used in MLP. Defaults to 0.0. - - - - 0.0 - - - - drop_path_rate - - float - - - - The drop ratio used in DropPath. Defaults to 0.0. - - - - 0.0 - - - - num_blocks - - int - - - - Number of blocks. Defaults to 8. - - - - 8 - - - - sparsity_threshold - - float - - - - The value of threshold for softshrink. Defaults to 0.01. - - - - 0.01 - - - - hard_thresholding_fraction - - float - - - - The value of threshold for keep mode. Defaults to 1.0. - - - - 1.0 - - - - num_timestamps - - int - - - - Number of timestamp. Defaults to 1. 
- - - - 1 - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_keys + + Tuple[str, ...] + + + + Name of input keys, such as ("input",). + + + + required + + + + output_keys + + Tuple[str, ...] + + + + Name of output keys, such as ("output",). + + + + required + + + + wind_model + + Arch + + + + Wind model. + + + + required + + + + img_size + + Tuple[int, ...] + + + + Image size. Defaults to (720, 1440). + + + + (720, 1440) + + + + patch_size + + Tuple[int, ...] + + + + Path. Defaults to (8, 8). + + + + (8, 8) + + + + in_channels + + int + + + + The input tensor channels. Defaults to 20. + + + + 20 + + + + out_channels + + int + + + + The output tensor channels. Defaults to 1. + + + + 1 + + + + embed_dim + + int + + + + The embedding dimension for PatchEmbed. Defaults to 768. + + + + 768 + + + + depth + + int + + + + Number of transformer depth. Defaults to 12. + + + + 12 + + + + mlp_ratio + + float + + + + Number of ratio used in MLP. Defaults to 4.0. + + + + 4.0 + + + + drop_rate + + float + + + + The drop ratio used in MLP. Defaults to 0.0. + + + + 0.0 + + + + drop_path_rate + + float + + + + The drop ratio used in DropPath. Defaults to 0.0. + + + + 0.0 + + + + num_blocks + + int + + + + Number of blocks. Defaults to 8. + + + + 8 + + + + sparsity_threshold + + float + + + + The value of threshold for softshrink. Defaults to 0.01. + + + + 0.01 + + + + hard_thresholding_fraction + + float + + + + The value of threshold for keep mode. Defaults to 1.0. + + + + 1.0 + + + + num_timestamps + + int + + + + Number of timestamp. Defaults to 1. 
+ + + + 1 + + + + -Examples: - >>> import ppsci + +Examples: + >>> import ppsci >>> wind_model = ppsci.arch.AFNONet(("input", ), ("output", )) >>> model = ppsci.arch.PrecipNet(("input", ), ("output", ), wind_model) - - Source code in ppsci/arch/afno.py - 556 + + Source code in ppsci/arch/afno.py + 556 557 558 559 @@ -9002,9 +9863,9 @@ y = self._output_transform(x, y) return y - - + + @@ -9020,7 +9881,7 @@ - + @@ -9028,199 +9889,202 @@ + - UNetEx + UNetEx ¶ - - - Bases: Arch - + + + Bases: Arch + U-Net Ribeiro M D, Rehman A, Ahmed S, et al. DeepCFD: Efficient steady-state laminar flow approximation with deep convolutional neural networks[J]. arXiv preprint arXiv:2004.08826, 2020. -Parameters: - - - - Name - Type - Description - Default - - - - - input_key - - str - - - - Name of function data for input. - - - - required - - - - output_key - - str - - - - Name of function data for output. - - - - required - - - - in_channel - - int - - - - Number of channels of input. - - - - required - - - - out_channel - - int - - - - Number of channels of output. - - - - required - - - - kernel_size - - int - - - - Size of kernel of convolution layer. Defaults to 3. - - - - 3 - - - - filters - - Tuple[int, ...] - - - - Number of filters. Defaults to (16, 32, 64). - - - - (16, 32, 64) - - - - layers - - int - - - - Number of encoders or decoders. Defaults to 3. - - - - 3 - - - - weight_norm - - bool - - - - Whether use weight normalization layer. Defaults to True. - - - - True - - - - batch_norm - - bool - - - - Whether add batch normalization layer. Defaults to True. - - - - True - - - - activation - - Type[Layer] - - - - Name of activation function. Defaults to nn.ReLU. - - - - ReLU - - - - final_activation - - Optional[Type[Layer]] - - - - Name of final activation function. Defaults to None. - - - - None - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_key + + str + + + + Name of function data for input. 
+ + + + required + + + + output_key + + str + + + + Name of function data for output. + + + + required + + + + in_channel + + int + + + + Number of channels of input. + + + + required + + + + out_channel + + int + + + + Number of channels of output. + + + + required + + + + kernel_size + + int + + + + Size of kernel of convolution layer. Defaults to 3. + + + + 3 + + + + filters + + Tuple[int, ...] + + + + Number of filters. Defaults to (16, 32, 64). + + + + (16, 32, 64) + + + + layers + + int + + + + Number of encoders or decoders. Defaults to 3. + + + + 3 + + + + weight_norm + + bool + + + + Whether use weight normalization layer. Defaults to True. + + + + True + + + + batch_norm + + bool + + + + Whether add batch normalization layer. Defaults to True. + + + + True + + + + activation + + Type[Layer] + + + + Name of activation function. Defaults to nn.ReLU. + + + + ReLU + + + + final_activation + + Optional[Type[Layer]] + + + + Name of final activation function. Defaults to None. + + + + None + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.ppsci.arch.UNetEx("input", "output", 3, 3, (8, 16, 32, 32), 5, False, False) - - Source code in ppsci/arch/unetex.py - 176 + + Source code in
>>> import ppsci >>> model = ppsci.arch.AFNONet(("input", ), ("output", ))
ppsci/arch/afno.py
394 + + Source code in ppsci/arch/afno.py + 394 395 396 397 @@ -8466,9 +9324,9 @@ y = self._output_transform(x, y) return y - - + + @@ -8484,7 +9342,7 @@ -
394 395 396 397 @@ -8466,9 +9324,9 @@ y = self._output_transform(x, y) return y
PrecipNet
Precipitation Network.
wind_model
Wind model.
The output tensor channels. Defaults to 1.
>>> import ppsci + +Examples: + >>> import ppsci >>> wind_model = ppsci.arch.AFNONet(("input", ), ("output", )) >>> model = ppsci.arch.PrecipNet(("input", ), ("output", ), wind_model) - - Source code in ppsci/arch/afno.py - 556 + + Source code in ppsci/arch/afno.py + 556 557 558 559 @@ -9002,9 +9863,9 @@ y = self._output_transform(x, y) return y - - + + @@ -9020,7 +9881,7 @@ - + @@ -9028,199 +9889,202 @@ + - UNetEx + UNetEx ¶ - - - Bases: Arch - + + + Bases: Arch + U-Net Ribeiro M D, Rehman A, Ahmed S, et al. DeepCFD: Efficient steady-state laminar flow approximation with deep convolutional neural networks[J]. arXiv preprint arXiv:2004.08826, 2020. -Parameters: - - - - Name - Type - Description - Default - - - - - input_key - - str - - - - Name of function data for input. - - - - required - - - - output_key - - str - - - - Name of function data for output. - - - - required - - - - in_channel - - int - - - - Number of channels of input. - - - - required - - - - out_channel - - int - - - - Number of channels of output. - - - - required - - - - kernel_size - - int - - - - Size of kernel of convolution layer. Defaults to 3. - - - - 3 - - - - filters - - Tuple[int, ...] - - - - Number of filters. Defaults to (16, 32, 64). - - - - (16, 32, 64) - - - - layers - - int - - - - Number of encoders or decoders. Defaults to 3. - - - - 3 - - - - weight_norm - - bool - - - - Whether use weight normalization layer. Defaults to True. - - - - True - - - - batch_norm - - bool - - - - Whether add batch normalization layer. Defaults to True. - - - - True - - - - activation - - Type[Layer] - - - - Name of activation function. Defaults to nn.ReLU. - - - - ReLU - - - - final_activation - - Optional[Type[Layer]] - - - - Name of final activation function. Defaults to None. - - - - None - - - - + + Parameters: + + + + Name + Type + Description + Default + + + + + input_key + + str + + + + Name of function data for input. 
+ + + + required + + + + output_key + + str + + + + Name of function data for output. + + + + required + + + + in_channel + + int + + + + Number of channels of input. + + + + required + + + + out_channel + + int + + + + Number of channels of output. + + + + required + + + + kernel_size + + int + + + + Size of kernel of convolution layer. Defaults to 3. + + + + 3 + + + + filters + + Tuple[int, ...] + + + + Number of filters. Defaults to (16, 32, 64). + + + + (16, 32, 64) + + + + layers + + int + + + + Number of encoders or decoders. Defaults to 3. + + + + 3 + + + + weight_norm + + bool + + + + Whether use weight normalization layer. Defaults to True. + + + + True + + + + batch_norm + + bool + + + + Whether add batch normalization layer. Defaults to True. + + + + True + + + + activation + + Type[Layer] + + + + Name of activation function. Defaults to nn.ReLU. + + + + ReLU + + + + final_activation + + Optional[Type[Layer]] + + + + Name of final activation function. Defaults to None. + + + + None + + + + + -Examples: - >>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.ppsci.arch.UNetEx("input", "output", 3, 3, (8, 16, 32, 32), 5, False, False) - - Source code in ppsci/arch/unetex.py - 176 + + Source code in
>>> import ppsci >>> wind_model = ppsci.arch.AFNONet(("input", ), ("output", )) >>> model = ppsci.arch.PrecipNet(("input", ), ("output", ), wind_model)
556 + + Source code in ppsci/arch/afno.py + 556 557 558 559 @@ -9002,9 +9863,9 @@ y = self._output_transform(x, y) return y - - + + @@ -9020,7 +9881,7 @@ -
556 557 558 559 @@ -9002,9 +9863,9 @@ y = self._output_transform(x, y) return y
UNetEx
U-Net
Ribeiro M D, Rehman A, Ahmed S, et al. DeepCFD: Efficient steady-state laminar flow approximation with deep convolutional neural networks[J]. arXiv preprint arXiv:2004.08826, 2020.
input_key
Name of function data for input.
output_key
Name of function data for output.
Number of channels of input.
out_channel
Number of channels of output.
kernel_size
Size of kernel of convolution layer. Defaults to 3.
filters
Number of filters. Defaults to (16, 32, 64).
(16, 32, 64)
layers
Number of encoders or decoders. Defaults to 3.
Whether use weight normalization layer. Defaults to True.
batch_norm
Whether add batch normalization layer. Defaults to True.
Type[Layer]
Name of activation function. Defaults to nn.ReLU.
ReLU
final_activation
Optional[Type[Layer]]
Name of final activation function. Defaults to None.
>>> import ppsci +Examples: + >>> import ppsci >>> model = ppsci.arch.ppsci.arch.UNetEx("input", "output", 3, 3, (8, 16, 32, 32), 5, False, False) - - Source code in ppsci/arch/unetex.py - 176 + + Source code in
>>> import ppsci >>> model = ppsci.arch.ppsci.arch.UNetEx("input", "output", 3, 3, (8, 16, 32, 32), 5, False, False)
ppsci/arch/unetex.py
176 + + Source code in