diff --git a/sdk/python/v1beta1/kubeflow/katib/api/katib_client.py b/sdk/python/v1beta1/kubeflow/katib/api/katib_client.py index 6993919dea2..497b8cd9723 100644 --- a/sdk/python/v1beta1/kubeflow/katib/api/katib_client.py +++ b/sdk/python/v1beta1/kubeflow/katib/api/katib_client.py @@ -12,15 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -import copy +import inspect import json import logging import multiprocessing +import textwrap import time -from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union +from typing import Any, Callable, Dict, List, Optional, Union import kubeflow.katib.katib_api_pb2 as katib_api_pb2 -from kubeflow.katib import models +from kubeflow.katib import models, types from kubeflow.katib.api_client import ApiClient from kubeflow.katib.constants import constants from kubeflow.katib.utils import utils @@ -30,14 +31,6 @@ logger = logging.getLogger(__name__) -if TYPE_CHECKING: - from kubeflow.storage_initializer.hugging_face import ( - HuggingFaceDatasetParams, - HuggingFaceModelParams, - HuggingFaceTrainerParams, - ) - from kubeflow.storage_initializer.s3 import S3DatasetParams - class KatibClient(object): def __init__( @@ -171,15 +164,15 @@ def create_experiment( ) ) + # fmt: off def tune( self, # TODO (andreyvelich): How to be consistent with other APIs (name) ? name: str, - model_provider_parameters: Optional["HuggingFaceModelParams"] = None, - dataset_provider_parameters: Optional[ - Union["HuggingFaceDatasetParams", "S3DatasetParams"] - ] = None, - trainer_parameters: Optional["HuggingFaceTrainerParams"] = None, + model_provider_parameters: Optional["HuggingFaceModelParams"] = None, # noqa: F821 + dataset_provider_parameters: Optional[Union[ + "HuggingFaceDatasetParams", "S3DatasetParams"]] = None, # noqa: F821 + trainer_parameters: Optional["HuggingFaceTrainerParams"] = None, # noqa: F821 storage_config: Optional[Dict[str, Optional[Union[str, List[str]]]]] = { "size": constants.PVC_DEFAULT_SIZE, "storage_class": None, @@ -203,12 +196,16 @@ def tune( max_trial_count: int = None, parallel_trial_count: int = None, max_failed_trial_count: int = None, - resources_per_trial: Union[dict, client.V1ResourceRequirements, None] = None, + resources_per_trial: Union[ + dict, client.V1ResourceRequirements, types.TrainerResources, None + ] = None, retain_trials: bool = False, packages_to_install: List[str] = None, pip_index_url: str = "https://pypi.org/simple", metrics_collector_config: Dict[str, Any] = {"kind": "StdOut"}, ): + # fmt: on + """ Create HyperParameter Tuning Katib Experiment using one of the following options: @@ -296,8 +293,9 @@ class name in this argument. parallel_trial_count: Number of Trials that Experiment runs in parallel. max_failed_trial_count: Maximum number of Trials allowed to fail. resources_per_trial: A parameter that lets you specify how much resources - each trial container should have. You can either specify a - kubernetes.client.V1ResourceRequirements object (documented here: + each trial container should have. + For custom objective function, you can either specify a kubernetes.client. + V1ResourceRequirements object (documented here: https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1ResourceRequirements.md) or a dictionary that includes one or more of the following keys: `cpu`, `memory`, or `gpu` (other keys will be ignored). Appropriate values @@ -313,6 +311,29 @@ class name in this argument. `nvidia.com/gpu`, i.e. an NVIDIA GPU. 
If you need a different type of GPU, pass in a V1ResourceRequirement instance instead, since it's more flexible. This parameter is optional and defaults to None. + + For external models and datasets, you can specify a types.TrainerResources object, + which includes `num_workers`, `num_procs_per_worker`, and `resources_per_worker`. + For example: + ``` + resources_per_trial = types.TrainerResources( + num_workers=4, + num_procs_per_worker=2, + resources_per_worker={ + "gpu": "2", + "cpu": "5", + "memory": "10Gi" + } + ) + ``` + - num_workers: Number of PyTorchJob workers. + - num_procs_per_worker: Number of processes per PyTorchJob worker for + `torchrun` CLI. You can use this parameter if you want to use more than 1 GPU + per PyTorchJob worker. + - resources_per_worker: A parameter that lets you specify how much resources + each PyTorchJob worker container should have. You can either specify + a kubernetes.client.V1ResourceRequirements object or a dictionary, same as + resources specified under the option of custom objective function. retain_trials: Whether Trials' resources (e.g. pods) are deleted after Succeeded state. packages_to_install: List of Python packages to install in addition to the base image packages. These packages are installed before @@ -402,41 +423,18 @@ class name in this argument. if max_failed_trial_count is not None: experiment.spec.max_failed_trial_count = max_failed_trial_count - # Add environment variables to the Katib Experiment. - env = [] - env_from = [] - if isinstance(env_per_trial, dict): - env = [ - client.V1EnvVar(name=str(k), value=str(v)) - for k, v in env_per_trial.items() - ] - elif env_per_trial: - for x in env_per_trial: - if isinstance(x, client.V1EnvVar): - env.append(x) - elif isinstance(x, client.V1EnvFromSource): - env_from.append(x) - else: - raise ValueError( - f"Incorrect value for env_per_trial: {env_per_trial}" - ) - - # Add metrics collector to the Katib Experiment. - # Up to now, we only support parameter `kind`, of which default value - # is `StdOut`, to specify the kind of metrics collector. - experiment.spec.metrics_collector_spec = models.V1beta1MetricsCollectorSpec( - collector=models.V1beta1CollectorSpec(kind=metrics_collector_config["kind"]) - ) - - # Create Container and Pod specifications. # If users choose to use a custom objective function. if objective is not None: - if not base_image or not parameters: - raise ValueError("One of the required parameters is None.") - # Validate objective function. utils.validate_objective_function(objective) + # Extract objective function implementation. + objective_code = inspect.getsource(objective) + + # Objective function might be defined in some indented scope + # (e.g. in another function). We need to dedent the function code. + objective_code = textwrap.dedent(objective_code) + # Iterate over input parameters. input_params = {} experiment_params = [] @@ -459,21 +457,110 @@ class name in this argument. # Otherwise, add value to the function input. input_params[p_name] = p_value - container_spec = utils.get_container_spec( - name=constants.DEFAULT_PRIMARY_CONTAINER_NAME, - base_image=base_image, - train_func=objective, - train_func_parameters=input_params, - packages_to_install=packages_to_install, - pip_index_url=pip_index_url, - resources=resources_per_trial, - env=env, - env_from=env_from, + # Wrap objective function to execute it from the file. 
For example: + # def objective(parameters): + # print(f'Parameters are {parameters}') + # objective({ + # 'lr': '${trialParameters.lr}', + # 'epochs': '${trialParameters.epochs}', + # 'is_dist': False + # }) + objective_code = f"{objective_code}\n{objective.__name__}({input_params})\n" + + # Prepare execute script template. + exec_script = textwrap.dedent( + """ + program_path=$(mktemp -d) + read -r -d '' SCRIPT << EOM\n + {objective_code} + EOM + printf "%s" "$SCRIPT" > $program_path/ephemeral_objective.py + python3 -u $program_path/ephemeral_objective.py""" ) - pod_spec = utils.get_pod_template_spec( - containers=[container_spec], - restart_policy="Never", + # Add objective code to the execute script. + exec_script = exec_script.format(objective_code=objective_code) + + # Install Python packages if that is required. + if packages_to_install is not None: + exec_script = ( + utils.get_script_for_python_packages( + packages_to_install, pip_index_url + ) + + exec_script + ) + + if isinstance(resources_per_trial, dict): + if "gpu" in resources_per_trial: + resources_per_trial["nvidia.com/gpu"] = resources_per_trial.pop( + "gpu" + ) + + resources_per_trial = client.V1ResourceRequirements( + requests=resources_per_trial, + limits=resources_per_trial, + ) + + env = [] + env_from = [] + if isinstance(env_per_trial, dict): + env = [ + client.V1EnvVar(name=str(k), value=str(v)) + for k, v in env_per_trial.items() + ] + elif env_per_trial: + for x in env_per_trial: + if isinstance(x, client.V1EnvVar): + env.append(x) + elif isinstance(x, client.V1EnvFromSource): + env_from.append(x) + else: + raise ValueError( + f"Incorrect value for env_per_trial: {env_per_trial}" + ) + + # Add metrics collector to the Katib Experiment. + # Up to now, we only support parameter `kind`, of which default value + # is `StdOut`, to specify the kind of metrics collector. + experiment.spec.metrics_collector_spec = models.V1beta1MetricsCollectorSpec( + collector=models.V1beta1CollectorSpec( + kind=metrics_collector_config["kind"] + ) + ) + + # Create Trial specification. + trial_spec = client.V1Job( + api_version="batch/v1", + kind="Job", + spec=client.V1JobSpec( + template=client.V1PodTemplateSpec( + metadata=models.V1ObjectMeta( + annotations={"sidecar.istio.io/inject": "false"} + ), + spec=client.V1PodSpec( + restart_policy="Never", + containers=[ + client.V1Container( + name=constants.DEFAULT_PRIMARY_CONTAINER_NAME, + image=base_image, + command=["bash", "-c"], + args=[exec_script], + env=env if env else None, + env_from=env_from if env_from else None, + resources=resources_per_trial, + ) + ], + ), + ) + ), + ) + + # Create Trial template. + trial_template = models.V1beta1TrialTemplate( + primary_container_name=constants.DEFAULT_PRIMARY_CONTAINER_NAME, + retain=retain_trials, + trial_parameters=trial_params, + trial_spec=trial_spec, ) # If users choose to use external models and datasets. @@ -495,34 +582,28 @@ class name in this argument. HuggingFaceModelParams, ) from kubeflow.storage_initializer.s3 import S3DatasetParams + from kubeflow.training import models as training_models from kubeflow.training.constants.constants import ( + JOB_PARAMETERS, + PYTORCHJOB_KIND, STORAGE_INITIALIZER, STORAGE_INITIALIZER_IMAGE, STORAGE_INITIALIZER_VOLUME_MOUNT, TRAINER_TRANSFORMER_IMAGE, ) + from kubeflow.training.utils import utils as training_utils except ImportError: raise ImportError( "LLM dependencies for Tune API are not installed. 
" + "Run: pip install -U 'kubeflow-katib[huggingface]' " ) - # Add metrics format for the metrics collector. - experiment.spec.metrics_collector_spec.source=models.V1beta1SourceSpec( - filter=models.V1beta1FilterSpec( - metrics_format=[ - # For example: 'train_loss':0.846 - r"'([\w|-]+)'\s*:\s*([+-]?\d*(\.\d+)?([Ee][+-]?\d+)?)", - ] - ) - ) - # Create PVC for the Storage Initializer. # TODO (helenxie-bit): PVC Creation should be part of Katib Controller. try: self.core_api.create_namespaced_persistent_volume_claim( namespace=namespace, - body=utils.get_pvc_spec( + body=training_utils.get_pvc_spec( pvc_name=name, namespace=namespace, storage_config=storage_config, @@ -559,64 +640,18 @@ class name in this argument. "or HuggingFaceDatasetParams." ) - # Iterate over input parameters. + # Iterate over input parameters and do substitutions. experiment_params = [] trial_params = [] - training_args = trainer_parameters.training_parameters - for ( - p_name, - p_value, - ) in trainer_parameters.training_parameters.to_dict().items(): - if not hasattr(training_args, p_name): - logger.warning( - f"Training parameter {p_name} is not supported by the current transformer." - ) - continue - if isinstance(p_value, models.V1beta1ParameterSpec): - old_attr = getattr(training_args, p_name, None) - if old_attr is not None: - value = f"${{trialParameters.{p_name}}}" - setattr(training_args, p_name, value) - p_value.name = p_name - experiment_params.append(p_value) - trial_params.append( - models.V1beta1TrialParameterSpec(name=p_name, reference=p_name) - ) - elif p_value is not None: - old_attr = getattr(training_args, p_name, None) - if old_attr is not None: - if isinstance(p_value, dict): - # Update the existing dictionary without nesting - value = copy.deepcopy(p_value) - else: - value = type(old_attr)(p_value) - setattr(training_args, p_name, value) - - lora_config = trainer_parameters.lora_config - for p_name, p_value in trainer_parameters.lora_config.__dict__.items(): - if not hasattr(lora_config, p_name): - logger.warning( - f"Training parameter {p_name} is not supported by the current peft." - ) - continue - if isinstance(p_value, models.V1beta1ParameterSpec): - old_attr = getattr(lora_config, p_name, None) - if old_attr is not None: - value = f"${{trialParameters.{p_name}}}" - setattr(lora_config, p_name, value) - p_value.name = p_name - experiment_params.append(p_value) - trial_params.append( - models.V1beta1TrialParameterSpec(name=p_name, reference=p_name) - ) - elif p_value is not None: - old_attr = getattr(lora_config, p_name, None) - if old_attr is not None: - value = type(old_attr)(p_value) - setattr(lora_config, p_name, value) + training_args = utils.parameter_substitution( + trainer_parameters.training_parameters, experiment_params, trial_params + ) + lora_config = utils.parameter_substitution( + trainer_parameters.lora_config, experiment_params, trial_params + ) - init_container_spec = utils.get_container_spec( + init_container_spec = training_utils.get_container_spec( name=STORAGE_INITIALIZER, base_image=STORAGE_INITIALIZER_IMAGE, args=[ @@ -634,11 +669,8 @@ class name in this argument. 
volume_mounts=[STORAGE_INITIALIZER_VOLUME_MOUNT], ) - lora_config = json.dumps(lora_config.__dict__, cls=utils.SetEncoder) - training_args = json.dumps(training_args.to_dict()) - - container_spec = utils.get_container_spec( - name=constants.DEFAULT_PRIMARY_CONTAINER_NAME, + container_spec = training_utils.get_container_spec( + name=JOB_PARAMETERS[PYTORCHJOB_KIND]["container"], base_image=TRAINER_TRANSFORMER_IMAGE, args=[ "--model_uri", @@ -655,7 +687,7 @@ class name in this argument. f"'{training_args}'", ], volume_mounts=[STORAGE_INITIALIZER_VOLUME_MOUNT], - resources=resources_per_trial, + resources=resources_per_trial.resources_per_worker, ) storage_initializer_volume = models.V1Volume( @@ -665,29 +697,74 @@ class name in this argument. ), ) - pod_spec = utils.get_pod_template_spec( + # create worker pod spec + worker_pod_template_spec = training_utils.get_pod_template_spec( + containers=[container_spec], + volumes=[storage_initializer_volume], + ) + + # create master pod spec + master_pod_template_spec = training_utils.get_pod_template_spec( containers=[container_spec], init_containers=[init_container_spec], volumes=[storage_initializer_volume], - restart_policy="Never", ) - # Create Trial specification. - trial_spec = client.V1Job( - api_version="batch/v1", - kind="Job", - spec=client.V1JobSpec( - template=pod_spec, - ), - ) + # Create pytorchjob. + pytorchjob = training_models.KubeflowOrgV1PyTorchJob( + api_version="kubeflow.org/v1", + kind="PyTorchJob", + spec=training_models.KubeflowOrgV1PyTorchJobSpec( + run_policy=training_models.KubeflowOrgV1RunPolicy( + clean_pod_policy=None + ), + pytorch_replica_specs={}, + ), + ) - # Create Trial template. - trial_template = models.V1beta1TrialTemplate( - primary_container_name=constants.DEFAULT_PRIMARY_CONTAINER_NAME, - retain=retain_trials, - trial_parameters=trial_params, - trial_spec=trial_spec, - ) + if resources_per_trial.num_procs_per_worker: + pytorchjob.spec.nproc_per_node = str( + resources_per_trial.num_procs_per_worker + ) + + pytorchjob.spec.pytorch_replica_specs["Master"] = ( + training_models.KubeflowOrgV1ReplicaSpec( + replicas=1, + template=master_pod_template_spec, + ) + ) + + if resources_per_trial.num_workers > 1: + pytorchjob.spec.pytorch_replica_specs["Worker"] = ( + training_models.KubeflowOrgV1ReplicaSpec( + replicas=resources_per_trial.num_workers - 1, + template=worker_pod_template_spec, + ) + ) + + # Add metrics collector to the Katib Experiment. + # Specify metrics format for the collector. + experiment.spec.metrics_collector_spec = models.V1beta1MetricsCollectorSpec( + collector=models.V1beta1CollectorSpec( + kind=metrics_collector_config["kind"] + ), + source=models.V1beta1SourceSpec( + filter=models.V1beta1FilterSpec( + metrics_format=[ + # For example: 'train_loss':0.846 + r"'([\w|-]+)'\s*:\s*([+-]?\d*(\.\d+)?([Ee][+-]?\d+)?)", + ] + ) + ), + ) + + # Create Trial template. + trial_template = models.V1beta1TrialTemplate( + primary_container_name=JOB_PARAMETERS[PYTORCHJOB_KIND]["container"], + retain=retain_trials, + trial_parameters=trial_params, + trial_spec=pytorchjob, + ) # Add parameters to the Katib Experiment. 
experiment.spec.parameters = experiment_params diff --git a/sdk/python/v1beta1/kubeflow/katib/utils/utils.py b/sdk/python/v1beta1/kubeflow/katib/utils/utils.py index e743dae8816..3e2dc4459e3 100644 --- a/sdk/python/v1beta1/kubeflow/katib/utils/utils.py +++ b/sdk/python/v1beta1/kubeflow/katib/utils/utils.py @@ -12,15 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. +import copy import inspect import json +import logging import os import textwrap -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import Any, Callable, List, Union from kubeflow.katib import models from kubeflow.katib.constants import constants +logger = logging.getLogger(__name__) + def is_running_in_k8s(): return os.path.isdir("/var/run/secrets/kubernetes.io/") @@ -130,176 +134,6 @@ def __init__(self, obj): self.data = json.dumps(obj) -def get_command_using_train_func( - train_func: Optional[Callable], - train_func_parameters: Optional[Dict[str, Any]] = None, - packages_to_install: Optional[List[str]] = None, - pip_index_url: str = "https://pypi.org/simple", -) -> Tuple[List[str], List[str]]: - """ - Get container args and command from the given training function and parameters. - """ - # Check if function is callable. - if not callable(train_func): - raise ValueError( - f"Training function must be callable, got function type: {type(train_func)}" - ) - - # Extract function implementation. - func_code = inspect.getsource(train_func) - - # Function might be defined in some indented scope (e.g. in another function). - # We need to dedent the function code. - func_code = textwrap.dedent(func_code) - - # Wrap function code to execute it from the file. For example: - # def train(parameters): - # print('Start Training...') - # train({'lr': 0.01}) - if train_func_parameters is None: - func_code = f"{func_code}\n{train_func.__name__}()\n" - else: - func_code = f"{func_code}\n{train_func.__name__}({train_func_parameters})\n" - - # Prepare execute script template. - exec_script = textwrap.dedent( - """ - program_path=$(mktemp -d) - read -r -d '' SCRIPT << EOM\n - {func_code} - EOM - printf "%s" \"$SCRIPT\" > \"$program_path/ephemeral_script.py\" - python3 -u \"$program_path/ephemeral_script.py\"""" - ) - - # Add function code to the execute script. - exec_script = exec_script.format(func_code=func_code) - - # Install Python packages if that is required. - if packages_to_install is not None: - exec_script = ( - get_script_for_python_packages(packages_to_install, pip_index_url) - + exec_script - ) - - # Return container command and args to execute training function. - return ["bash", "-c"], [exec_script] - - -def get_container_spec( - name: str, - base_image: str, - train_func: Optional[Callable] = None, - train_func_parameters: Optional[Dict[str, Any]] = None, - packages_to_install: Optional[List[str]] = None, - pip_index_url: str = "https://pypi.org/simple", - args: Optional[List[str]] = None, - resources: Union[dict, models.V1ResourceRequirements, None] = None, - volume_mounts: Optional[List[models.V1VolumeMount]] = None, - env: Optional[List[models.V1EnvVar]] = None, - env_from: Optional[List[models.V1EnvFromSource]] = None, -) -> models.V1Container: - """ - Get container spec for the given parameters. - """ - - if name is None or base_image is None: - raise ValueError("Container name or base image cannot be none") - - # Create initial container spec. 
-    container_spec = models.V1Container(
-        name=name, image=base_image, args=args, volume_mounts=volume_mounts
-    )
-
-    # If training function is set, override container command and args to execute the function.
-    if train_func is not None:
-        container_spec.command, container_spec.args = get_command_using_train_func(
-            train_func=train_func,
-            train_func_parameters=train_func_parameters,
-            packages_to_install=packages_to_install,
-            pip_index_url=pip_index_url,
-        )
-
-    # Convert dict to the Kubernetes container resources if that is required.
-    if isinstance(resources, dict):
-        # Convert all keys in resources to lowercase.
-        resources = {k.lower(): v for k, v in resources.items()}
-        if "gpu" in resources:
-            resources["nvidia.com/gpu"] = resources.pop("gpu")
-
-        resources = models.V1ResourceRequirements(
-            requests=resources,
-            limits=resources,
-        )
-
-    # Add resources to the container spec.
-    container_spec.resources = resources
-
-    # Add environment variables to the container spec.
-    if env:
-        container_spec.env = env
-    if env_from:
-        container_spec.env_from = env_from
-
-    return container_spec
-
-
-def get_pod_template_spec(
-    containers: List[models.V1Container],
-    init_containers: Optional[List[models.V1Container]] = None,
-    volumes: Optional[List[models.V1Volume]] = None,
-    restart_policy: Optional[str] = None,
-) -> models.V1PodTemplateSpec:
-    """
-    Get Pod template spec for the given parameters.
-    """
-
-    # Create Pod template spec. If the value is None, Pod doesn't have that parameter
-    pod_template_spec = models.V1PodTemplateSpec(
-        metadata=models.V1ObjectMeta(annotations={"sidecar.istio.io/inject": "false"}),
-        spec=models.V1PodSpec(
-            init_containers=init_containers,
-            containers=containers,
-            volumes=volumes,
-            restart_policy=restart_policy,
-        ),
-    )
-
-    return pod_template_spec
-
-
-def get_pvc_spec(
-    pvc_name: str,
-    namespace: str,
-    storage_config: Dict[str, Optional[Union[str, List[str]]]],
-):
-    if pvc_name is None or namespace is None:
-        raise ValueError("One of the required storage config argument is None")
-
-    if "size" not in storage_config:
-        storage_config["size"] = constants.PVC_DEFAULT_SIZE
-
-    if "access_modes" not in storage_config:
-        storage_config["access_modes"] = constants.PVC_DEFAULT_ACCESS_MODES
-
-    pvc_spec = models.V1PersistentVolumeClaim(
-        api_version="v1",
-        kind="PersistentVolumeClaim",
-        metadata={"name": pvc_name, "namespace": namespace},
-        spec=models.V1PersistentVolumeClaimSpec(
-            access_modes=storage_config["access_modes"],
-            resources=models.V1ResourceRequirements(
-                requests={"storage": storage_config["size"]}
-            ),
-        ),
-    )
-
-    if "storage_class" in storage_config:
-        pvc_spec.spec.storage_class_name = storage_config["storage_class"]
-
-    return pvc_spec
-
-
 class SetEncoder(json.JSONEncoder):
     def default(self, obj):
         if isinstance(obj, set):
@@ -307,3 +141,49 @@ def default(self, obj):
         if isinstance(obj, type):
             return obj.__name__
         return json.JSONEncoder.default(self, obj)
+
+
+def parameter_substitution(
+    parameters: Union["TrainingArguments", "LoraConfig"],  # noqa: F821
+    experiment_params: List[models.V1beta1ParameterSpec],
+    trial_params: List[models.V1beta1TrialParameterSpec],
+):
+    from peft import LoraConfig  # noqa: F401
+    from transformers import TrainingArguments  # noqa: F401
+
+    if isinstance(parameters, TrainingArguments):
+        parameters_dict = parameters.to_dict()
+    else:
+        parameters_dict = parameters.__dict__
+
+    for p_name, p_value in parameters_dict.items():
+        if not hasattr(parameters, p_name):
+            logger.warning(f"Training parameter {p_name} is not supported.")
+            continue
+
+        if isinstance(p_value, models.V1beta1ParameterSpec):
+            old_attr = getattr(parameters, p_name, None)
+            if old_attr is not None:
+                value = f"${{trialParameters.{p_name}}}"
+                setattr(parameters, p_name, value)
+            p_value.name = p_name
+            experiment_params.append(p_value)
+            trial_params.append(
+                models.V1beta1TrialParameterSpec(name=p_name, reference=p_name)
+            )
+        elif p_value is not None:
+            old_attr = getattr(parameters, p_name, None)
+            if old_attr is not None:
+                if isinstance(p_value, dict):
+                    # Update the existing dictionary without nesting
+                    value = copy.deepcopy(p_value)
+                else:
+                    value = type(old_attr)(p_value)
+                setattr(parameters, p_name, value)
+
+    if isinstance(parameters, TrainingArguments):
+        parameters = json.dumps(parameters.to_dict())
+    else:
+        parameters = json.dumps(parameters.__dict__, cls=SetEncoder)
+
+    return parameters
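
Below is a minimal usage sketch of the reworked `tune` API for the external-model path, assuming the `kubeflow-katib[huggingface]` extras are installed. The model URI, dataset repo, and search ranges are illustrative, not part of this change:

```python
import kubeflow.katib as katib
import transformers
from kubeflow.katib import KatibClient, types
from kubeflow.storage_initializer.hugging_face import (
    HuggingFaceDatasetParams,
    HuggingFaceModelParams,
    HuggingFaceTrainerParams,
)
from peft import LoraConfig

cl = KatibClient(namespace="kubeflow")

cl.tune(
    name="llm-hp-tune",
    # Illustrative model and dataset references.
    model_provider_parameters=HuggingFaceModelParams(
        model_uri="hf://google/bert_uncased_L-2_H-128_A-2",
        transformer_type=transformers.AutoModelForSequenceClassification,
    ),
    dataset_provider_parameters=HuggingFaceDatasetParams(repo_id="yelp_review_full"),
    trainer_parameters=HuggingFaceTrainerParams(
        training_parameters=transformers.TrainingArguments(
            output_dir="results",
            # katib.search values become Experiment parameters via
            # utils.parameter_substitution.
            learning_rate=katib.search.double(min=1e-05, max=5e-05),
        ),
        lora_config=LoraConfig(
            r=katib.search.int(min=8, max=32),
        ),
    ),
    objective_metric_name="train_loss",
    objective_type="minimize",
    algorithm_name="random",
    max_trial_count=10,
    parallel_trial_count=2,
    # TrainerResources drives the PyTorchJob replica specs built above.
    resources_per_trial=types.TrainerResources(
        num_workers=4,
        num_procs_per_worker=2,
        resources_per_worker={"gpu": "2", "cpu": "5", "memory": "10Gi"},
    ),
)
```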
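
And a hedged sketch of the contract of the new `utils.parameter_substitution` helper: tunable fields (any `V1beta1ParameterSpec` value) are rewritten to `${trialParameters.<name>}` placeholders and collected into the Experiment and Trial parameter lists, while the object itself comes back JSON-serialized for the trainer container's CLI arguments. The search range is again illustrative:

```python
import kubeflow.katib as katib
import transformers
from kubeflow.katib.utils import utils

experiment_params, trial_params = [], []

# learning_rate holds a V1beta1ParameterSpec, so it is treated as tunable.
training_args = transformers.TrainingArguments(
    output_dir="results",
    learning_rate=katib.search.double(min=1e-05, max=5e-05),
)

# Returns a JSON string with learning_rate replaced by
# "${trialParameters.learning_rate}"; experiment_params and trial_params
# are populated in place.
training_args_json = utils.parameter_substitution(
    training_args, experiment_params, trial_params
)
```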