Commit
Merge branch 'main' into starters-template-compatibility
lrcouto authored Nov 4, 2024
2 parents a87df2e + 7a263e9 commit 71fcb69
Showing 6 changed files with 9 additions and 12 deletions.
@@ -1,11 +1,10 @@
 """Project pipelines."""
-from typing import Dict
 
 from kedro.framework.project import find_pipelines
 from kedro.pipeline import Pipeline
 
 
-def register_pipelines() -> Dict[str, Pipeline]:
+def register_pipelines() -> dict[str, Pipeline]:
     """Register the project's pipelines.
 
     Returns:
@@ -4,12 +4,12 @@
 PLEASE DELETE THIS FILE ONCE YOU START WORKING ON YOUR OWN PROJECT!
 """
 
-from typing import Any, Dict
+from typing import Any
 
 import pandas as pd
 
 
-def split_data(data: pd.DataFrame, example_test_data_ratio: float) -> Dict[str, Any]:
+def split_data(data: pd.DataFrame, example_test_data_ratio: float) -> dict[str, Any]:
     """Node for splitting the classical Iris data set into training and test
     sets, each split into features and labels.
     The split ratio parameter is taken from conf/project/parameters.yml.
@@ -4,14 +4,14 @@
 Delete this when you start working on your own Kedro project.
 """
 import logging
-from typing import Any, Dict
+from typing import Any
 
 import numpy as np
 import pandas as pd
 
 
 def train_model(
-    train_x: pd.DataFrame, train_y: pd.DataFrame, parameters: Dict[str, Any]
+    train_x: pd.DataFrame, train_y: pd.DataFrame, parameters: dict[str, Any]
 ) -> np.ndarray:
     """Node for training a simple multi-class logistic regression model. The
     number of training iterations as well as the learning rate are taken from
@@ -4,14 +4,13 @@
 """
 
 import logging
-from typing import Dict, Tuple
 
 import numpy as np
 import pandas as pd
 from pyspark.sql import DataFrame
 
 
-def split_data(data: DataFrame, parameters: Dict) -> Tuple:
+def split_data(data: DataFrame, parameters: dict) -> tuple:
     """Splits data into features and targets training and test sets.
 
     Args:
@@ -1,11 +1,10 @@
 """Project pipelines."""
-from typing import Dict
 
 from kedro.framework.project import find_pipelines
 from kedro.pipeline import Pipeline
 
 
-def register_pipelines() -> Dict[str, Pipeline]:
+def register_pipelines() -> dict[str, Pipeline]:
     """Register the project's pipelines.
 
     Returns:
features/environment.py (4 changes: 2 additions & 2 deletions)
@@ -8,9 +8,9 @@
 import tempfile
 import venv
 from pathlib import Path
-from typing import Any, Set
+from typing import Any
 
-_PATHS_TO_REMOVE: Set[Path] = set()
+_PATHS_TO_REMOVE: set[Path] = set()



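Every hunk in this commit makes the same substitution: the Dict, Tuple, and Set aliases imported from the typing module are replaced by the built-in generics dict, tuple, and set, which have been subscriptable in annotations since Python 3.9 (PEP 585). A minimal sketch of the pattern, assuming Python 3.9 or later; summarise and _SEEN_KEYS are hypothetical names used only for illustration and are not part of the changed files:

from typing import Any  # Any still comes from typing, as in the hunks above


def summarise(scores: dict[str, Any]) -> tuple[int, float]:
    """Hypothetical helper: count the numeric values and total them."""
    numbers = [v for v in scores.values() if isinstance(v, (int, float))]
    return len(numbers), float(sum(numbers))


# The built-in generics also work for module-level annotations,
# mirroring the _PATHS_TO_REMOVE change in features/environment.py.
_SEEN_KEYS: set[str] = set()

Runtime behaviour is unchanged; only the annotations and imports differ, which is why the commit nets out at 9 additions and 12 deletions with no logic touched.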
