Commit 0280849
[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
pre-commit-ci[bot] committed Oct 16, 2024
1 parent 6ff37e9 commit 0280849
Showing 8 changed files with 16 additions and 48 deletions.
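
pre-commit.ci runs whatever hooks the repository declares in .pre-commit-config.yaml and commits the resulting fixes. Every hunk below collapses a wrapped expression onto a single longer line, which is the signature of a formatter run with a raised line-length limit; a hypothetical config along those lines (the hook choice, the pinned revision, and the 100-character limit are assumptions, not read from this commit):

    # .pre-commit-config.yaml -- illustrative sketch, not this repository's actual config
    repos:
      - repo: https://github.com/astral-sh/ruff-pre-commit
        rev: v0.6.9  # assumed pin
        hooks:
          - id: ruff
            args: [--fix]  # lint autofixes
          - id: ruff-format  # reformats; the line length would come from pyproject.toml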
4 changes: 1 addition & 3 deletions src/datetime_parsing_csv/python/main.py
@@ -13,9 +13,7 @@ def modify_fields(item: dict, i: int) -> dict:
     new_item: dict[str, Any] = dict()
     new_item["id"] = i
     new_item["name"] = item["name"]
-    new_item["dob"] = (
-        datetime.strptime(item["dob"], "%m-%d-%Y").date() if item.get("dob") else None
-    )
+    new_item["dob"] = datetime.strptime(item["dob"], "%m-%d-%Y").date() if item.get("dob") else None
     new_item["age"] = item["age"]
     new_item["isMarried"] = item["isMarried"]
     new_item["city"] = item["city"]
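
The reflowed line above is a conditional parse of an optional month-day-year string; a minimal standalone version of the same expression, with an illustrative record (the sample value is not from the repository's data):

    from datetime import datetime

    item = {"dob": "01-14-1985"}  # illustrative row; "dob" may also be missing or empty
    dob = datetime.strptime(item["dob"], "%m-%d-%Y").date() if item.get("dob") else None
    print(dob)  # 1985-01-14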
8 changes: 2 additions & 6 deletions src/intro/python/main.py
@@ -89,9 +89,7 @@ def run5() -> None:
     persons = [Person("Aiko", 41), Person("Rohan", 18)]
     sorted_by_age = sorted(persons, key=lambda person: person.age)
     youngest_person = sorted_by_age[0]
-    print(
-        f"{youngest_person.name} is the youngest person at {youngest_person.age} years old"
-    )
+    print(f"{youngest_person.name} is the youngest person at {youngest_person.age} years old")
     """
     Rohan is the youngest person at 18 years old
     """
@@ -119,9 +117,7 @@ def run7() -> None:
     """
     persons = [Person("Issa", 39), Person("Ibrahim", 26)]
     persons_born_after_1995 = [
-        (person.name, person.age)
-        for person in persons
-        if approx_year_of_birth(person) > 1995
+        (person.name, person.age) for person in persons if approx_year_of_birth(person) > 1995
     ]
     print(f"Persons born after 1995: {persons_born_after_1995}")
     """
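
approx_year_of_birth is called in the second hunk but defined outside it; a plausible sketch consistent with the surrounding code, assuming it simply subtracts age from the current year:

    from datetime import date

    def approx_year_of_birth(person) -> int:
        # Assumed helper -- the real definition is not visible in this diff
        return date.today().year - person.age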
25 changes: 6 additions & 19 deletions src/meilisearch_with_cli/python/main.py
@@ -18,25 +18,18 @@
 console = Console()


-async def preform_indexing(
-    data: List[dict[str, Any]], index_name: str, wait: bool
-) -> None:
+async def preform_indexing(data: List[dict[str, Any]], index_name: str, wait: bool) -> None:
     async with AsyncClient(MEILISEARCH_URL, MEILISEARCH_API_KEY) as client:
         index = client.index(index_name)
         tasks = await index.add_documents_in_batches(data)
         if wait:
-            waits = [
-                client.wait_for_task(task.task_uid, timeout_in_ms=None)
-                for task in tasks
-            ]
+            waits = [client.wait_for_task(task.task_uid, timeout_in_ms=None) for task in tasks]
             await asyncio.gather(*waits)


 @app.command()
 def create_index(
-    index_name: str = Option(
-        "wine", "-i", "--index-name", help="The name to use for the index"
-    ),
+    index_name: str = Option("wine", "-i", "--index-name", help="The name to use for the index"),
 ) -> None:
     client = Client(MEILISEARCH_URL, MEILISEARCH_API_KEY)
     client.create_index(
@@ -79,12 +72,8 @@ def index_data(
         dir_okay=False,
         help="Path to the data file",
     ),
-    index_name: str = Option(
-        "wine", "-i", "--index-name", help="The name to use for the index"
-    ),
-    wait: bool = Option(
-        False, "-w", "--wait", help="Wait for the data to finish indexing"
-    ),
+    index_name: str = Option("wine", "-i", "--index-name", help="The name to use for the index"),
+    wait: bool = Option(False, "-w", "--wait", help="Wait for the data to finish indexing"),
 ) -> None:
     if data_path:
         data = srsly.read_gzip_jsonl(data_path)
@@ -101,9 +90,7 @@ def search(
     query: str = Argument(..., help="The search to preform"),
     limit: int = Option(20, "-l", "--limit", help="Limit the number of search results"),
     sort: List[str] = Option(None, "-s", "--sort", help="Sort order for the results"),
-    index_name: str = Option(
-        "wine", "-i", "--index-name", help="The name to use for the index"
-    ),
+    index_name: str = Option("wine", "-i", "--index-name", help="The name to use for the index"),
 ) -> None:
     client = Client(MEILISEARCH_URL, MEILISEARCH_API_KEY)
     index = client.index(index_name)
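
The options touched above belong to a Typer CLI; a usage sketch mirroring the repository's own test style (the query string is illustrative; the "wine" default and the -l and -i flags appear in the hunks):

    from typer.testing import CliRunner

    from main import app  # the Typer app decorated with @app.command() above

    result = CliRunner().invoke(app, ["search", "cabernet", "-l", "5", "-i", "wine"])
    print(result.output)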
4 changes: 1 addition & 3 deletions src/meilisearch_with_cli/python/test_main.py
@@ -118,9 +118,7 @@ def test_index_data(client, tmp_path, wine_data, index_name):
     index = client.create_index(index_name)
     file_path = tmp_path / "data.jsonl.gz"
     srsly.write_gzip_jsonl(file_path, wine_data)
-    CliRunner().invoke(
-        app, ["index-data", "-d", str(file_path), "-w", "-i", index_name]
-    )
+    CliRunner().invoke(app, ["index-data", "-d", str(file_path), "-w", "-i", index_name])
     documents = index.get_documents()
     assert documents.total == 5
4 changes: 1 addition & 3 deletions src/mock_data/python/main.py
@@ -48,9 +48,7 @@ def get_locations(filename: Path) -> list[Location]:
     return locations


-def generate_fake_persons(
-    faker: Faker, locations: list[Location], num: int
-) -> list[Person]:
+def generate_fake_persons(faker: Faker, locations: list[Location], num: int) -> list[Person]:
     # Generate fake persons with the desired structure and return a list of mappings
     profiles = []
     for i in range(1, num + 1):
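
Only the signature and loop header of generate_fake_persons are visible above; a minimal sketch of the kind of Faker calls it makes, with an assumed profile shape (the real structure is defined elsewhere in main.py):

    from faker import Faker

    faker = Faker()
    # Assumed fields -- "id" and "name" only, for illustration
    profiles = [{"id": i, "name": faker.name()} for i in range(1, 4)]
    print(profiles)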
4 changes: 1 addition & 3 deletions src/parallelism/python/analyze.py
@@ -6,9 +6,7 @@ def get_result() -> pl.DataFrame:
     articles2 = pl.read_csv("../data/articles2_processed.csv")
     articles3 = pl.read_csv("../data/articles3_processed.csv")
     # Combine the data into a single DataFrame
-    result = (
-        pl.concat([articles1, articles2, articles3]).unique(subset=["id"]).sort("id")
-    )
+    result = pl.concat([articles1, articles2, articles3]).unique(subset=["id"]).sort("id")
     print(f"Number of articles: {result.height}")
     return result
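
The chained call deduplicates on "id" after concatenating the three frames; a self-contained demonstration with toy data (the article CSVs are not needed to see the behavior):

    import polars as pl

    # Toy stand-ins for the three article DataFrames
    a = pl.DataFrame({"id": [1, 2], "title": ["x", "y"]})
    b = pl.DataFrame({"id": [2, 3], "title": ["y", "z"]})
    result = pl.concat([a, b]).unique(subset=["id"]).sort("id")
    print(result.height)  # 3 -- the row duplicated under id 2 is kept once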
11 changes: 3 additions & 8 deletions src/parallelism/python/main.py
@@ -52,9 +52,7 @@ def _clean_text(self, text: str) -> str:
             "ll": " will",
         }
         # Replace contractions with full words
-        formatted_text = re.sub(
-            r"([’'])(s|d|ll)", lambda x: suffix_mapping[x.group(2)], text_lower
-        )
+        formatted_text = re.sub(r"([’'])(s|d|ll)", lambda x: suffix_mapping[x.group(2)], text_lower)
         # Remove non-alphabetic characters
         result = re.sub(r"[^a-zA-Z\s]", "", formatted_text)
         return result
@@ -63,9 +61,7 @@ def calculate_counts(self, data: JsonBlob) -> JsonBlob:
         text = data["content"]
         result = self._clean_text(text)
         tokens = result.split()
-        data["num_male_pronouns"], data["num_female_pronouns"] = (
-            count_gendered_pronouns(tokens)
-        )
+        data["num_male_pronouns"], data["num_female_pronouns"] = count_gendered_pronouns(tokens)
         data.pop("content")
         return data
@@ -105,8 +101,7 @@ def write_results(data: list[JsonBlob], file_path: Path, file_name: str) -> None
 def main(file_path: Path, batch_size: int) -> None:
     # Get all .csv files in the directory
     files = [
-        Path(f"../data/{file}")
-        for file in ("articles1.csv", "articles2.csv", "articles3.csv")
+        Path(f"../data/{file}") for file in ("articles1.csv", "articles2.csv", "articles3.csv")
     ]
     processor = BatchProcessor(batch_size)
     for input_file in files:
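
The first hunk above reflows a re.sub call that expands contractions through a lambda lookup into suffix_mapping; a standalone sketch of that pattern (only the "ll" entry is visible in the diff, so the "s" and "d" expansions here are assumed):

    import re

    suffix_mapping = {"s": " is", "d": " would", "ll": " will"}  # "s"/"d" values assumed
    text_lower = "she'll say it'd work"
    formatted_text = re.sub(r"([’'])(s|d|ll)", lambda x: suffix_mapping[x.group(2)], text_lower)
    print(formatted_text)  # she will say it would work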
4 changes: 1 addition & 3 deletions src/regex_json/python/main.py
@@ -42,9 +42,7 @@ def run() -> list[dict[str, Any]]:
         raise ValueError("No data found")

     for company in data:
-        annual_revenue_lower, annual_revenue_upper = calculate_range(
-            company["annual_revenue"]
-        )
+        annual_revenue_lower, annual_revenue_upper = calculate_range(company["annual_revenue"])
         # Append to existing dict
         company["annual_revenue_lower"] = annual_revenue_lower
         company["annual_revenue_upper"] = annual_revenue_upper
