Prechecks for asv #2107

Merged: 29 commits, Jan 15, 2025

Changes from 1 commit
fix versions in benchmarks.json
Georgi Rusev authored and Georgi Rusev committed Jan 10, 2025

commit 9b684594516c1de3f90d9432834daac9fadd5854
84 changes: 42 additions & 42 deletions python/.asv/results/benchmarks.json
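
The functional change in this commit is that the append benchmarks now take DataFrames from the front of the pre-generated list (`.pop(0)`) instead of the back (`.pop()`), and `warmup_time` drops from `-1` to `0`; the `version` hashes and `setup_cache_key` line references in the JSON are recomputed as a consequence. A minimal sketch of the `pop` difference (the list contents are stand-ins, not the benchmark's actual DataFrames):

```python
# Illustrative sketch: list.pop() removes the *last* element (LIFO),
# list.pop(0) removes the *first* (FIFO), so the pre-generated append
# frames are now consumed in the order they were created.
frames = ["df_0", "df_1", "df_2"]

lifo = frames.copy()
assert lifo.pop() == "df_2"    # old behaviour: .pop() takes the newest frame

fifo = frames.copy()
assert fifo.pop(0) == "df_0"   # new behaviour: .pop(0) takes the oldest frame
```
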
@@ -531,7 +531,7 @@
"warmup_time": -1
},
"basic_functions.ModificationFunctions.time_append_large": {
"code": "class ModificationFunctions:\n def time_append_large(self, lad: LargeAppendDataModify, rows):\n large: pd.DataFrame = lad.df_append_large[rows].pop()\n self.lib.append(f\"sym\", large)\n\n def setup(self, lad: LargeAppendDataModify, rows):\n \n self.df_update_single = generate_pseudo_random_dataframe(1, \"s\", get_time_at_fraction_of_df(0.5, rows))\n self.df_update_half = generate_pseudo_random_dataframe(rows//2, \"s\", get_time_at_fraction_of_df(0.75, rows))\n self.df_update_upsert = generate_pseudo_random_dataframe(rows, \"s\", get_time_at_fraction_of_df(1.5, rows))\n self.df_append_single = generate_pseudo_random_dataframe(1, \"s\", get_time_at_fraction_of_df(1.1, rows))\n \n self.df_update_short_wide = generate_random_floats_dataframe_with_index(\n ModificationFunctions.WIDE_DF_ROWS, ModificationFunctions.WIDE_DF_COLS\n )\n \n self.ac = Arctic(ModificationFunctions.CONNECTION_STRING)\n self.lib = self.ac[get_prewritten_lib_name(rows)]\n self.lib_short_wide = self.ac[get_prewritten_lib_name(ModificationFunctions.WIDE_DF_ROWS)]\n\n def setup_cache(self):\n \n self.ac = Arctic(ModificationFunctions.CONNECTION_STRING)\n rows_values = ModificationFunctions.params\n \n self.init_dfs = {rows: generate_pseudo_random_dataframe(rows) for rows in rows_values}\n for rows in rows_values:\n lib_name = get_prewritten_lib_name(rows)\n self.ac.delete_library(lib_name)\n lib = self.ac.create_library(lib_name)\n df = self.init_dfs[rows]\n lib.write(\"sym\", df)\n print(f\"INITIAL DATAFRAME {rows} rows has Index {df.iloc[0].name} - {df.iloc[df.shape[0] - 1].name}\")\n \n lib_name = get_prewritten_lib_name(ModificationFunctions.WIDE_DF_ROWS)\n self.ac.delete_library(lib_name)\n lib = self.ac.create_library(lib_name)\n lib.write(\n \"short_wide_sym\",\n generate_random_floats_dataframe_with_index(\n ModificationFunctions.WIDE_DF_ROWS, ModificationFunctions.WIDE_DF_COLS\n ),\n )\n \n # We use the fact that we're running on LMDB to store a copy of the initial arctic directory.\n # Then on each teardown we restore the initial state by overwriting the modified with the original.\n copytree(ModificationFunctions.ARCTIC_DIR, ModificationFunctions.ARCTIC_DIR_ORIGINAL)\n \n number_iteration = ModificationFunctions.repeat * ModificationFunctions.number * ModificationFunctions.rounds\n \n lad = ModificationFunctions.LargeAppendDataModify(ModificationFunctions.params, number_iteration)\n \n return lad",
"code": "class ModificationFunctions:\n def time_append_large(self, lad: LargeAppendDataModify, rows):\n large: pd.DataFrame = lad.df_append_large[rows].pop(0)\n self.lib.append(f\"sym\", large)\n\n def setup(self, lad: LargeAppendDataModify, rows):\n \n self.df_update_single = generate_pseudo_random_dataframe(1, \"s\", get_time_at_fraction_of_df(0.5, rows))\n self.df_update_half = generate_pseudo_random_dataframe(rows//2, \"s\", get_time_at_fraction_of_df(0.75, rows))\n self.df_update_upsert = generate_pseudo_random_dataframe(rows, \"s\", get_time_at_fraction_of_df(1.5, rows))\n self.df_append_single = generate_pseudo_random_dataframe(1, \"s\", get_time_at_fraction_of_df(1.1, rows))\n \n self.df_update_short_wide = generate_random_floats_dataframe_with_index(\n ModificationFunctions.WIDE_DF_ROWS, ModificationFunctions.WIDE_DF_COLS\n )\n \n self.ac = Arctic(ModificationFunctions.CONNECTION_STRING)\n self.lib = self.ac[get_prewritten_lib_name(rows)]\n self.lib_short_wide = self.ac[get_prewritten_lib_name(ModificationFunctions.WIDE_DF_ROWS)]\n\n def setup_cache(self):\n \n self.ac = Arctic(ModificationFunctions.CONNECTION_STRING)\n rows_values = ModificationFunctions.params\n \n self.init_dfs = {rows: generate_pseudo_random_dataframe(rows) for rows in rows_values}\n for rows in rows_values:\n lib_name = get_prewritten_lib_name(rows)\n self.ac.delete_library(lib_name)\n lib = self.ac.create_library(lib_name)\n df = self.init_dfs[rows]\n lib.write(\"sym\", df)\n print(f\"INITIAL DATAFRAME {rows} rows has Index {df.iloc[0].name} - {df.iloc[df.shape[0] - 1].name}\")\n \n lib_name = get_prewritten_lib_name(ModificationFunctions.WIDE_DF_ROWS)\n self.ac.delete_library(lib_name)\n lib = self.ac.create_library(lib_name)\n lib.write(\n \"short_wide_sym\",\n generate_random_floats_dataframe_with_index(\n ModificationFunctions.WIDE_DF_ROWS, ModificationFunctions.WIDE_DF_COLS\n ),\n )\n \n # We use the fact that we're running on LMDB to store a copy of the initial arctic directory.\n # Then on each teardown we restore the initial state by overwriting the modified with the original.\n copytree(ModificationFunctions.ARCTIC_DIR, ModificationFunctions.ARCTIC_DIR_ORIGINAL)\n \n number_iteration = ModificationFunctions.repeat * ModificationFunctions.number * ModificationFunctions.rounds\n \n lad = ModificationFunctions.LargeAppendDataModify(ModificationFunctions.params, number_iteration)\n \n return lad",
"min_run_count": 2,
"name": "basic_functions.ModificationFunctions.time_append_large",
"number": 1,
@@ -547,15 +547,15 @@
"repeat": 3,
"rounds": 1,
"sample_time": 0.01,
"setup_cache_key": "basic_functions:278",
"setup_cache_key": "basic_functions:280",
"timeout": 6000,
"type": "time",
"unit": "seconds",
"version": "b817d86d1bf76649691197bfaf1261a96a1a34c9a25f053d66f6dfcf14c6f279",
"warmup_time": -1
"version": "2b592208898cce70f9bc92ae629f428d8a4c7e9dc1a3445686c2420888afeeb5",
"warmup_time": 0
},
"basic_functions.ModificationFunctions.time_append_short_wide": {
"code": "class ModificationFunctions:\n def time_append_short_wide(self, lad: LargeAppendDataModify, rows):\n large: pd.DataFrame = lad.df_append_short_wide[rows].pop()\n self.lib_short_wide.append(\"short_wide_sym\", large)\n\n def setup(self, lad: LargeAppendDataModify, rows):\n \n self.df_update_single = generate_pseudo_random_dataframe(1, \"s\", get_time_at_fraction_of_df(0.5, rows))\n self.df_update_half = generate_pseudo_random_dataframe(rows//2, \"s\", get_time_at_fraction_of_df(0.75, rows))\n self.df_update_upsert = generate_pseudo_random_dataframe(rows, \"s\", get_time_at_fraction_of_df(1.5, rows))\n self.df_append_single = generate_pseudo_random_dataframe(1, \"s\", get_time_at_fraction_of_df(1.1, rows))\n \n self.df_update_short_wide = generate_random_floats_dataframe_with_index(\n ModificationFunctions.WIDE_DF_ROWS, ModificationFunctions.WIDE_DF_COLS\n )\n \n self.ac = Arctic(ModificationFunctions.CONNECTION_STRING)\n self.lib = self.ac[get_prewritten_lib_name(rows)]\n self.lib_short_wide = self.ac[get_prewritten_lib_name(ModificationFunctions.WIDE_DF_ROWS)]\n\n def setup_cache(self):\n \n self.ac = Arctic(ModificationFunctions.CONNECTION_STRING)\n rows_values = ModificationFunctions.params\n \n self.init_dfs = {rows: generate_pseudo_random_dataframe(rows) for rows in rows_values}\n for rows in rows_values:\n lib_name = get_prewritten_lib_name(rows)\n self.ac.delete_library(lib_name)\n lib = self.ac.create_library(lib_name)\n df = self.init_dfs[rows]\n lib.write(\"sym\", df)\n print(f\"INITIAL DATAFRAME {rows} rows has Index {df.iloc[0].name} - {df.iloc[df.shape[0] - 1].name}\")\n \n lib_name = get_prewritten_lib_name(ModificationFunctions.WIDE_DF_ROWS)\n self.ac.delete_library(lib_name)\n lib = self.ac.create_library(lib_name)\n lib.write(\n \"short_wide_sym\",\n generate_random_floats_dataframe_with_index(\n ModificationFunctions.WIDE_DF_ROWS, ModificationFunctions.WIDE_DF_COLS\n ),\n )\n \n # We use the fact that we're running on LMDB to store a copy of the initial arctic directory.\n # Then on each teardown we restore the initial state by overwriting the modified with the original.\n copytree(ModificationFunctions.ARCTIC_DIR, ModificationFunctions.ARCTIC_DIR_ORIGINAL)\n \n number_iteration = ModificationFunctions.repeat * ModificationFunctions.number * ModificationFunctions.rounds\n \n lad = ModificationFunctions.LargeAppendDataModify(ModificationFunctions.params, number_iteration)\n \n return lad",
"code": "class ModificationFunctions:\n def time_append_short_wide(self, lad: LargeAppendDataModify, rows):\n large: pd.DataFrame = lad.df_append_short_wide[rows].pop(0)\n self.lib_short_wide.append(\"short_wide_sym\", large)\n\n def setup(self, lad: LargeAppendDataModify, rows):\n \n self.df_update_single = generate_pseudo_random_dataframe(1, \"s\", get_time_at_fraction_of_df(0.5, rows))\n self.df_update_half = generate_pseudo_random_dataframe(rows//2, \"s\", get_time_at_fraction_of_df(0.75, rows))\n self.df_update_upsert = generate_pseudo_random_dataframe(rows, \"s\", get_time_at_fraction_of_df(1.5, rows))\n self.df_append_single = generate_pseudo_random_dataframe(1, \"s\", get_time_at_fraction_of_df(1.1, rows))\n \n self.df_update_short_wide = generate_random_floats_dataframe_with_index(\n ModificationFunctions.WIDE_DF_ROWS, ModificationFunctions.WIDE_DF_COLS\n )\n \n self.ac = Arctic(ModificationFunctions.CONNECTION_STRING)\n self.lib = self.ac[get_prewritten_lib_name(rows)]\n self.lib_short_wide = self.ac[get_prewritten_lib_name(ModificationFunctions.WIDE_DF_ROWS)]\n\n def setup_cache(self):\n \n self.ac = Arctic(ModificationFunctions.CONNECTION_STRING)\n rows_values = ModificationFunctions.params\n \n self.init_dfs = {rows: generate_pseudo_random_dataframe(rows) for rows in rows_values}\n for rows in rows_values:\n lib_name = get_prewritten_lib_name(rows)\n self.ac.delete_library(lib_name)\n lib = self.ac.create_library(lib_name)\n df = self.init_dfs[rows]\n lib.write(\"sym\", df)\n print(f\"INITIAL DATAFRAME {rows} rows has Index {df.iloc[0].name} - {df.iloc[df.shape[0] - 1].name}\")\n \n lib_name = get_prewritten_lib_name(ModificationFunctions.WIDE_DF_ROWS)\n self.ac.delete_library(lib_name)\n lib = self.ac.create_library(lib_name)\n lib.write(\n \"short_wide_sym\",\n generate_random_floats_dataframe_with_index(\n ModificationFunctions.WIDE_DF_ROWS, ModificationFunctions.WIDE_DF_COLS\n ),\n )\n \n # We use the fact that we're running on LMDB to store a copy of the initial arctic directory.\n # Then on each teardown we restore the initial state by overwriting the modified with the original.\n copytree(ModificationFunctions.ARCTIC_DIR, ModificationFunctions.ARCTIC_DIR_ORIGINAL)\n \n number_iteration = ModificationFunctions.repeat * ModificationFunctions.number * ModificationFunctions.rounds\n \n lad = ModificationFunctions.LargeAppendDataModify(ModificationFunctions.params, number_iteration)\n \n return lad",
"min_run_count": 2,
"name": "basic_functions.ModificationFunctions.time_append_short_wide",
"number": 1,
@@ -571,12 +571,12 @@
"repeat": 3,
"rounds": 1,
"sample_time": 0.01,
"setup_cache_key": "basic_functions:278",
"setup_cache_key": "basic_functions:280",
"timeout": 6000,
"type": "time",
"unit": "seconds",
"version": "3678115ad2d40bf19062212095071431ff63cedc159661ee3056be7cbf109f98",
"warmup_time": -1
"version": "29559b9bb03141c96f325ce1019c95e339620bba07d872e91e78cf07c12e92ee",
"warmup_time": 0
},
"basic_functions.ModificationFunctions.time_append_single": {
"code": "class ModificationFunctions:\n def time_append_single(self, lad: LargeAppendDataModify, rows):\n self.lib.append(f\"sym\", self.df_append_single)\n\n def setup(self, lad: LargeAppendDataModify, rows):\n \n self.df_update_single = generate_pseudo_random_dataframe(1, \"s\", get_time_at_fraction_of_df(0.5, rows))\n self.df_update_half = generate_pseudo_random_dataframe(rows//2, \"s\", get_time_at_fraction_of_df(0.75, rows))\n self.df_update_upsert = generate_pseudo_random_dataframe(rows, \"s\", get_time_at_fraction_of_df(1.5, rows))\n self.df_append_single = generate_pseudo_random_dataframe(1, \"s\", get_time_at_fraction_of_df(1.1, rows))\n \n self.df_update_short_wide = generate_random_floats_dataframe_with_index(\n ModificationFunctions.WIDE_DF_ROWS, ModificationFunctions.WIDE_DF_COLS\n )\n \n self.ac = Arctic(ModificationFunctions.CONNECTION_STRING)\n self.lib = self.ac[get_prewritten_lib_name(rows)]\n self.lib_short_wide = self.ac[get_prewritten_lib_name(ModificationFunctions.WIDE_DF_ROWS)]\n\n def setup_cache(self):\n \n self.ac = Arctic(ModificationFunctions.CONNECTION_STRING)\n rows_values = ModificationFunctions.params\n \n self.init_dfs = {rows: generate_pseudo_random_dataframe(rows) for rows in rows_values}\n for rows in rows_values:\n lib_name = get_prewritten_lib_name(rows)\n self.ac.delete_library(lib_name)\n lib = self.ac.create_library(lib_name)\n df = self.init_dfs[rows]\n lib.write(\"sym\", df)\n print(f\"INITIAL DATAFRAME {rows} rows has Index {df.iloc[0].name} - {df.iloc[df.shape[0] - 1].name}\")\n \n lib_name = get_prewritten_lib_name(ModificationFunctions.WIDE_DF_ROWS)\n self.ac.delete_library(lib_name)\n lib = self.ac.create_library(lib_name)\n lib.write(\n \"short_wide_sym\",\n generate_random_floats_dataframe_with_index(\n ModificationFunctions.WIDE_DF_ROWS, ModificationFunctions.WIDE_DF_COLS\n ),\n )\n \n # We use the fact that we're running on LMDB to store a copy of the initial arctic directory.\n # Then on each teardown we restore the initial state by overwriting the modified with the original.\n copytree(ModificationFunctions.ARCTIC_DIR, ModificationFunctions.ARCTIC_DIR_ORIGINAL)\n \n number_iteration = ModificationFunctions.repeat * ModificationFunctions.number * ModificationFunctions.rounds\n \n lad = ModificationFunctions.LargeAppendDataModify(ModificationFunctions.params, number_iteration)\n \n return lad",
@@ -595,12 +595,12 @@
"repeat": 3,
"rounds": 1,
"sample_time": 0.01,
"setup_cache_key": "basic_functions:278",
"setup_cache_key": "basic_functions:280",
"timeout": 6000,
"type": "time",
"unit": "seconds",
"version": "8f398155deb342c70fe4c65e8da636b1f18c9296632b4649aab8dae306aa8453",
"warmup_time": -1
"warmup_time": 0
},
"basic_functions.ModificationFunctions.time_delete": {
"code": "class ModificationFunctions:\n def time_delete(self, lad: LargeAppendDataModify, rows):\n self.lib.delete(f\"sym\")\n\n def setup(self, lad: LargeAppendDataModify, rows):\n \n self.df_update_single = generate_pseudo_random_dataframe(1, \"s\", get_time_at_fraction_of_df(0.5, rows))\n self.df_update_half = generate_pseudo_random_dataframe(rows//2, \"s\", get_time_at_fraction_of_df(0.75, rows))\n self.df_update_upsert = generate_pseudo_random_dataframe(rows, \"s\", get_time_at_fraction_of_df(1.5, rows))\n self.df_append_single = generate_pseudo_random_dataframe(1, \"s\", get_time_at_fraction_of_df(1.1, rows))\n \n self.df_update_short_wide = generate_random_floats_dataframe_with_index(\n ModificationFunctions.WIDE_DF_ROWS, ModificationFunctions.WIDE_DF_COLS\n )\n \n self.ac = Arctic(ModificationFunctions.CONNECTION_STRING)\n self.lib = self.ac[get_prewritten_lib_name(rows)]\n self.lib_short_wide = self.ac[get_prewritten_lib_name(ModificationFunctions.WIDE_DF_ROWS)]\n\n def setup_cache(self):\n \n self.ac = Arctic(ModificationFunctions.CONNECTION_STRING)\n rows_values = ModificationFunctions.params\n \n self.init_dfs = {rows: generate_pseudo_random_dataframe(rows) for rows in rows_values}\n for rows in rows_values:\n lib_name = get_prewritten_lib_name(rows)\n self.ac.delete_library(lib_name)\n lib = self.ac.create_library(lib_name)\n df = self.init_dfs[rows]\n lib.write(\"sym\", df)\n print(f\"INITIAL DATAFRAME {rows} rows has Index {df.iloc[0].name} - {df.iloc[df.shape[0] - 1].name}\")\n \n lib_name = get_prewritten_lib_name(ModificationFunctions.WIDE_DF_ROWS)\n self.ac.delete_library(lib_name)\n lib = self.ac.create_library(lib_name)\n lib.write(\n \"short_wide_sym\",\n generate_random_floats_dataframe_with_index(\n ModificationFunctions.WIDE_DF_ROWS, ModificationFunctions.WIDE_DF_COLS\n ),\n )\n \n # We use the fact that we're running on LMDB to store a copy of the initial arctic directory.\n # Then on each teardown we restore the initial state by overwriting the modified with the original.\n copytree(ModificationFunctions.ARCTIC_DIR, ModificationFunctions.ARCTIC_DIR_ORIGINAL)\n \n number_iteration = ModificationFunctions.repeat * ModificationFunctions.number * ModificationFunctions.rounds\n \n lad = ModificationFunctions.LargeAppendDataModify(ModificationFunctions.params, number_iteration)\n \n return lad",
@@ -619,12 +619,12 @@
"repeat": 3,
"rounds": 1,
"sample_time": 0.01,
"setup_cache_key": "basic_functions:278",
"setup_cache_key": "basic_functions:280",
"timeout": 6000,
"type": "time",
"unit": "seconds",
"version": "6d8afae2414e0f842495a7962f5950472814bde20e99eebc474db6953d8e1ae3",
"warmup_time": -1
"warmup_time": 0
},
"basic_functions.ModificationFunctions.time_delete_short_wide": {
"code": "class ModificationFunctions:\n def time_delete_short_wide(self, lad: LargeAppendDataModify, rows):\n self.lib_short_wide.delete(\"short_wide_sym\")\n\n def setup(self, lad: LargeAppendDataModify, rows):\n \n self.df_update_single = generate_pseudo_random_dataframe(1, \"s\", get_time_at_fraction_of_df(0.5, rows))\n self.df_update_half = generate_pseudo_random_dataframe(rows//2, \"s\", get_time_at_fraction_of_df(0.75, rows))\n self.df_update_upsert = generate_pseudo_random_dataframe(rows, \"s\", get_time_at_fraction_of_df(1.5, rows))\n self.df_append_single = generate_pseudo_random_dataframe(1, \"s\", get_time_at_fraction_of_df(1.1, rows))\n \n self.df_update_short_wide = generate_random_floats_dataframe_with_index(\n ModificationFunctions.WIDE_DF_ROWS, ModificationFunctions.WIDE_DF_COLS\n )\n \n self.ac = Arctic(ModificationFunctions.CONNECTION_STRING)\n self.lib = self.ac[get_prewritten_lib_name(rows)]\n self.lib_short_wide = self.ac[get_prewritten_lib_name(ModificationFunctions.WIDE_DF_ROWS)]\n\n def setup_cache(self):\n \n self.ac = Arctic(ModificationFunctions.CONNECTION_STRING)\n rows_values = ModificationFunctions.params\n \n self.init_dfs = {rows: generate_pseudo_random_dataframe(rows) for rows in rows_values}\n for rows in rows_values:\n lib_name = get_prewritten_lib_name(rows)\n self.ac.delete_library(lib_name)\n lib = self.ac.create_library(lib_name)\n df = self.init_dfs[rows]\n lib.write(\"sym\", df)\n print(f\"INITIAL DATAFRAME {rows} rows has Index {df.iloc[0].name} - {df.iloc[df.shape[0] - 1].name}\")\n \n lib_name = get_prewritten_lib_name(ModificationFunctions.WIDE_DF_ROWS)\n self.ac.delete_library(lib_name)\n lib = self.ac.create_library(lib_name)\n lib.write(\n \"short_wide_sym\",\n generate_random_floats_dataframe_with_index(\n ModificationFunctions.WIDE_DF_ROWS, ModificationFunctions.WIDE_DF_COLS\n ),\n )\n \n # We use the fact that we're running on LMDB to store a copy of the initial arctic directory.\n # Then on each teardown we restore the initial state by overwriting the modified with the original.\n copytree(ModificationFunctions.ARCTIC_DIR, ModificationFunctions.ARCTIC_DIR_ORIGINAL)\n \n number_iteration = ModificationFunctions.repeat * ModificationFunctions.number * ModificationFunctions.rounds\n \n lad = ModificationFunctions.LargeAppendDataModify(ModificationFunctions.params, number_iteration)\n \n return lad",
@@ -643,12 +643,12 @@
"repeat": 3,
"rounds": 1,
"sample_time": 0.01,
"setup_cache_key": "basic_functions:278",
"setup_cache_key": "basic_functions:280",
"timeout": 6000,
"type": "time",
"unit": "seconds",
"version": "f867fc9cac4d0706b01166662af37434100460706d4f6118de0bc2e0e3087bae",
"warmup_time": -1
"warmup_time": 0
},
"basic_functions.ModificationFunctions.time_update_half": {
"code": "class ModificationFunctions:\n def time_update_half(self, lad: LargeAppendDataModify, rows):\n self.lib.update(f\"sym\", self.df_update_half)\n\n def setup(self, lad: LargeAppendDataModify, rows):\n \n self.df_update_single = generate_pseudo_random_dataframe(1, \"s\", get_time_at_fraction_of_df(0.5, rows))\n self.df_update_half = generate_pseudo_random_dataframe(rows//2, \"s\", get_time_at_fraction_of_df(0.75, rows))\n self.df_update_upsert = generate_pseudo_random_dataframe(rows, \"s\", get_time_at_fraction_of_df(1.5, rows))\n self.df_append_single = generate_pseudo_random_dataframe(1, \"s\", get_time_at_fraction_of_df(1.1, rows))\n \n self.df_update_short_wide = generate_random_floats_dataframe_with_index(\n ModificationFunctions.WIDE_DF_ROWS, ModificationFunctions.WIDE_DF_COLS\n )\n \n self.ac = Arctic(ModificationFunctions.CONNECTION_STRING)\n self.lib = self.ac[get_prewritten_lib_name(rows)]\n self.lib_short_wide = self.ac[get_prewritten_lib_name(ModificationFunctions.WIDE_DF_ROWS)]\n\n def setup_cache(self):\n \n self.ac = Arctic(ModificationFunctions.CONNECTION_STRING)\n rows_values = ModificationFunctions.params\n \n self.init_dfs = {rows: generate_pseudo_random_dataframe(rows) for rows in rows_values}\n for rows in rows_values:\n lib_name = get_prewritten_lib_name(rows)\n self.ac.delete_library(lib_name)\n lib = self.ac.create_library(lib_name)\n df = self.init_dfs[rows]\n lib.write(\"sym\", df)\n print(f\"INITIAL DATAFRAME {rows} rows has Index {df.iloc[0].name} - {df.iloc[df.shape[0] - 1].name}\")\n \n lib_name = get_prewritten_lib_name(ModificationFunctions.WIDE_DF_ROWS)\n self.ac.delete_library(lib_name)\n lib = self.ac.create_library(lib_name)\n lib.write(\n \"short_wide_sym\",\n generate_random_floats_dataframe_with_index(\n ModificationFunctions.WIDE_DF_ROWS, ModificationFunctions.WIDE_DF_COLS\n ),\n )\n \n # We use the fact that we're running on LMDB to store a copy of the initial arctic directory.\n # Then on each teardown we restore the initial state by overwriting the modified with the original.\n copytree(ModificationFunctions.ARCTIC_DIR, ModificationFunctions.ARCTIC_DIR_ORIGINAL)\n \n number_iteration = ModificationFunctions.repeat * ModificationFunctions.number * ModificationFunctions.rounds\n \n lad = ModificationFunctions.LargeAppendDataModify(ModificationFunctions.params, number_iteration)\n \n return lad",
@@ -667,12 +667,12 @@
"repeat": 3,
"rounds": 1,
"sample_time": 0.01,
"setup_cache_key": "basic_functions:278",
"setup_cache_key": "basic_functions:280",
"timeout": 6000,
"type": "time",
"unit": "seconds",
"version": "6a011f58b79c483849a70576915c2d56deed1227d38489a21140341ca860ce33",
"warmup_time": -1
"warmup_time": 0
},
"basic_functions.ModificationFunctions.time_update_short_wide": {
"code": "class ModificationFunctions:\n def time_update_short_wide(self, lad: LargeAppendDataModify, rows):\n self.lib_short_wide.update(\"short_wide_sym\", self.df_update_short_wide)\n\n def setup(self, lad: LargeAppendDataModify, rows):\n \n self.df_update_single = generate_pseudo_random_dataframe(1, \"s\", get_time_at_fraction_of_df(0.5, rows))\n self.df_update_half = generate_pseudo_random_dataframe(rows//2, \"s\", get_time_at_fraction_of_df(0.75, rows))\n self.df_update_upsert = generate_pseudo_random_dataframe(rows, \"s\", get_time_at_fraction_of_df(1.5, rows))\n self.df_append_single = generate_pseudo_random_dataframe(1, \"s\", get_time_at_fraction_of_df(1.1, rows))\n \n self.df_update_short_wide = generate_random_floats_dataframe_with_index(\n ModificationFunctions.WIDE_DF_ROWS, ModificationFunctions.WIDE_DF_COLS\n )\n \n self.ac = Arctic(ModificationFunctions.CONNECTION_STRING)\n self.lib = self.ac[get_prewritten_lib_name(rows)]\n self.lib_short_wide = self.ac[get_prewritten_lib_name(ModificationFunctions.WIDE_DF_ROWS)]\n\n def setup_cache(self):\n \n self.ac = Arctic(ModificationFunctions.CONNECTION_STRING)\n rows_values = ModificationFunctions.params\n \n self.init_dfs = {rows: generate_pseudo_random_dataframe(rows) for rows in rows_values}\n for rows in rows_values:\n lib_name = get_prewritten_lib_name(rows)\n self.ac.delete_library(lib_name)\n lib = self.ac.create_library(lib_name)\n df = self.init_dfs[rows]\n lib.write(\"sym\", df)\n print(f\"INITIAL DATAFRAME {rows} rows has Index {df.iloc[0].name} - {df.iloc[df.shape[0] - 1].name}\")\n \n lib_name = get_prewritten_lib_name(ModificationFunctions.WIDE_DF_ROWS)\n self.ac.delete_library(lib_name)\n lib = self.ac.create_library(lib_name)\n lib.write(\n \"short_wide_sym\",\n generate_random_floats_dataframe_with_index(\n ModificationFunctions.WIDE_DF_ROWS, ModificationFunctions.WIDE_DF_COLS\n ),\n )\n \n # We use the fact that we're running on LMDB to store a copy of the initial arctic directory.\n # Then on each teardown we restore the initial state by overwriting the modified with the original.\n copytree(ModificationFunctions.ARCTIC_DIR, ModificationFunctions.ARCTIC_DIR_ORIGINAL)\n \n number_iteration = ModificationFunctions.repeat * ModificationFunctions.number * ModificationFunctions.rounds\n \n lad = ModificationFunctions.LargeAppendDataModify(ModificationFunctions.params, number_iteration)\n \n return lad",
@@ -691,12 +691,12 @@
"repeat": 3,
"rounds": 1,
"sample_time": 0.01,
"setup_cache_key": "basic_functions:278",
"setup_cache_key": "basic_functions:280",
"timeout": 6000,
"type": "time",
"unit": "seconds",
"version": "111496c5bd4a4c498df28819d3cbcd9d699c4d3363ad3969f102a1d2076b3086",
"warmup_time": -1
"warmup_time": 0
},
"basic_functions.ModificationFunctions.time_update_single": {
"code": "class ModificationFunctions:\n def time_update_single(self, lad: LargeAppendDataModify, rows):\n self.lib.update(f\"sym\", self.df_update_single)\n\n def setup(self, lad: LargeAppendDataModify, rows):\n \n self.df_update_single = generate_pseudo_random_dataframe(1, \"s\", get_time_at_fraction_of_df(0.5, rows))\n self.df_update_half = generate_pseudo_random_dataframe(rows//2, \"s\", get_time_at_fraction_of_df(0.75, rows))\n self.df_update_upsert = generate_pseudo_random_dataframe(rows, \"s\", get_time_at_fraction_of_df(1.5, rows))\n self.df_append_single = generate_pseudo_random_dataframe(1, \"s\", get_time_at_fraction_of_df(1.1, rows))\n \n self.df_update_short_wide = generate_random_floats_dataframe_with_index(\n ModificationFunctions.WIDE_DF_ROWS, ModificationFunctions.WIDE_DF_COLS\n )\n \n self.ac = Arctic(ModificationFunctions.CONNECTION_STRING)\n self.lib = self.ac[get_prewritten_lib_name(rows)]\n self.lib_short_wide = self.ac[get_prewritten_lib_name(ModificationFunctions.WIDE_DF_ROWS)]\n\n def setup_cache(self):\n \n self.ac = Arctic(ModificationFunctions.CONNECTION_STRING)\n rows_values = ModificationFunctions.params\n \n self.init_dfs = {rows: generate_pseudo_random_dataframe(rows) for rows in rows_values}\n for rows in rows_values:\n lib_name = get_prewritten_lib_name(rows)\n self.ac.delete_library(lib_name)\n lib = self.ac.create_library(lib_name)\n df = self.init_dfs[rows]\n lib.write(\"sym\", df)\n print(f\"INITIAL DATAFRAME {rows} rows has Index {df.iloc[0].name} - {df.iloc[df.shape[0] - 1].name}\")\n \n lib_name = get_prewritten_lib_name(ModificationFunctions.WIDE_DF_ROWS)\n self.ac.delete_library(lib_name)\n lib = self.ac.create_library(lib_name)\n lib.write(\n \"short_wide_sym\",\n generate_random_floats_dataframe_with_index(\n ModificationFunctions.WIDE_DF_ROWS, ModificationFunctions.WIDE_DF_COLS\n ),\n )\n \n # We use the fact that we're running on LMDB to store a copy of the initial arctic directory.\n # Then on each teardown we restore the initial state by overwriting the modified with the original.\n copytree(ModificationFunctions.ARCTIC_DIR, ModificationFunctions.ARCTIC_DIR_ORIGINAL)\n \n number_iteration = ModificationFunctions.repeat * ModificationFunctions.number * ModificationFunctions.rounds\n \n lad = ModificationFunctions.LargeAppendDataModify(ModificationFunctions.params, number_iteration)\n \n return lad",
@@ -715,12 +715,12 @@
"repeat": 3,
"rounds": 1,
"sample_time": 0.01,
"setup_cache_key": "basic_functions:278",
"setup_cache_key": "basic_functions:280",
"timeout": 6000,
"type": "time",
"unit": "seconds",
"version": "c45c168d5713f3028a9a5b97959d52116c8d228870ad580be06d86336d2476c6",
"warmup_time": -1
"warmup_time": 0
},
"basic_functions.ModificationFunctions.time_update_upsert": {
"code": "class ModificationFunctions:\n def time_update_upsert(self, lad: LargeAppendDataModify, rows):\n self.lib.update(f\"sym\", self.df_update_upsert, upsert=True)\n\n def setup(self, lad: LargeAppendDataModify, rows):\n \n self.df_update_single = generate_pseudo_random_dataframe(1, \"s\", get_time_at_fraction_of_df(0.5, rows))\n self.df_update_half = generate_pseudo_random_dataframe(rows//2, \"s\", get_time_at_fraction_of_df(0.75, rows))\n self.df_update_upsert = generate_pseudo_random_dataframe(rows, \"s\", get_time_at_fraction_of_df(1.5, rows))\n self.df_append_single = generate_pseudo_random_dataframe(1, \"s\", get_time_at_fraction_of_df(1.1, rows))\n \n self.df_update_short_wide = generate_random_floats_dataframe_with_index(\n ModificationFunctions.WIDE_DF_ROWS, ModificationFunctions.WIDE_DF_COLS\n )\n \n self.ac = Arctic(ModificationFunctions.CONNECTION_STRING)\n self.lib = self.ac[get_prewritten_lib_name(rows)]\n self.lib_short_wide = self.ac[get_prewritten_lib_name(ModificationFunctions.WIDE_DF_ROWS)]\n\n def setup_cache(self):\n \n self.ac = Arctic(ModificationFunctions.CONNECTION_STRING)\n rows_values = ModificationFunctions.params\n \n self.init_dfs = {rows: generate_pseudo_random_dataframe(rows) for rows in rows_values}\n for rows in rows_values:\n lib_name = get_prewritten_lib_name(rows)\n self.ac.delete_library(lib_name)\n lib = self.ac.create_library(lib_name)\n df = self.init_dfs[rows]\n lib.write(\"sym\", df)\n print(f\"INITIAL DATAFRAME {rows} rows has Index {df.iloc[0].name} - {df.iloc[df.shape[0] - 1].name}\")\n \n lib_name = get_prewritten_lib_name(ModificationFunctions.WIDE_DF_ROWS)\n self.ac.delete_library(lib_name)\n lib = self.ac.create_library(lib_name)\n lib.write(\n \"short_wide_sym\",\n generate_random_floats_dataframe_with_index(\n ModificationFunctions.WIDE_DF_ROWS, ModificationFunctions.WIDE_DF_COLS\n ),\n )\n \n # We use the fact that we're running on LMDB to store a copy of the initial arctic directory.\n # Then on each teardown we restore the initial state by overwriting the modified with the original.\n copytree(ModificationFunctions.ARCTIC_DIR, ModificationFunctions.ARCTIC_DIR_ORIGINAL)\n \n number_iteration = ModificationFunctions.repeat * ModificationFunctions.number * ModificationFunctions.rounds\n \n lad = ModificationFunctions.LargeAppendDataModify(ModificationFunctions.params, number_iteration)\n \n return lad",
@@ -739,12 +739,12 @@
"repeat": 3,
"rounds": 1,
"sample_time": 0.01,
"setup_cache_key": "basic_functions:278",
"setup_cache_key": "basic_functions:280",
"timeout": 6000,
"type": "time",
"unit": "seconds",
"version": "7f139bf03457104abe937914aa3572503ed52330b3a271d82112696060331d8f",
"warmup_time": -1
"warmup_time": 0
},
"bi_benchmarks.BIBenchmarks.peakmem_query_groupby_city_count_all": {
"code": "class BIBenchmarks:\n def peakmem_query_groupby_city_count_all(self, times_bigger) -> pd.DataFrame:\n return self.query_groupby_city_count_all(times_bigger)\n\n def setup(self, num_rows):\n self.ac = Arctic(f\"lmdb://opensource_datasets_{self.lib_name}?map_size=20GB\")\n self.lib = self.ac.get_library(self.lib_name)\n\n def setup_cache(self):\n \n start_time = time.time()\n \n file = os.path.join(Path(__file__).resolve().parent.parent, BIBenchmarks.CITY_BI_FILE2)\n if (not os.path.exists(file)) :\n dfo = download_and_process_city_to_parquet(file)\n dff = pd.read_parquet(file)\n pd.testing.assert_frame_equal(dfo,dff)\n else:\n print(\"Parquet file exists!\")\n \n # read data from bz.2 file\n # abs_path = os.path.join(Path(__file__).resolve().parent.parent,BIBenchmarks.CITY_BI_FILE)\n # self.df : pd.DataFrame = process_city(abs_path)\n \n self.df : pd.DataFrame = pd.read_parquet(file)\n \n self.ac = Arctic(f\"lmdb://opensource_datasets_{self.lib_name}?map_size=20GB\")\n self.ac.delete_library(self.lib_name)\n self.lib = self.ac.create_library(self.lib_name)\n \n print(\"The procedure is creating N times larger dataframes\")\n print(\"by concatenating original DF N times\")\n print(\"Size of original Dataframe: \", self.df.shape[0])\n for num in BIBenchmarks.params:\n _df = pd.concat([self.df] * num)\n print(\"DF for iterration xSize original ready: \", num)\n self.lib.write(f\"{self.symbol}{num}\", _df)\n \n print(\"If pandas query produces different dataframe than arctic one stop tests!\")\n print(\"This will mean query problem is there most likely\")\n \n print(\"Pre-check correctness for query_groupby_city_count_all\")\n _df = self.df.copy(deep=True)\n arctic_df = self.time_query_groupby_city_count_all(BIBenchmarks.params[0])\n _df = get_query_groupby_city_count_all(_df)\n assert_frame_equal(_df, arctic_df)\n \n print(\"Pre-check correctness for query_groupby_city_count_isin_filter\")\n _df = self.df.copy(deep=True)\n arctic_df = self.time_query_groupby_city_count_isin_filter(BIBenchmarks.params[0])\n _df = get_query_groupby_city_count_isin_filter(_df)\n assert_frame_equal(_df, arctic_df)\n \n print(\"Pre-check correctness for query_groupby_city_count_filter_two_aggregations\")\n _df = self.df.copy(deep=True)\n arctic_df = self.time_query_groupby_city_count_filter_two_aggregations(BIBenchmarks.params[0])\n _df = get_query_groupby_city_count_filter_two_aggregations(_df)\n assert_frame_equal(_df, arctic_df)\n \n print(\"All pre-checks completed SUCCESSFULLY. Time: \", time.time() - start_time)\n \n del self.ac",
@@ -758,7 +758,7 @@
"10"
]
],
"setup_cache_key": "bi_benchmarks:68",
"setup_cache_key": "bi_benchmarks:72",
"timeout": 6000,
"type": "peakmemory",
"unit": "bytes",
@@ -776,7 +776,7 @@
"10"
]
],
"setup_cache_key": "bi_benchmarks:68",
"setup_cache_key": "bi_benchmarks:72",
"timeout": 6000,
"type": "peakmemory",
"unit": "bytes",
@@ -794,7 +794,7 @@
"10"
]
],
"setup_cache_key": "bi_benchmarks:68",
"setup_cache_key": "bi_benchmarks:72",
"timeout": 6000,
"type": "peakmemory",
"unit": "bytes",
@@ -812,7 +812,7 @@
"10"
]
],
"setup_cache_key": "bi_benchmarks:68",
"setup_cache_key": "bi_benchmarks:72",
"timeout": 6000,
"type": "peakmemory",
"unit": "bytes",
@@ -835,7 +835,7 @@
"repeat": 0,
"rounds": 2,
"sample_time": 0.01,
"setup_cache_key": "bi_benchmarks:68",
"setup_cache_key": "bi_benchmarks:72",
"timeout": 6000,
"type": "time",
"unit": "seconds",
@@ -859,7 +859,7 @@
"repeat": 0,
"rounds": 2,
"sample_time": 0.01,
"setup_cache_key": "bi_benchmarks:68",
"setup_cache_key": "bi_benchmarks:72",
"timeout": 6000,
"type": "time",
"unit": "seconds",
@@ -883,7 +883,7 @@
"repeat": 0,
"rounds": 2,
"sample_time": 0.01,
"setup_cache_key": "bi_benchmarks:68",
"setup_cache_key": "bi_benchmarks:72",
"timeout": 6000,
"type": "time",
"unit": "seconds",
@@ -907,15 +907,15 @@
"repeat": 0,
"rounds": 2,
"sample_time": 0.01,
"setup_cache_key": "bi_benchmarks:68",
"setup_cache_key": "bi_benchmarks:72",
"timeout": 6000,
"type": "time",
"unit": "seconds",
"version": "c746faf05e4dbb872efa770cbe5ae057dafe3ecc1fb8969d1026db2dee7bfd99",
"warmup_time": -1
},
"finalize_staged_data.FinalizeStagedData.peakmem_finalize_staged_data": {
"code": "class FinalizeStagedData:\n def peakmem_finalize_staged_data(self, cache:CachedDFGenerator, param:int):\n print(\">>> Library:\", self.lib)\n print(\">>> Symbol:\", self.symbol)\n self.lib.finalize_staged_data(self.symbol, mode=StagedDataFinalizeMethod.WRITE)\n\n def setup(self, cache:CachedDFGenerator, param:int):\n cachedDF = cache\n \n # Unfortunately there is no way to tell asv to run single time\n # each of finalize_stage_data() tests if we do the large setup in the\n # setup_cache() method. We can only force it to work with single execution\n # if the symbol setup with stage data is in the setup() method\n \n self.ac = Arctic(f\"lmdb://{self.lib_name}{param}?map_size=40GB\")\n self.ac.delete_library(self.lib_name)\n self.lib = self.ac.create_library(self.lib_name)\n \n INITIAL_TIMESTAMP: TimestampNumber = TimestampNumber(0, cachedDF.TIME_UNIT) # Synchronize index frequency\n \n df = cachedDF.generate_dataframe_timestamp_indexed(200, 0, cachedDF.TIME_UNIT)\n list_of_chunks = [10000] * param\n self.symbol\n \n self.lib.write(self.symbol, data=df, prune_previous_versions=True)\n stage_chunks(self.lib, self.symbol, cachedDF, INITIAL_TIMESTAMP, list_of_chunks)\n\n def setup_cache(self):\n # Generating dataframe with all kind of supported data types\n cachedDF = CachedDFGenerator(350000, [5])\n return cachedDF",
"code": "class FinalizeStagedData:\n def peakmem_finalize_staged_data(self, cache: CachedDFGenerator, param: int):\n print(\">>> Library:\", self.lib)\n print(\">>> Symbol:\", self.symbol)\n self.lib.finalize_staged_data(self.symbol, mode=StagedDataFinalizeMethod.WRITE)\n\n def setup(self, cache: CachedDFGenerator, param: int):\n cachedDF = cache\n \n # Unfortunately there is no way to tell asv to run single time\n # each of finalize_stage_data() tests if we do the large setup in the\n # setup_cache() method. We can only force it to work with single execution\n # if the symbol setup with stage data is in the setup() method\n \n self.ac = Arctic(f\"lmdb://{self.lib_name}{param}?map_size=40GB\")\n self.ac.delete_library(self.lib_name)\n self.lib = self.ac.create_library(self.lib_name)\n \n INITIAL_TIMESTAMP: TimestampNumber = TimestampNumber(\n 0, cachedDF.TIME_UNIT\n ) # Synchronize index frequency\n \n df = cachedDF.generate_dataframe_timestamp_indexed(200, 0, cachedDF.TIME_UNIT)\n list_of_chunks = [10000] * param\n self.symbol\n \n self.lib.write(self.symbol, data=df, prune_previous_versions=True)\n stage_chunks(self.lib, self.symbol, cachedDF, INITIAL_TIMESTAMP, list_of_chunks)\n\n def setup_cache(self):\n # Generating dataframe with all kind of supported data types\n cachedDF = CachedDFGenerator(350000, [5])\n return cachedDF",
"name": "finalize_staged_data.FinalizeStagedData.peakmem_finalize_staged_data",
"param_names": [
"param1"
@@ -926,14 +926,14 @@
"2000"
]
],
"setup_cache_key": "finalize_staged_data:38",
"setup_cache_key": "finalize_staged_data:40",
"timeout": 600,
"type": "peakmemory",
"unit": "bytes",
"version": "9dece3813ff661e5876028ee105ad9549ad62e7997ade7bf3ed4cb43f77854a7"
"version": "9dcfdaf896125a0fe0d16b5538b5a8b556997064e107c8b58b93dc6e6f32d8b1"
},
"finalize_staged_data.FinalizeStagedData.time_finalize_staged_data": {
"code": "class FinalizeStagedData:\n def time_finalize_staged_data(self, cache:CachedDFGenerator, param:int):\n print(\">>> Library:\", self.lib)\n print(\">>> Symbol:\", self.symbol)\n self.lib.finalize_staged_data(self.symbol, mode=StagedDataFinalizeMethod.WRITE)\n\n def setup(self, cache:CachedDFGenerator, param:int):\n cachedDF = cache\n \n # Unfortunately there is no way to tell asv to run single time\n # each of finalize_stage_data() tests if we do the large setup in the\n # setup_cache() method. We can only force it to work with single execution\n # if the symbol setup with stage data is in the setup() method\n \n self.ac = Arctic(f\"lmdb://{self.lib_name}{param}?map_size=40GB\")\n self.ac.delete_library(self.lib_name)\n self.lib = self.ac.create_library(self.lib_name)\n \n INITIAL_TIMESTAMP: TimestampNumber = TimestampNumber(0, cachedDF.TIME_UNIT) # Synchronize index frequency\n \n df = cachedDF.generate_dataframe_timestamp_indexed(200, 0, cachedDF.TIME_UNIT)\n list_of_chunks = [10000] * param\n self.symbol\n \n self.lib.write(self.symbol, data=df, prune_previous_versions=True)\n stage_chunks(self.lib, self.symbol, cachedDF, INITIAL_TIMESTAMP, list_of_chunks)\n\n def setup_cache(self):\n # Generating dataframe with all kind of supported data types\n cachedDF = CachedDFGenerator(350000, [5])\n return cachedDF",
"code": "class FinalizeStagedData:\n def time_finalize_staged_data(self, cache: CachedDFGenerator, param: int):\n print(\">>> Library:\", self.lib)\n print(\">>> Symbol:\", self.symbol)\n self.lib.finalize_staged_data(self.symbol, mode=StagedDataFinalizeMethod.WRITE)\n\n def setup(self, cache: CachedDFGenerator, param: int):\n cachedDF = cache\n \n # Unfortunately there is no way to tell asv to run single time\n # each of finalize_stage_data() tests if we do the large setup in the\n # setup_cache() method. We can only force it to work with single execution\n # if the symbol setup with stage data is in the setup() method\n \n self.ac = Arctic(f\"lmdb://{self.lib_name}{param}?map_size=40GB\")\n self.ac.delete_library(self.lib_name)\n self.lib = self.ac.create_library(self.lib_name)\n \n INITIAL_TIMESTAMP: TimestampNumber = TimestampNumber(\n 0, cachedDF.TIME_UNIT\n ) # Synchronize index frequency\n \n df = cachedDF.generate_dataframe_timestamp_indexed(200, 0, cachedDF.TIME_UNIT)\n list_of_chunks = [10000] * param\n self.symbol\n \n self.lib.write(self.symbol, data=df, prune_previous_versions=True)\n stage_chunks(self.lib, self.symbol, cachedDF, INITIAL_TIMESTAMP, list_of_chunks)\n\n def setup_cache(self):\n # Generating dataframe with all kind of supported data types\n cachedDF = CachedDFGenerator(350000, [5])\n return cachedDF",
"min_run_count": 1,
"name": "finalize_staged_data.FinalizeStagedData.time_finalize_staged_data",
"number": 1,
@@ -949,15 +949,15 @@
"repeat": 1,
"rounds": 1,
"sample_time": 0.01,
"setup_cache_key": "finalize_staged_data:38",
"setup_cache_key": "finalize_staged_data:40",
"timeout": 600,
"type": "time",
"unit": "seconds",
"version": "670c39a4321a96cffa7d92609c5817cd36f3cba1cce9929e9c41e246005a0b62",
"version": "c3c02d1e2369dd420b2e241fc69c4c8872d31da89d0c19c1111d503a84fb9521",
"warmup_time": -1
},
"finalize_staged_data.FinalizeStagedDataWiderDataframeX3.peakmem_finalize_staged_data": {
"code": "class FinalizeStagedDataWiderDataframeX3:\n def peakmem_finalize_staged_data(self, cache:CachedDFGenerator, param:int):\n if (not SLOW_TESTS):\n raise SkipNotImplemented (\"Slow tests are skipped\")\n super().peakmem_finalize_staged_data(cache,param)\n\n def setup(self, cache:CachedDFGenerator, param:int):\n if (not SLOW_TESTS):\n raise SkipNotImplemented (\"Slow tests are skipped\")\n super().setup(cache,param)\n\n def setup_cache(self):\n # Generating dataframe with all kind of supported data type\n cachedDF = CachedDFGenerator(350000, [5, 25, 50]) # 3 times wider DF with bigger string columns\n return cachedDF",
"code": "class FinalizeStagedDataWiderDataframeX3:\n def peakmem_finalize_staged_data(self, cache: CachedDFGenerator, param: int):\n if not SLOW_TESTS:\n raise SkipNotImplemented(\"Slow tests are skipped\")\n super().peakmem_finalize_staged_data(cache, param)\n\n def setup(self, cache: CachedDFGenerator, param: int):\n if not SLOW_TESTS:\n raise SkipNotImplemented(\"Slow tests are skipped\")\n super().setup(cache, param)\n\n def setup_cache(self):\n # Generating dataframe with all kind of supported data type\n cachedDF = CachedDFGenerator(\n 350000, [5, 25, 50]\n ) # 3 times wider DF with bigger string columns\n return cachedDF",
"name": "finalize_staged_data.FinalizeStagedDataWiderDataframeX3.peakmem_finalize_staged_data",
"param_names": [
"param1"
@@ -968,14 +968,14 @@
"2000"
]
],
"setup_cache_key": "finalize_staged_data:82",
"setup_cache_key": "finalize_staged_data:90",
"timeout": 600,
"type": "peakmemory",
"unit": "bytes",
"version": "34e03b8d818b1f727b831791e307ca5dd4a1b434643fa5bc0c98c49a0b455523"
"version": "90cde854b0e3346d50d63ab29182811b92cd7fae6c4ce0be4011a62c534e5e0f"
},
"finalize_staged_data.FinalizeStagedDataWiderDataframeX3.time_finalize_staged_data": {
"code": "class FinalizeStagedDataWiderDataframeX3:\n def time_finalize_staged_data(self, cache:CachedDFGenerator, param:int):\n if (not SLOW_TESTS):\n raise SkipNotImplemented (\"Slow tests are skipped\")\n super().time_finalize_staged_data(cache,param)\n\n def setup(self, cache:CachedDFGenerator, param:int):\n if (not SLOW_TESTS):\n raise SkipNotImplemented (\"Slow tests are skipped\")\n super().setup(cache,param)\n\n def setup_cache(self):\n # Generating dataframe with all kind of supported data type\n cachedDF = CachedDFGenerator(350000, [5, 25, 50]) # 3 times wider DF with bigger string columns\n return cachedDF",
"code": "class FinalizeStagedDataWiderDataframeX3:\n def time_finalize_staged_data(self, cache: CachedDFGenerator, param: int):\n if not SLOW_TESTS:\n raise SkipNotImplemented(\"Slow tests are skipped\")\n super().time_finalize_staged_data(cache, param)\n\n def setup(self, cache: CachedDFGenerator, param: int):\n if not SLOW_TESTS:\n raise SkipNotImplemented(\"Slow tests are skipped\")\n super().setup(cache, param)\n\n def setup_cache(self):\n # Generating dataframe with all kind of supported data type\n cachedDF = CachedDFGenerator(\n 350000, [5, 25, 50]\n ) # 3 times wider DF with bigger string columns\n return cachedDF",
"min_run_count": 1,
"name": "finalize_staged_data.FinalizeStagedDataWiderDataframeX3.time_finalize_staged_data",
"number": 1,
@@ -991,11 +991,11 @@
"repeat": 1,
"rounds": 1,
"sample_time": 0.01,
"setup_cache_key": "finalize_staged_data:82",
"setup_cache_key": "finalize_staged_data:90",
"timeout": 600,
"type": "time",
"unit": "seconds",
"version": "317bc16b3c8e30237836ae85a390b11995fe361c69b31984f0e5d32a344cd571",
"version": "a7673a8f559a07772f7a7a8e105774090534c7eb1b644b2d6247e7b792645809",
"warmup_time": -1
},
"list_functions.ListFunctions.peakmem_list_symbols": {
