diff --git a/app/utils/result_class.py b/app/utils/result_class.py
index ee9d128..096c71e 100644
--- a/app/utils/result_class.py
+++ b/app/utils/result_class.py
@@ -121,6 +121,8 @@ def populate_row(df_name, leader, res_as, res_t, res_h, res_mj, res_ag, s):
 
     return new_row_1, new_row_2, new_row_pipelines, new_row_options_end
 
+
+# Function for adding the 'date' column to each dataframe
 def add_date(dfs, date):
     for df in dfs:
         df.insert(0, 'date', date)
diff --git a/app/webapp/frontend.py b/app/webapp/frontend.py
index 2ad1ed5..0bfe948 100644
--- a/app/webapp/frontend.py
+++ b/app/webapp/frontend.py
@@ -20,7 +20,7 @@
 # Definition of the sidebar
 sidebar = html.Div(
     [
-        html.H2("AutoML BenchMark", className="display-4"),
+        html.H2("AutoML Benchmark", className="display-4"),
         html.Hr(),
         html.P(
             "Choose an Option", className="lead"
@@ -29,10 +29,10 @@
         [
             dbc.NavLink("Home", href="/", active="exact"),
             dbc.NavLink("OpenML Benchmark", href="/openml", active="exact"),
-            dbc.NavLink("Kaggle BenchMark", href="/kaggle", active="exact"),
-            dbc.NavLink("Test BenchMark", href="/test", active="exact"),
-            dbc.NavLink("Past Results OpenML", href="/results-openml", active="exact"),
-            dbc.NavLink("Past Results Kaggle", href="/results-kaggle", active="exact"),
+            dbc.NavLink("Kaggle Benchmark", href="/kaggle", active="exact"),
+            dbc.NavLink("Test Benchmark", href="/test", active="exact"),
+            dbc.NavLink("Past OpenML Benchmark", href="/results-openml", active="exact"),
+            dbc.NavLink("Past Kaggle Benchmark", href="/results-kaggle", active="exact"),
         ],
         vertical=True,
         pills=True,
diff --git a/app/webapp/utils.py b/app/webapp/utils.py
index fcad5a7..bc7a1c5 100644
--- a/app/webapp/utils.py
+++ b/app/webapp/utils.py
@@ -108,7 +108,7 @@ def get_dfs_from_timestamp(timestamp, type_bench): # Return a list of lists of d
             df.append(pd.read_csv('./results/'+ type_bench +'/'+ts+'/options_start.csv'))
         dfs.append(df)
 
-    return dfs # from 0 to 8
+    return dfs
 
 
 # Function for managing the display of tables and graphs of the benchmarks
@@ -166,11 +166,13 @@ def get_dfs_to_compare(dfs_class, dfs_reg, options_start, type, all_list):
             else:
                 reg = [None]
             time_limit = (pd.read_csv('./results/'+ type +'/'+past_bench+'/options_start.csv')).iloc[0].to_list()
-            # Check if the benchmark is comparable to the one selected initially
             #if collections.Counter(cls) == collections.Counter(dfs_class) and collections.Counter(reg) == collections.Counter(dfs_reg) and collections.Counter(pip) != collections.Counter(options_end):
             # For now, benchmarks with the same dataframes but different start time limits can be compared; all comparisons are made against the first benchmark selected
             # Still to decide whether to implement the comparison between all the selected benchmarks
+
+            # Check if the benchmark is comparable to the one selected initially
             if collections.Counter(cls) == collections.Counter(dfs_class) and collections.Counter(reg) == collections.Counter(dfs_reg) and collections.Counter(time_limit) != collections.Counter(options_start):
                 dfs_comapre.append({'label': past_bench, 'value': past_bench})
     return dfs_comapre
diff --git a/makefile b/makefile
index c7f2827..584f97e 100644
--- a/makefile
+++ b/makefile
@@ -1,7 +1,6 @@
 install:
 	pip3 install -Ur requirements.txt
 	pip3 install -q -U git+https://github.com/mljar/mljar-supervised.git@master
-	pip3 uninstall bokeh -y
 
 remove:
 	pip3 uninstall -r requirements.txt