diff --git a/perun/collect/kperf/parser.py b/perun/collect/kperf/parser.py
index 3ff78b71..b86d5fcb 100755
--- a/perun/collect/kperf/parser.py
+++ b/perun/collect/kperf/parser.py
@@ -8,9 +8,9 @@
 from typing import Any
 
 # Third-Party Imports
-import progressbar
 
 # Perun Imports
+from perun.utils import log
 
 
 def parse_events(perf_events: list[str]) -> list[dict[str, Any]]:
@@ -24,7 +24,7 @@ def parse_events(perf_events: list[str]) -> list[dict[str, Any]]:
     :return: list of resources
     """
     resources = []
-    for event in progressbar.progressbar(perf_events):
+    for event in log.progress(perf_events, description="Parsing Events"):
         if event.strip():
             *record, samples = event.split(" ")
             parts = " ".join(record).split(";")
diff --git a/perun/collect/kperf/run.py b/perun/collect/kperf/run.py
index 62681afb..1cf13cc6 100755
--- a/perun/collect/kperf/run.py
+++ b/perun/collect/kperf/run.py
@@ -10,7 +10,6 @@
 
 # Third-Party Imports
 import click
-import progressbar
 
 # Perun Imports
 from perun.collect.kperf import parser
@@ -92,13 +91,13 @@ def collect(executable: Executable, **kwargs: Any) -> tuple[CollectStatus, str,
     repeats = kwargs["repeat"]
 
     log.minor_info(f"Running {log.highlight(warmups)} warmup iterations")
-    for _ in progressbar.progressbar(range(0, warmups)):
+    for _ in log.progress(range(0, warmups), description="Warmup"):
         run_perf(executable, "warmup", kwargs.get("with_sudo", False))
 
     log.minor_info(f"Running {log.highlight(repeats)} iterations")
     before_time = time.time()
     kwargs["raw_data"] = []
-    for _ in progressbar.progressbar(range(0, repeats)):
+    for _ in log.progress(range(0, repeats), description="Main Run"):
         output = run_perf(executable, "main_run", kwargs.get("with_sudo", False))
         kwargs["raw_data"].extend(output.splitlines())
     kwargs["time"] = time.time() - before_time
diff --git a/perun/collect/time/run.py b/perun/collect/time/run.py
index 89037294..e25814e1 100644
--- a/perun/collect/time/run.py
+++ b/perun/collect/time/run.py
@@ -12,7 +12,6 @@
 
 # Third-Party Imports
 import click
-import progressbar
 
 # Perun Imports
 from perun.logic import runner
@@ -39,7 +38,7 @@ def collect(
     """
     log.major_info("Running time collector")
     log.minor_info("Warming up")
-    for _ in progressbar.progressbar(range(0, warmup)):
+    for _ in log.progress(range(0, warmup), description="Warmup"):
         command = " ".join(["time -p", str(executable)]).split(" ")
         commands.get_stdout_from_external_command(
             command, log_tag="warmup", log_verbosity=log.VERBOSE_RELEASE
@@ -50,7 +49,7 @@ def collect(
     times = []
 
     before_timing = systime.time()
-    for timing in progressbar.progressbar(range(1, repeat + 1)):
+    for timing in log.progress(range(1, repeat + 1), description="Main Run"):
         command = " ".join(["time -p", str(executable)]).split(" ")
         collected_data = commands.get_stdout_from_external_command(
             command, log_tag="main_run", log_verbosity=log.VERBOSE_RELEASE
diff --git a/perun/fuzz/evaluate/by_coverage.py b/perun/fuzz/evaluate/by_coverage.py
index 6030f15e..183160ed 100644
--- a/perun/fuzz/evaluate/by_coverage.py
+++ b/perun/fuzz/evaluate/by_coverage.py
@@ -111,7 +111,7 @@ def get_initial_coverage(
     # run program with each seed
     log.minor_info("Running program with seeds")
     log.increase_indent()
-    for seed in seeds:
+    for seed in log.progress(seeds, description="Running Seeds"):
         prepare_workspace(fuzzing_config.coverage.gcno_path)
 
         command = " ".join([os.path.abspath(executable.cmd), seed.path])
diff --git a/perun/fuzz/evaluate/by_perun.py b/perun/fuzz/evaluate/by_perun.py
index 6fb9e964..b7c34c58 100644
--- a/perun/fuzz/evaluate/by_perun.py
+++ b/perun/fuzz/evaluate/by_perun.py
@@ -15,6 +15,7 @@
 import perun.check.factory as check
 import perun.logic.runner as run
 from perun.utils.structs import PerformanceChange
+from perun.utils import log
 
 if TYPE_CHECKING:
     from perun.fuzz.structs import Mutation
@@ -58,7 +59,7 @@ def baseline_testing(
         )
     )
 
-    for file in seeds[1:]:
+    for file in log.progress(seeds[1:], description="Running Seeds"):
         # target profile
         target_pg = list(
             run.generate_profiles_for(
diff --git a/perun/fuzz/filesystem.py b/perun/fuzz/filesystem.py
index a9641cb3..b9ef9275 100644
--- a/perun/fuzz/filesystem.py
+++ b/perun/fuzz/filesystem.py
@@ -80,7 +80,7 @@ def del_temp_files(
     :param output_dir: path to directory, where fuzzed files are stored
     """
     log.minor_info("Removing mutations")
-    for mutation in progressbar.progressbar(parents):
+    for mutation in log.progress(parents, description="Removing Mutations"):
         if (
             mutation not in fuzz_progress.final_results
             and mutation not in fuzz_progress.hangs
diff --git a/perun/fuzz/interpret.py b/perun/fuzz/interpret.py
index 6bc5c81d..28e151a7 100644
--- a/perun/fuzz/interpret.py
+++ b/perun/fuzz/interpret.py
@@ -8,7 +8,6 @@
 import os
 
 # Third-Party Imports
-import progressbar
 from scipy.stats import mstats
 import matplotlib.pyplot as plt
 
@@ -45,7 +44,7 @@ def save_anomalies(anomalies: list[Mutation], anomaly_type: str, file_handle: Te
     if anomalies:
         log.minor_info(f"Saving {log.highlight(anomaly_type + 's')}")
         file_handle.write(f"{anomaly_type.capitalize()}s:\n")
-        for anomaly in progressbar.progressbar(anomalies):
+        for anomaly in log.progress(anomalies, description="Saving Anomalies"):
             file_handle.write(anomaly.path + " " + str(anomaly.history) + "\n")
         log.newline()
 
@@ -76,7 +75,7 @@ def save_log_files(log_dir: str, fuzz_progress: FuzzingProgress) -> None:
     log.minor_success("Saving coverage time series")
 
     log.minor_info("Saving log files")
-    for mut in progressbar.progressbar(fuzz_progress.parents):
+    for mut in log.progress(fuzz_progress.parents, description="Saving Mutations"):
         results_data_file.write(
             str(mut.fitness)
             + " "
@@ -270,7 +269,7 @@ def files_diff(fuzz_progress: FuzzingProgress, diffs_dir: str) -> None:
     ]:
         if mutations:
             log.minor_info(mutation_type)
-        for res in progressbar.progressbar(mutations):
+        for res in log.progress(mutations, description="Computing Deltas"):
             if res.predecessor is not None:
                 pred = streams.safely_load_file(res.predecessor.path)
                 result = streams.safely_load_file(res.path)
diff --git a/perun/logic/commands.py b/perun/logic/commands.py
index 6345fffd..6f5ada39 100644
--- a/perun/logic/commands.py
+++ b/perun/logic/commands.py
@@ -16,7 +16,6 @@
 import subprocess
 
 # Third-Party Imports
-import progressbar
 
 # Perun Imports
 from perun.logic import pcs, config as perun_config, store, index, temp, stats
@@ -269,7 +268,7 @@ def add(
     """
     perun_log.major_info("Adding profiles")
     added_profile_count = 0
-    for profile_name in profile_names:
+    for profile_name in perun_log.progress(profile_names, description="Adding Profiles"):
         # Test if the given profile exists (This should hold always, or not?)
         reg_rel_path = os.path.relpath(profile_name)
         if not os.path.exists(profile_name):
@@ -1117,7 +1116,7 @@ def get_untracked_profiles() -> list[ProfileInfo]:
             f"{perun_log.highlight(str(len(untracked_list)))} files are not registered in pending index."
         )
         perun_log.minor_info("Refreshing pending index: this might take some time.")
-    for untracked_path in progressbar.progressbar(untracked_list):
+    for untracked_path in perun_log.progress(untracked_list, description="Processing Untracked"):
         try:
             real_path = os.path.join(pcs.get_job_directory(), untracked_path)
             time = timestamps.timestamp_to_str(os.stat(real_path).st_mtime)
diff --git a/perun/logic/runner.py b/perun/logic/runner.py
index 28894e5a..c13d6d6e 100644
--- a/perun/logic/runner.py
+++ b/perun/logic/runner.py
@@ -526,7 +526,7 @@ def generate_jobs_on_current_working_dir(
     log.major_info("Running Jobs")
     log.increase_indent()
     job_counter = 1
-    for job_cmd, workloads_per_cmd in job_matrix.items():
+    for job_cmd, workloads_per_cmd in log.progress(job_matrix.items(), description="Running Jobs"):
         for workload, jobs_per_workload in workloads_per_cmd.items():
             # Prepare the specification
             generator_spec = workload_generators_specs.get(
diff --git a/perun/profile/convert.py b/perun/profile/convert.py
index 13ba3a47..1ccc1e6c 100644
--- a/perun/profile/convert.py
+++ b/perun/profile/convert.py
@@ -27,6 +27,7 @@
 # Perun Imports
 from perun.postprocess.regression_analysis import transform
 from perun.profile import query
+from perun.utils import log
 from perun.utils.common import common_kit
 
 if TYPE_CHECKING:
@@ -72,7 +73,9 @@ def resources_to_pandas_dataframe(profile: Profile) -> pandas.DataFrame:
     values["snapshots"] = array.array("I")
 
     # All resources at this point should be flat
-    for snapshot, resource in profile.all_resources(flatten_values=True):
+    for snapshot, resource in log.progress(
+        profile.all_resources(flatten_values=True), description="Converting To Pandas"
+    ):
         values["snapshots"].append(snapshot)
         for resource_key in resource_keys:
             values[resource_key].append(resource.get(resource_key, numpy.nan))
@@ -95,7 +98,7 @@ def models_to_pandas_dataframe(profile: Profile) -> pandas.DataFrame:
     model_keys = list(query.all_model_fields_of(profile))
     values: dict[str, list[Any]] = {key: [] for key in model_keys}
 
-    for _, model in profile.all_models():
+    for _, model in log.progress(profile.all_models(), description="Converting To Pandas"):
         flattened_resources = dict(list(query.all_items_of(model)))
         for model_key in model_keys:
             values[model_key].append(flattened_resources.get(model_key, numpy.nan))
diff --git a/perun/utils/log.py b/perun/utils/log.py
index 5b7ce81a..4814bfa6 100644
--- a/perun/utils/log.py
+++ b/perun/utils/log.py
@@ -3,7 +3,7 @@
 from __future__ import annotations
 
 # Standard Imports
-from typing import Any, Callable, TYPE_CHECKING, Iterable, Optional, TextIO, Type, NoReturn
+from typing import Any, Callable, TYPE_CHECKING, Iterable, Optional, TextIO, Type, NoReturn, TypeVar
 import builtins
 import collections
 import functools
@@ -18,6 +18,7 @@
 
 # Third-Party Imports
 import numpy as np
+import progressbar
 import termcolor
 
 # Perun Imports
@@ -48,6 +49,9 @@
 LOGGING: bool = False
 COLOR_OUTPUT: bool = True
 CURRENT_INDENT: int = 0
+# Note: We set this to False during testing, since the redirection breaks the tests;
+# in real usage we want it on so that prints to stdout do not interleave with the bar
+REDIRECT_STDOUT_IN_PROGRESS: bool = True
 
 # Enum of verbosity levels
 VERBOSE_DEBUG: int = 2
@@ -57,6 +61,8 @@
 SUPPRESS_WARNINGS: bool = False
 SUPPRESS_PAGING: bool = True
 
+T = TypeVar("T")
+
 
 def increase_indent() -> None:
     """Increases the indent for minor and major steps"""
@@ -847,6 +853,29 @@ def collector_to_command(collector_info: dict[str, Any]) -> str:
     return f"{collector_info['name']} {params}"
 
 
+def progress(collection: Iterable[T], description: str = "") -> Iterable[T]:
+    """Wraps iteration over any collection with a textual progress bar
+
+    :param collection: any iterable
+    :param description: tag printed on the left side of the progress bar
+    :return: iterator over the collection that updates the progress bar as it is consumed
+    """
+    widgets = [
+        (description + ": ") if description else "",
+        progressbar.Percentage(),
+        " ",
+        progressbar.Bar(),
+        " [",
+        progressbar.Timer(),
+        ", ",
+        progressbar.AdaptiveETA(),
+        "]",
+    ]
+    yield from progressbar.progressbar(
+        collection, redirect_stdout=REDIRECT_STDOUT_IN_PROGRESS, widgets=widgets
+    )
+
+
 class History:
     """Helper with wrapper, which is used when one wants to visualize the version control history
     of the project, printing specific stuff corresponding to a git history
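
Every remaining hunk in this patch converts a call site to the helper above, so one usage sketch suffices. The function below is hypothetical, not part of Perun; only the `log.progress` call reflects the new API:

    from perun.utils import log

    def count_lines(paths: list[str]) -> int:
        # Hypothetical example function; the loop shows the intended call pattern.
        total = 0
        for path in log.progress(paths, description="Counting Lines"):
            with open(path, encoding="utf-8") as handle:
                total += sum(1 for _ in handle)
        return total

Because progress() is a generator, the bar is created lazily on first iteration, and with redirect_stdout enabled any prints inside the loop body are re-routed above the bar instead of tearing it.
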
diff --git a/perun/view_diff/datatables/run.py b/perun/view_diff/datatables/run.py
index 46a43218..9878f990 100755
--- a/perun/view_diff/datatables/run.py
+++ b/perun/view_diff/datatables/run.py
@@ -8,8 +8,6 @@
 
 # Third-Party Imports
 import click
-import jinja2
-import progressbar
 
 # Perun Imports
 from perun.templates import factory as templates
@@ -144,7 +142,7 @@ def profile_to_data(
 
     # Convert traces to some trace objects
     trace_info_map = {}
-    for trace in progressbar.progressbar(df["trace"].unique()):
+    for trace in log.progress(df["trace"].unique(), description="Converting Traces"):
         trace_as_list = trace.split(",")
         long_trace = ",".join(
             traces_kit.fold_recursive_calls_in_trace(trace_as_list, generalize=True)
@@ -169,7 +167,7 @@ def process_traces(value: str) -> TraceInfo:
         grouped_df = df.groupby(["uid", "trace"]).agg({aggregation_key: "sum"}).reset_index()
         sorted_df = grouped_df.sort_values(by=aggregation_key, ascending=False)
         amount_sum = df[aggregation_key].sum()
-        for _, row in progressbar.progressbar(sorted_df.iterrows()):
+        for _, row in log.progress(sorted_df.iterrows(), description="Processing Traces"):
             data.append(
                 TableRecord(
                     row["uid"],
diff --git a/perun/view_diff/flamegraph/run.py b/perun/view_diff/flamegraph/run.py
index 987d47e9..d727cb46 100755
--- a/perun/view_diff/flamegraph/run.py
+++ b/perun/view_diff/flamegraph/run.py
@@ -10,7 +10,6 @@
 
 # Third-Party Imports
 import click
-import progressbar
 
 # Perun Imports
 from perun.templates import factory as templates
@@ -132,7 +131,7 @@ def generate_flamegraphs(
     :param width: width of the flame graph
     """
     flamegraphs = []
-    for i, dtype in enumerate(data_types):
+    for i, dtype in log.progress(enumerate(data_types), description="Generating Flamegraphs"):
         try:
             data_type = mapping.from_readable_key(dtype)
             lhs_graph = flamegraph_factory.draw_flame_graph(
@@ -203,7 +202,9 @@ def process_maxima(
     is_inclusive = profile.get("collector_info", {}).get("name") == "kperf"
     counts: dict[str, float] = defaultdict(float)
     max_trace = 0
-    for _, resource in progressbar.progressbar(profile.all_resources()):
+    for _, resource in log.progress(
+        profile.all_resources(), description="Processing Resource Maxima"
+    ):
         max_trace = max(max_trace, len(resource["trace"]) + 1)
         if is_inclusive:
             for key in resource:
diff --git a/perun/view_diff/report/run.py b/perun/view_diff/report/run.py
index ab9a1c28..c1c9fc63 100755
--- a/perun/view_diff/report/run.py
+++ b/perun/view_diff/report/run.py
@@ -24,7 +24,6 @@
 
 # Third-Party Imports
 import click
-import progressbar
 
 # Perun Imports
 from perun.logic import config
@@ -287,7 +286,9 @@ def comma_control(commas_list: list[bool], pos: int) -> str:
 
         output = ["{"]
         commas = [False, False, False]
-        for uid, nodes in progressbar.progressbar(self.uid_to_nodes.items()):
+        for uid, nodes in log.progress(
+            self.uid_to_nodes.items(), description="Converting Nodes To Jinja"
+        ):
             output.extend([comma_control(commas, 0), f"{self.translate_node(uid)}:", "{"])
             commas[1] = False
             for node in nodes:
@@ -494,7 +495,7 @@ def process_traces(
     """
     max_trace = 0
     max_samples: dict[str, float] = defaultdict(float)
-    for _, resource in progressbar.progressbar(profile.all_resources()):
+    for _, resource in log.progress(profile.all_resources(), description="Processing Traces"):
         full_trace = [convert.to_uid(t, Config().minimize) for t in resource["trace"]]
         full_trace.append(convert.to_uid(resource["uid"], Config().minimize))
         trace_len = len(full_trace)
@@ -544,7 +545,9 @@ def generate_trace_stats(graph: Graph) -> dict[str, list[TraceStat]]:
     log.minor_info("Generating stats for traces")
     trace_cache: dict[str, TraceStat] = {}
     trace_counter: int = 0
-    for uid, traces in progressbar.progressbar(graph.uid_to_traces.items()):
+    for uid, traces in log.progress(
+        graph.uid_to_traces.items(), description="Generating Trace Stats"
+    ):
         processed = set()
         for trace in [trace for trace in traces if len(trace) > 1]:
             key = ",".join(trace)
@@ -604,7 +607,9 @@ def generate_selection(graph: Graph, trace_stats: dict[str, list[TraceStat]]) ->
     log.minor_info("Generating selection table")
     trace_stat_cache: dict[str, tuple[str, str, float, float, str]] = {}
     stat_len = len(Stats.all_stats())
-    for uid, nodes in progressbar.progressbar(graph.uid_to_nodes.items()):
+    for uid, nodes in log.progress(
+        graph.uid_to_nodes.items(), description="Generating Selection Rows"
+    ):
         baseline_overall: array.array[float] = array.array("d", [0.0] * stat_len)
         target_overall: array.array[float] = array.array("d", [0.0] * stat_len)
         stats: list[tuple[int, float, float]] = []
diff --git a/perun/view_diff/sankey/run.py b/perun/view_diff/sankey/run.py
index a3a851bf..ffe0204c 100755
--- a/perun/view_diff/sankey/run.py
+++ b/perun/view_diff/sankey/run.py
@@ -21,8 +21,6 @@
 
 # Third-Party Imports
 import click
-import jinja2
-import progressbar
 
 # Perun Imports
 from perun.templates import factory as templates
@@ -108,7 +106,7 @@ class Linkage:
     __slots__ = ["source", "target", "value", "color"]
     source: list[int]
     target: list[int]
-    value: list[int]
+    value: list[float]
     color: list[str]
 
     def __init__(self):
@@ -151,10 +149,10 @@ class SankeyGraph:
     linkage: dict[Literal["split", "merged"], Linkage]
     width: int
     height: int
-    min: int
-    max: int
-    diff: int
-    sum: int
+    min: float
+    max: float
+    diff: float
+    sum: float
 
     def __init__(self, uid: str):
         """Initializes the graph"""
@@ -270,7 +268,7 @@ def process_traces(
     :param profile_type: type of the profile
     :param cfg: configuration of the generation
     """
-    for _, resource in progressbar.progressbar(profile.all_resources()):
+    for _, resource in log.progress(profile.all_resources(), description="Processing Traces"):
         trace_len = len(resource["trace"])
         full_trace = [convert.to_uid(t) for t in resource["trace"]] + [
             convert.to_uid(resource["uid"])
@@ -323,7 +321,7 @@ def create_edge(
     edge_type: Literal["split", "merged"],
     src: int,
     tgt: int,
-    value: int,
+    value: float,
     color: str,
 ) -> None:
     """Creates single edge in the sankey graph
@@ -384,7 +382,7 @@ def minimize_sankey_maps(
     :param sankey_map: map of sankey graphs;
     """
     minimal_sankey_map = {}
-    for uid, sankey_points in progressbar.progressbar(sankey_map.items()):
+    for uid, sankey_points in log.progress(sankey_map.items(), description="Minimizing Sankey Map"):
         id_to_point = {val.id: val for val in sankey_points.values()}
         minimal_sankey_points = {}
         for key in sankey_points.keys():
@@ -413,7 +411,7 @@ def extract_graphs_from_sankey_map(
     """
     sankey_graphs = []
 
-    for uid, sankey_points in progressbar.progressbar(sankey_map.items()):
+    for uid, sankey_points in log.progress(sankey_map.items(), description="Extracting Graphs"):
         graph = SankeyGraph(uid)
         positions = []
 
@@ -469,7 +467,9 @@ def compute_reachable(profile: Profile, reachability_map: dict[str, set[str]]) -
     :param profile: profile with data
     :param reachability_map: map of nodes to their backward or forward reachable nodes
     """
-    for _, resource in progressbar.progressbar(profile.all_resources()):
+    for _, resource in log.progress(
+        profile.all_resources(), description="Computing Reachable Nodes"
+    ):
         full_trace = [convert.to_uid(t) for t in resource["trace"]] + [
             convert.to_uid(resource["uid"])
         ]
diff --git a/tests/conftest.py b/tests/conftest.py
index 6dcb8518..bcaf9bda 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -547,6 +547,8 @@ def setup():
     log.LOGGING = False
     log.CURRENT_INDENT = 0
     log.SUPPRESS_PAGING = True
+    log.REDIRECT_STDOUT_IN_PROGRESS = False
+
     # We disable the metrics by default, since they might slow down tests
     metrics.Metrics.enabled = False
     report.Stats.KnownStatsSet.clear()
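
The two-line conftest change above is what keeps the new bars out of the test output: progressbar's stdout redirection conflicts with pytest's own capture. An equivalent per-test fixture, purely illustrative and not part of this patch:

    import pytest
    from perun.utils import log

    @pytest.fixture(autouse=True)
    def _no_progress_redirect():
        # Save and restore the flag so the override cannot leak between tests
        original = log.REDIRECT_STDOUT_IN_PROGRESS
        log.REDIRECT_STDOUT_IN_PROGRESS = False
        yield
        log.REDIRECT_STDOUT_IN_PROGRESS = original
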