From 52c9c691bb49326f6710c9b0a978bc618e67ae1e Mon Sep 17 00:00:00 2001 From: Bomme <13520622+Bomme@users.noreply.github.com> Date: Thu, 4 Jan 2024 22:42:02 +0100 Subject: [PATCH 1/3] remove a few more calls to old_div --- apiv2/combined_search_strategies.py | 3 +- .../management/commands/test_color_schemes.py | 7 +- utils/audioprocessing/color_schemes.py | 83 ++++++++++--------- .../freesound_audio_processing.py | 10 +-- utils/audioprocessing/processing.py | 57 ++++--------- utils/audioprocessing/wav2png.py | 5 +- 6 files changed, 71 insertions(+), 94 deletions(-) diff --git a/apiv2/combined_search_strategies.py b/apiv2/combined_search_strategies.py index a42facbc3..ebb11593c 100644 --- a/apiv2/combined_search_strategies.py +++ b/apiv2/combined_search_strategies.py @@ -17,7 +17,6 @@ # Authors: # See AUTHORS file. # -from past.utils import old_div from apiv2.forms import API_SORT_OPTIONS_MAP from utils.similarity_utilities import api_search as similarity_api_search from utils.search import SearchEngineException, get_search_engine @@ -90,7 +89,7 @@ def filter_both(search_form, target_file=None, extra_parameters=None): if search_form.cleaned_data['target'] or target_file: # First search into gaia and then into solr (get all gaia results) gaia_ids, gaia_count, distance_to_target_data, note = get_gaia_results(search_form, target_file, page_size=gaia_page_size, max_pages=gaia_max_pages) - valid_ids_pages = [gaia_ids[i:i+solr_filter_id_block_size] for i in range(0, len(gaia_ids), solr_filter_id_block_size) if (old_div(i,solr_filter_id_block_size)) < solr_filter_id_max_pages] + valid_ids_pages = [gaia_ids[i:i+solr_filter_id_block_size] for i in range(0, len(gaia_ids), solr_filter_id_block_size) if (i / solr_filter_id_block_size) < solr_filter_id_max_pages] solr_ids = list() search_engine = get_search_engine() for valid_ids_page in valid_ids_pages: diff --git a/sounds/management/commands/test_color_schemes.py b/sounds/management/commands/test_color_schemes.py index 0858d6749..db6ce042f 100644 --- a/sounds/management/commands/test_color_schemes.py +++ b/sounds/management/commands/test_color_schemes.py @@ -18,14 +18,15 @@ # See AUTHORS file. 
# -from past.utils import old_div +import os + from django.conf import settings from django.core.management.base import BaseCommand -from sounds.models import Sound import utils.audioprocessing.processing as audioprocessing +from sounds.models import Sound from utils.audioprocessing import color_schemes -import os + # docker-compose run --rm web python manage.py test_color_schemes 415154,415144,413973,402737,403343,194761 diff --git a/utils/audioprocessing/color_schemes.py b/utils/audioprocessing/color_schemes.py index 65945ba04..5a45962d5 100644 --- a/utils/audioprocessing/color_schemes.py +++ b/utils/audioprocessing/color_schemes.py @@ -1,7 +1,7 @@ -from past.utils import old_div -from PIL import ImageColor from functools import partial +from PIL import ImageColor + def desaturate(rgb, amount): """ @@ -27,80 +27,81 @@ def color_from_value(value): RAINFOREST_COLOR_SCHEME = 'Rainforest' DEFAULT_COLOR_SCHEME_KEY = FREESOUND2_COLOR_SCHEME - COLOR_SCHEMES = { FREESOUND2_COLOR_SCHEME: { 'wave_colors': [ - (0, 0, 0), # Background color - (50, 0, 200), # Low spectral centroid + (0, 0, 0), # Background color + (50, 0, 200), # Low spectral centroid (0, 220, 80), (255, 224, 0), - (255, 70, 0), # High spectral centroid + (255, 70, 0), # High spectral centroid ], 'spec_colors': [ - (0, 0, 0), # Background color - (old_div(58,4), old_div(68,4), old_div(65,4)), - (old_div(80,2), old_div(100,2), old_div(153,2)), + (0, 0, 0), # Background color + (58 // 4, 68 // 4, 65 // 4), + (80 // 2, 100 // 2, 153 // 2), (90, 180, 100), (224, 224, 44), (255, 60, 30), (255, 255, 255) - ], - 'wave_zero_line_alpha': 25, + ], + 'wave_zero_line_alpha': 25, }, OLD_BEASTWHOOSH_COLOR_SCHEME: { 'wave_colors': [ - (255, 255, 255), # Background color - (29, 159, 181), # 1D9FB5, Low spectral centroid - (28, 174, 72), # 1CAE48 - (255, 158, 53), # FF9E35 - (255, 53, 70), # FF3546, High spectral centroid + (255, 255, 255), # Background color + (29, 159, 181), # 1D9FB5, Low spectral centroid + (28, 174, 72), # 1CAE48 + (255, 158, 53), # FF9E35 + (255, 53, 70), # FF3546, High spectral centroid ], 'spec_colors': [ - (0, 0, 0), # Background color/Low spectral energy - (29, 159, 181), # 1D9FB5 - (28, 174, 72), # 1CAE48 - (255, 158, 53), # FF9E35 - (255, 53, 70), # FF3546, High spectral energy - ] + (0, 0, 0), # Background color/Low spectral energy + (29, 159, 181), # 1D9FB5 + (28, 174, 72), # 1CAE48 + (255, 158, 53), # FF9E35 + (255, 53, 70), # FF3546, High spectral energy + ] }, BEASTWHOOSH_COLOR_SCHEME: { 'wave_colors': [ - (20, 20, 36), # Background color (not really used as we use transparent mode) - (29, 159, 181), # Low spectral centroid + (20, 20, 36), # Background color (not really used as we use transparent mode) + (29, 159, 181), # Low spectral centroid (0, 220, 80), (255, 200, 58), - (255, 0, 70), # High spectral centroid + (255, 0, 70), # High spectral centroid ], 'spec_colors': [ - (20, 20, 36), # Low spectral energy + (20, 20, 36), # Low spectral energy (0, 18, 25), (0, 37, 56), - (11, 95, 118), + (11, 95, 118), (29, 159, 181), (0, 220, 80), (255, 200, 58), (255, 125, 0), - (255, 0, 70), - (255, 0, 20), # High spectral energy + (255, 0, 70), + (255, 0, 20), # High spectral energy ], 'wave_transparent_background': True, 'wave_zero_line_alpha': 12, }, CYBERPUNK_COLOR_SCHEME: { - 'wave_colors': [(0, 0, 0)] + [color_from_value(value/29.0) for value in range(0, 30)], - 'spec_colors': [(0, 0, 0)] + [color_from_value(value/29.0) for value in range(0, 30)], + 'wave_colors': [(0, 0, 0)] + [color_from_value(value / 29.0) for value in 
range(0, 30)], + 'spec_colors': [(0, 0, 0)] + [color_from_value(value / 29.0) for value in range(0, 30)], }, RAINFOREST_COLOR_SCHEME: { - 'wave_colors': [(213, 217, 221)] + list(map(partial(desaturate, amount=0.7), [ - (50, 0, 200), - (0, 220, 80), - (255, 224, 0), - ])), - 'spec_colors': [(213, 217, 221)] + list(map(partial(desaturate, amount=0.7), [ - (50, 0, 200), - (0, 220, 80), - (255, 224, 0), - ])), + 'wave_colors': [(213, 217, 221)] + + list(map(partial(desaturate, amount=0.7), [ + (50, 0, 200), + (0, 220, 80), + (255, 224, 0), + ])), + 'spec_colors': [(213, 217, 221)] + + list(map(partial(desaturate, amount=0.7), [ + (50, 0, 200), + (0, 220, 80), + (255, 224, 0), + ])), } } diff --git a/utils/audioprocessing/freesound_audio_processing.py b/utils/audioprocessing/freesound_audio_processing.py index 82972f9ba..25e2d36d0 100644 --- a/utils/audioprocessing/freesound_audio_processing.py +++ b/utils/audioprocessing/freesound_audio_processing.py @@ -19,22 +19,22 @@ # -from past.utils import old_div import json +import logging import os import signal -import logging import tempfile +from tempfile import TemporaryDirectory +import sentry_sdk from django.apps import apps from django.conf import settings -from . import color_schemes import utils.audioprocessing.processing as audioprocessing from utils.audioprocessing.processing import AudioProcessingException -from tempfile import TemporaryDirectory from utils.mirror_files import copy_previews_to_mirror_locations, copy_displays_to_mirror_locations from utils.sound_upload import get_processing_before_describe_sound_folder +from . import color_schemes console_logger = logging.getLogger("console") @@ -244,7 +244,7 @@ def process(self, skip_previews=False, skip_displays=False, update_sound_process if self.sound.type in settings.LOSSY_FILE_EXTENSIONS: info['bitdepth'] = 0 # mp3 and ogg don't have bitdepth if info['duration'] > 0: - raw_bitrate = int(round(old_div(old_div(self.sound.filesize * 8, info['duration']), 1000))) + raw_bitrate = round((self.sound.filesize * 8 / info['duration']) / 1000) # Here we post-process a bit the bitrate to account for small rounding errors # If we see computed bitrate is very close to a common bitrate, we quantize to that number differences_with_common_bitrates = [abs(cbt - raw_bitrate) for cbt in settings.COMMON_BITRATES] diff --git a/utils/audioprocessing/processing.py b/utils/audioprocessing/processing.py index 8f32a136a..73b09f0a1 100644 --- a/utils/audioprocessing/processing.py +++ b/utils/audioprocessing/processing.py @@ -21,7 +21,6 @@ # -from past.utils import old_div import math import os import re @@ -39,31 +38,6 @@ class AudioProcessingException(Exception): pass -class TestAudioFile: - """A class that mimics pysndfile.PySndfile but generates noise instead of reading - a wave file. Additionally it can be told to have a "broken" header and thus crashing - in the middle of the file. 
Also useful for testing ultra-short files of 20 samples.""" - - def __init__(self, num_frames, has_broken_header=False): - self.seekpoint = 0 - self.nframes = num_frames - self.samplerate = 44100 - self.channels = 1 - self.has_broken_header = has_broken_header - - def seek(self, seekpoint): - self.seekpoint = seekpoint - - def read_frames(self, frames_to_read): - if self.has_broken_header and self.seekpoint + frames_to_read > old_div(self.num_frames, 2): - raise RuntimeError() - - num_frames_left = self.num_frames - self.seekpoint - will_read = num_frames_left if num_frames_left < frames_to_read else frames_to_read - self.seekpoint += will_read - return numpy.random.random(will_read) * 2 - 1 - - def get_max_level(filename): max_value = 0 buffer_size = 4096 @@ -170,7 +144,7 @@ def read(self, start, size, resize_if_less=False): def spectral_centroid(self, seek_point, spec_range=110.0): """ starting at seek_point read fft_size samples, and calculate the spectral centroid """ - samples = self.read(seek_point - old_div(self.fft_size, 2), self.fft_size, True) + samples = self.read(seek_point - self.fft_size // 2, self.fft_size, True) samples *= self.window fft = numpy.fft.rfft(samples) @@ -178,7 +152,8 @@ def spectral_centroid(self, seek_point, spec_range=110.0): length = numpy.float64(spectrum.shape[0]) # scale the db spectrum from [- spec_range db ... 0 db] > [0..1] - db_spectrum = old_div(((20 * (numpy.log10(spectrum + 1e-60))).clip(-spec_range, 0.0) + spec_range), spec_range) + db_spectrum = ((20 * (numpy.log10(spectrum + 1e-60))).clip(-spec_range, 0.0) + spec_range) + db_spectrum = db_spectrum / spec_range energy = spectrum.sum() spectral_centroid = 0 @@ -189,11 +164,12 @@ def spectral_centroid(self, seek_point, spec_range=110.0): if self.spectrum_range is None: self.spectrum_range = numpy.arange(length) - spectral_centroid = old_div((spectrum * self.spectrum_range).sum(), (energy * (length - 1))) * self.samplerate * 0.5 + spectral_centroid = ((spectrum * self.spectrum_range).sum() / ( + energy * (length - 1))) * self.samplerate * 0.5 # clip > log10 > scale between 0 and 1 - spectral_centroid = old_div((math.log10(self.clip(spectral_centroid, self.lower, self.higher)) - self.lower_log), ( - self.higher_log - self.lower_log)) + spectral_centroid = (math.log10(self.clip(spectral_centroid, self.lower, self.higher)) - self.lower_log) / ( + self.higher_log - self.lower_log) return spectral_centroid, db_spectrum @@ -246,14 +222,14 @@ def peaks(self, start_seek, end_seek): def interpolate_colors(colors, flat=False, num_colors=256): """ given a list of colors, create a larger list of colors interpolating - the first one. If flatten is True a list of numers will be returned. If + the first one. If flatten is True, a list of numbers will be returned. If False, a list of (r,g,b) tuples. 
num_colors is the number of colors wanted in the final list """ palette = [] for i in range(num_colors): - index = old_div((i * (len(colors) - 1)), (num_colors - 1.0)) + index = (i * (len(colors) - 1)) / (num_colors - 1.0) index_int = int(index) alpha = index - float(index_int) @@ -330,7 +306,7 @@ def draw_anti_aliased_pixels(self, x, y1, y2, color): y_max_int = int(y_max) alpha = y_max - y_max_int - if alpha > 0.0 and alpha < 1.0 and y_max_int + 1 < self.image_height: + if 0.0 < alpha < 1.0 and y_max_int + 1 < self.image_height: if not self.transparent_background: current_pix = self.pix[x, y_max_int + 1] r = int((1 - alpha) * current_pix[0] + alpha * color[0]) @@ -346,7 +322,7 @@ def draw_anti_aliased_pixels(self, x, y1, y2, color): y_min_int = int(y_min) alpha = 1.0 - (y_min - y_min_int) - if alpha > 0.0 and alpha < 1.0 and y_min_int - 1 >= 0: + if 0.0 < alpha < 1.0 and y_min_int - 1 >= 0: if not self.transparent_background: r = int((1 - alpha) * current_pix[0] + alpha * color[0]) g = int((1 - alpha) * current_pix[1] + alpha * color[1]) @@ -361,7 +337,8 @@ def save(self, filename): a = self.color_scheme_to_use.get('wave_zero_line_alpha', 0) if a: for x in range(self.image_width): - self.pix[x, old_div(self.image_height, 2)] = tuple([p + a for p in self.pix[x, old_div(self.image_height, 2)]]) + center = self.image_height // 2 + self.pix[x, center] = tuple([p + a for p in self.pix[x, center]]) self.image.save(filename) @@ -391,10 +368,10 @@ def __init__(self, image_width, image_height, fft_size, color_scheme): y_min = math.log10(f_min) y_max = math.log10(f_max) for y in range(self.image_height): - freq = math.pow(10.0, y_min + old_div(y, (image_height - 1.0)) * (y_max - y_min)) - bin = freq / 22050.0 * (old_div(self.fft_size, 2) + 1) + freq = math.pow(10.0, y_min + y / (image_height - 1.0) * (y_max - y_min)) + bin = freq / 22050.0 * (self.fft_size // 2 + 1) - if bin < old_div(self.fft_size, 2): + if bin < self.fft_size // 2: alpha = bin - int(bin) self.y_to_bin.append((int(bin), alpha * 255)) @@ -440,7 +417,7 @@ def create_wave_images(input_filename, output_filename_w, output_filename_s, ima for x in range(image_width): - if progress_callback and x % (old_div(image_width, 100)) == 0: + if progress_callback and x % (image_width // 100) == 0: progress_callback(x, image_width) seek_point = int(x * samples_per_pixel) diff --git a/utils/audioprocessing/wav2png.py b/utils/audioprocessing/wav2png.py index 4c95af267..ef86f9d7d 100755 --- a/utils/audioprocessing/wav2png.py +++ b/utils/audioprocessing/wav2png.py @@ -21,7 +21,6 @@ # -from past.utils import old_div import argparse from utils.audioprocessing.processing import create_wave_images, AudioProcessingException @@ -29,8 +28,8 @@ def progress_callback(position, width): - percentage = old_div((position*100),width) - if position % (old_div(width, 10)) == 0: + percentage = (position * 100) // width + if position % (width // 10) == 0: sys.stdout.write(str(percentage) + "% ") sys.stdout.flush() From 86c5f4366a5d7d6a5a936b0fea9c474c4e08aa65 Mon Sep 17 00:00:00 2001 From: Bomme <13520622+Bomme@users.noreply.github.com> Date: Thu, 4 Jan 2024 23:09:54 +0100 Subject: [PATCH 2/3] remove future from requirements --- requirements.in | 1 - requirements.txt | 2 -- 2 files changed, 3 deletions(-) diff --git a/requirements.in b/requirements.in index f4541291b..07eab0486 100644 --- a/requirements.in +++ b/requirements.in @@ -28,7 +28,6 @@ djangorestframework==3.13.1 fabric==2.6.0 feedparser~=6.0.10 freezegun==1.2.2 -future~=0.18.2 graypy==0.2.12 gunicorn==21.2.0 
ipython==8.14.0 diff --git a/requirements.txt b/requirements.txt index 4f4f2a1b3..e3d7b28a4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -146,8 +146,6 @@ feedparser==6.0.10 # via -r requirements.in freezegun==1.2.2 # via -r requirements.in -future==0.18.3 - # via -r requirements.in gprof2dot==2022.7.29 # via django-silk graypy==0.2.12 From c2763e035a3a02b7b9f44ede1bd0e75f53bb0442 Mon Sep 17 00:00:00 2001 From: Bomme <13520622+Bomme@users.noreply.github.com> Date: Fri, 5 Jan 2024 11:19:29 +0100 Subject: [PATCH 3/3] Revert "remove future from requirements" This reverts commit 86c5f4366a5d7d6a5a936b0fea9c474c4e08aa65. --- requirements.in | 1 + requirements.txt | 2 ++ 2 files changed, 3 insertions(+) diff --git a/requirements.in b/requirements.in index 07eab0486..f4541291b 100644 --- a/requirements.in +++ b/requirements.in @@ -28,6 +28,7 @@ djangorestframework==3.13.1 fabric==2.6.0 feedparser~=6.0.10 freezegun==1.2.2 +future~=0.18.2 graypy==0.2.12 gunicorn==21.2.0 ipython==8.14.0 diff --git a/requirements.txt b/requirements.txt index e3d7b28a4..4f4f2a1b3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -146,6 +146,8 @@ feedparser==6.0.10 # via -r requirements.in freezegun==1.2.2 # via -r requirements.in +future==0.18.3 + # via -r requirements.in gprof2dot==2022.7.29 # via django-silk graypy==0.2.12
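
A note on the old_div replacements in PATCH 1/3: past.utils.old_div(a, b) (shipped with the `future` package, which PATCH 3/3 keeps in the requirements) floor-divides when both operands are integers and true-divides otherwise, so the operator chosen at each call site above follows that rule. A minimal standalone sketch of the equivalence, for reference only and not part of the patches:

    from past.utils import old_div  # provided by the `future` package

    # both operands int -> old_div floor-divides, matching //
    assert old_div(7, 2) == 7 // 2 == 3

    # any float operand -> old_div true-divides, matching /
    assert old_div(7, 2.0) == 7 / 2.0 == 3.5

This is why integer-only expressions such as self.fft_size // 2 and image_width // 100 keep floor division, while expressions with a float operand, such as value / 29.0 or the bitrate computation involving info['duration'], use true division.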