feat(FileUploader): expose ref to clear files #18267
base: main
Conversation
✅ Deploy Preview for v11-carbon-web-components ready!
@@ -342,6 +344,6 @@ FileUploader.propTypes = {
   * sizes.
   */
  size: PropTypes.oneOf(['sm', 'md', 'lg']),
-};
+} as PropTypes.ValidationMap<FileUploaderProps>;
I was getting TS errors and added this as a workaround:
- accept: Type '(string | null | undefined)[]' is not assignable to type 'string[]'
- filenameStatus: Type 'string' is not assignable to type '"edit" | "complete" | "uploading"'
--- Directory Structure ---
multiverse_app/
├── shared_resources/
│   ├── __init__.py
│   ├── audio_utils.py
│   ├── dragonfly_systems.py
│   ├── gemini_systems.py
│   └── world_generation.py
├── android_operator/
│   ├── __init__.py
│   ├── main.py
│   ├── sensors.py
│   ├── ai_core.py
│   ├── transmission.py
│   ├── ui.py
│   └── requirements.txt
├── apple_operator/
│   ├── __init__.py
│   ├── main.py
│   ├── sensors.py
│   ├── ai_core.py
│   ├── transmission.py
│   ├── ui.py
│   └── requirements.txt
├── tests/
│   ├── __init__.py
│   ├── test_audio_utils.py
│   ├── test_dragonfly_systems.py
│   ├── test_gemini_systems.py
│   ├── test_world_generation.py
│   ├── test_ai_core.py
│   └── test_transmission.py
├── venv/  (virtual environment)
├── README.md
└── LICENSE
--- shared_resources/__init__.py ---
# This file makes the shared_resources directory a Python package
--- shared_resources/audio_utils.py ---
from pydub import AudioSegment
from pydub.generators import Sine, WhiteNoise
def knowledge_sine(base_freq: float, duration: int, knowledge_level: float = 1, variance: float = 5) -> AudioSegment:
    """Generates a sine wave with subtle variations based on knowledge level.
    Args:
        base_freq (float): The base frequency of the sine wave in Hz.
        duration (int): The duration of the sine wave in milliseconds.
        knowledge_level (float, optional): A multiplier for the base frequency,
            representing the knowledge level. Defaults to 1.
        variance (float, optional): The amount of random variance in frequency in Hz.
            Defaults to 5.
    Returns:
        AudioSegment: The generated sine wave with variations.
    """
    # ... (Implementation remains the same)

def automated_amplifier(sound: AudioSegment, threshold: float = -20) -> AudioSegment:
    """Amplifies quiet sounds to ensure audibility.
    Args:
        sound (AudioSegment): The sound to be amplified.
        threshold (float, optional): The dBFS threshold below which sounds will be amplified.
            Defaults to -20.
    Returns:
        AudioSegment: The amplified sound.
    """
    # ... (Implementation remains the same)
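Both bodies are elided above; purely as a hedged illustration, a minimal sketch under the documented contracts might look like this (the exact variation and gain logic are assumptions, not the original implementation):

from pydub import AudioSegment
from pydub.generators import Sine
import random

def knowledge_sine(base_freq: float, duration: int, knowledge_level: float = 1, variance: float = 5) -> AudioSegment:
    # Scale the base frequency by knowledge level, then detune by a small random offset
    freq = base_freq * knowledge_level + random.uniform(-variance, variance)
    return Sine(freq).to_audio_segment(duration=duration)

def automated_amplifier(sound: AudioSegment, threshold: float = -20) -> AudioSegment:
    # Boost anything quieter than the threshold just up to the threshold
    if sound.dBFS < threshold:
        return sound.apply_gain(threshold - sound.dBFS)
    return sound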
--- shared_resources/dragonfly_systems.py ---
from .audio_utils import knowledge_sine
import random
def visual_system(duration: int, base_freq: float = None, complexity: float = 1.0) -> AudioSegment:
    """Simulates visual input with varying frequencies and complexity.
    Args:
        duration (int): The duration of the audio segment in milliseconds.
        base_freq (float, optional): The base frequency in Hz. If None, a random frequency
            between 800 and 1500 Hz is chosen. Defaults to None.
        complexity (float, optional): A multiplier that influences the number of sine waves
            generated. Defaults to 1.0.
    Returns:
        AudioSegment: The generated audio segment simulating visual input.
    """
    # ... (Implementation remains the same)
... (Other functions with similar improvements in type hints and docstrings)
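As a concrete (and purely illustrative) reading of the visual_system contract above, a sketch follows; the layer count and detuning scheme are assumptions:

import random
from pydub import AudioSegment
from .audio_utils import knowledge_sine

def visual_system(duration: int, base_freq: float = None, complexity: float = 1.0) -> AudioSegment:
    # Pick a random base frequency in the documented range when none is given
    if base_freq is None:
        base_freq = random.uniform(800, 1500)
    out = AudioSegment.silent(duration=duration)
    # complexity scales how many detuned sine layers are stacked
    for i in range(max(1, int(3 * complexity))):
        out = out.overlay(knowledge_sine(base_freq * (1 + 0.1 * i), duration))
    return out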
--- shared_resources/world_generation.py ---
from .dragonfly_systems import *
from .gemini_systems import *
import librosa
import numpy as np
def generate_world(duration: int = 10000, prev_world: AudioSegment = None,
                   sensor_data: dict = None) -> AudioSegment:
    """Combines all systems to create a dynamic soundscape.
    Args:
        duration (int, optional): The duration of the soundscape in milliseconds. Defaults to 10000.
        prev_world (AudioSegment, optional): The previous soundscape, used for analysis and
            transitioning. Defaults to None.
        sensor_data (dict, optional): A dictionary containing sensor readings (e.g., temperature,
            humidity). Defaults to None.
    Returns:
        AudioSegment: The generated soundscape.
    """
    # ... (Implementation with audio analysis and system generation)
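One hedged sketch of how generate_world could tie the pieces together; the spectral-centroid transition heuristic and the temperature-to-complexity mapping are assumptions:

import numpy as np
import librosa
from .audio_utils import automated_amplifier
from .dragonfly_systems import visual_system

def generate_world(duration: int = 10000, prev_world=None, sensor_data: dict = None):
    base_freq = None
    if prev_world is not None:
        # Analyze the previous world's spectral centroid so the new world transitions smoothly
        samples = np.array(prev_world.get_array_of_samples()).astype(np.float32)
        centroid = librosa.feature.spectral_centroid(y=samples, sr=prev_world.frame_rate)
        base_freq = float(np.clip(centroid.mean(), 200.0, 2000.0))
    complexity = (sensor_data or {}).get("temperature", 1.0)
    world = visual_system(duration, base_freq=base_freq, complexity=complexity)
    return automated_amplifier(world)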
--- android_operator/main.py ---
... (Import necessary modules)
... (Global variables)
... (OS and Hardware Detection)
--- Permission Handling ---
... (Permission handling with improved error handling)
def check_permission(permission_name: str) -> bool:
    """Checks if a specific permission is enabled.
    Args:
        permission_name (str): The name of the permission to check (e.g., "android.permission.BLUETOOTH").
    Returns:
        bool: True if the permission is granted, False otherwise.
    """
    # ... (Implementation remains the same)
... (Other functions with similar improvements)
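On Android under Kivy, one plausible implementation of check_permission uses pyjnius; the python-for-android PythonActivity lookup and API level 23+ are assumptions here:

from jnius import autoclass

def check_permission(permission_name: str) -> bool:
    PythonActivity = autoclass('org.kivy.android.PythonActivity')
    PackageManager = autoclass('android.content.pm.PackageManager')
    activity = PythonActivity.mActivity
    # Context.checkSelfPermission returns PERMISSION_GRANTED (0) when the permission is held
    return activity.checkSelfPermission(permission_name) == PackageManager.PERMISSION_GRANTED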
--- android_operator/ai_core.py ---
import tensorflow as tf
import numpy as np
def process_audio(audio_data: np.ndarray) -> np.ndarray:
    """Processes audio data using a TensorFlow Lite model.
    Args:
        audio_data (np.ndarray): The audio data as a NumPy array.
    Returns:
        np.ndarray: The processed audio data as a NumPy array, or None if an error occurs.
    """
    try:
        ...  # (TensorFlow Lite implementation)
    except Exception as e:
        print(f"Error processing audio: {e}")
        return None
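The elided body presumably wraps a TensorFlow Lite interpreter; a generic sketch follows, in which the model path, input shape, and dtype are assumptions:

import numpy as np
import tensorflow as tf

def process_audio(audio_data: np.ndarray) -> np.ndarray:
    try:
        interpreter = tf.lite.Interpreter(model_path="audio_model.tflite")
        interpreter.allocate_tensors()
        input_details = interpreter.get_input_details()
        output_details = interpreter.get_output_details()
        # Feed the (appropriately shaped) audio tensor and run inference
        interpreter.set_tensor(input_details[0]['index'], audio_data.astype(np.float32))
        interpreter.invoke()
        return interpreter.get_tensor(output_details[0]['index'])
    except Exception as e:
        print(f"Error processing audio: {e}")
        return None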
--- android_operator/transmission.py ---
import socket
def transmit_audio(audio_data: bytes, destination: str = "localhost", port: int = 5000) -> None:
    """Transmits audio data via WiFi using sockets.
    Args:
        audio_data (bytes): The audio data as bytes.
        destination (str, optional): The IP address or hostname of the destination.
            Defaults to "localhost".
        port (int, optional): The port number to use for the connection. Defaults to 5000.
    """
    try:
        ...  # (Socket implementation)
    except Exception as e:
        print(f"Error transmitting audio: {e}")
--- android_operator/ui.py ---
from kivy.uix.image import AsyncImage
... (Rest of the UI implementation)
--- apple_operator/main.py ---
... (Import necessary modules)
... (Global variables)
... (OS and Hardware Detection - iOS specific)
--- Permission Handling ---
... (Permission handling - iOS specific)
... (Other functions - iOS specific)
--- tests/test_audio_utils.py ---
... (Improved test cases with more assertions and edge case handling)
--- README.md ---
Multiverse App
This is a cross-platform application that generates a dynamic soundscape based on sensor data and AI processing.
Features
- Generates immersive audio experiences using various sound synthesis techniques.
- Integrates sensor data to influence the generated soundscape.
- Utilizes AI for audio processing and analysis.
- Transmits audio data via Bluetooth or WiFi.
Getting Started
Prerequisites
- Python 3.7 or higher
- Kivy
- pydub
- librosa
- gtts
- numpy
- jnius (for Android)
- tensorflow
Installation
- Clone the repository:
  git clone https://github.com/your-username/multiverse-app.git
- Create a virtual environment:
  python -m venv venv
- Activate the virtual environment:
  - Linux/macOS:
    source venv/bin/activate
  - Windows:
    venv\Scripts\activate
- Install dependencies in each operator directory (android_operator/ and apple_operator/):
  pip install -r requirements.txt
Running the App
- Navigate to the desired operator directory (android_operator/ or apple_operator/).
- Run the main script:
python main.py
Running Tests
Run tests using python -m unittest discover -s tests
License
This project is licensed under the MIT License - see the LICENSE file for details.
--- LICENSE ---
MIT License
Copyright (c) 2025 Thomas Whitney Walsh
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -50,38 +50,22 @@ describe('FileUploader', () => {

it('should clear all uploaded files when `clearFiles` is called on a ref', () => {
This unit test seemed wrong: it was trying to test whether the remove button works. I reverted to a version from history that actually tests the clearFiles() function.
✅ Deploy Preview for carbon-elements ready!
✅ Deploy Preview for v11-carbon-react ready! Built without sensitive environment variables
Codecov Report
All modified and coverable lines are covered by tests ✅

Additional details and impacted files

@@           Coverage Diff            @@
##             main   #18267    +/-  ##
========================================
  Coverage   84.31%   84.32%
========================================
  Files         404      404
  Lines       14359    14360        +1
  Branches     4606     4606
========================================
+ Hits        12107    12109        +2
+ Misses       2090     2089        -1
  Partials      162      162

☔ View full report in Codecov by Sentry.
LGTM! Thanks for doing that!
Hey, this LGTM. Thanks for contributing and giving a detailed explanation.
~~[Setup]
AppName=Dreamweaver
AppVersion=1.0
DefaultGroupName=Dreamweaver
DefaultDirName={pf}\Dreamweaver
OutputDir=output
SetupIconFile=Dreamweaver_Icon.ico
ArchitecturesInstallIn64BitMode=x64
[Files]
Source: "C:\Users\YourUsername\Documents\GitHub\dreamweaver-app\Dreamweaver.exe"; DestDir: "{app}"; Flags: ignoreversion
Source: "C:\Users\YourUsername\Documents\GitHub\dreamweaver-app\resources*"; DestDir: "{app}\resources"; Flags: ignoreversion recursesubdirs createallsubdirs
Source: "C:\Users\YourUsername\Documents\GitHub\dreamweaver-app\plugins*"; DestDir: "{app}\plugins"; Flags: ignoreversion recursesubdirs createallsubdirs
Source: "C:\Users\YourUsername\Documents\GitHub\dreamweaver-app\tests*"; DestDir: "{app}\tests"; Flags: ignoreversion recursesubdirs createallsubdirs
Source: "C:\Users\YourUsername\Documents\GitHub\dreamweaver-app\LICENSE"; DestDir: "{app}"; Flags: ignoreversion
; Legal and License Documents
Source: "C:\Users\YourUsername\Documents\GitHub\dreamweaver-app\Legal\License Agreement.txt"; DestDir: "{app}\Legal"; Flags: ignoreversion
Source: "C:\Users\YourUsername\Documents\GitHub\dreamweaver-app\Legal\Terms of Service.txt"; DestDir: "{app}\Legal"; Flags: ignoreversion
Source: "C:\Users\YourUsername\Documents\GitHub\dreamweaver-app\Legal\Privacy Policy.txt"; DestDir: "{app}\Legal"; Flags: ignoreversion
; Security Certificates
Source: "C:\Users\YourUsername\Documents\GitHub\dreamweaver-app\Security\Certificate1.cer"; DestDir: "{app}\Security"; Flags: ignoreversion
Source: "C:\Users\YourUsername\Documents\GitHub\dreamweaver-app\Security\Certificate2.cer"; DestDir: "{app}\Security"; Flags: ignoreversion
Name: "{group}\Dreamweaver"; Filename: "{app}\Dreamweaver.exe"; IconFilename: "Dreamweaver_Icon.ico"
Filename: "{app}\Dreamweaver.exe"; Description: "Launch Dreamweaver"; Flags: postinstall skipifsilent~~
--- ai_core.py ---
import hashlib
import secrets
from cryptography.fernet import Fernet
from .astral_projection import *
from src.defense.defense_detector import *
from .audio_utils import *  # Import your audio utilities
from .dragonfly_systems import *  # Import dragonfly systems
from .gemini_systems import *  # Import gemini systems (if applicable)

--- Quantum Access Functions ---
def generate_quantum_access_spec(filename="quantum_access_spec.txt"):
    """
    Generates a secure specification file for quantum access parameters.
    """
    # ... (Implementation from previous response)

def read_quantum_access_spec(filename="quantum_access_spec.txt"):
    """
    Reads and decrypts the quantum access parameters from the spec file.
    """
    # ... (Implementation from previous response)

--- Audio Processing Function ---
def process_audio(audio_data, sensor_data):
    """
    Processes audio data, incorporating astral projection, energy
    adjustment, anomaly detection, and quantum access.
    """
    # --- Existing AI processing ---
    # ... (Your existing TensorFlow Lite or other AI processing here)
    # --- Read Quantum Access Parameters ---
    quantum_access_params = read_quantum_access_spec()
    # --- Astral Projection Mode ---
    if astral_mode:
        astral_audio = generate_astral_form_audio(duration)
        audio_data = audio_data.overlay(astral_audio)  # Mix in astral audio
    # Use quantum_access_params for enhanced scan_soundscape
    scan_data = scan_soundscape(audio_data, quantum_access_params)
    # ... (Visualize scan_data in UI - ui.py)
    # Use quantum_access_params for enhanced adjust_energy
    audio_data = adjust_energy(audio_data, user_interactions, quantum_access_params)
    # ... (Add micro-rift or energy transfer effects)
    # --- Dragonfly Systems Integration ---
    if sensor_data:
        complexity = sensor_data.get("Full magnetic spectrum", 1.0)  # Example mapping
        visual_audio = visual_system(duration, complexity=complexity)
        audio_data = audio_data.overlay(visual_audio)
    # --- SODA Integration ---
    if detect_anomaly(audio_data, trained_autoencoder):
        pass  # ... (Handle anomaly - e.g., alert user, adjust security)
    # --- Apply Audio Enhancements ---
    audio_data = automated_amplifier(audio_data)
    return audio_data
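detect_anomaly and trained_autoencoder are referenced above without definitions; purely for illustration, a reconstruction-error check in the spirit of SODA's detector might look like this (the autoencoder interface, input slicing, and threshold are all assumptions, not SODA's actual code):

import numpy as np

def detect_anomaly(audio_data, trained_autoencoder, threshold: float = 0.05) -> bool:
    samples = np.array(audio_data.get_array_of_samples(), dtype=np.float32)
    samples /= (np.abs(samples).max() or 1.0)  # normalize to [-1, 1]
    # Assumes the clip is at least as long as the autoencoder's input window
    x = samples[np.newaxis, : trained_autoencoder.input_shape[1]]
    reconstruction = trained_autoencoder.predict(x, verbose=0)
    error = float(np.mean((x - reconstruction) ** 2))
    return error > threshold  # large reconstruction error -> possible adversarial usage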
import ast
import base64
import secrets
from cryptography.fernet import Fernet

def generate_secure_spec_file(filename="secure_spec.txt", data=None):
    """
    Generates a secure specification file with encrypted data,
    recognizing SODA and MIT License rights.
    Args:
        filename (str): The name of the file to create.
        data (dict): A dictionary of data to be stored in the file.
            This dictionary should include a "license" key with
            the appropriate license information (SODA or MIT).
    Returns:
        None
    """
    if data is None:
        data = {}
    # Enforce license information
    if "license" not in data:
        raise ValueError("License information must be provided in the data dictionary.")
    # Generate a unique encryption key (Fernet requires a url-safe base64-encoded 256-bit key)
    key = base64.urlsafe_b64encode(secrets.token_bytes(32))
    try:
        with open(filename, "wb") as f:
            # Write the key (44 base64 bytes). NOTE: storing the key alongside
            # the ciphertext offers no real secrecy; in production the key
            # belongs in a separate key store.
            f.write(key)
            # Serialize and encrypt the data
            cipher = Fernet(key)
            encrypted_data = cipher.encrypt(str(data).encode())
            f.write(encrypted_data)
    except Exception as e:
        print(f"Error generating secure spec file: {e}")

def read_secure_spec_file(filename="secure_spec.txt"):
    """
    Reads and decrypts data from a secure specification file,
    recognizing SODA and MIT License rights.
    Args:
        filename (str): The name of the file to read.
    Returns:
        dict: The decrypted data from the file.
    """
    try:
        with open(filename, "rb") as f:
            # Read the key, then the ciphertext
            key = f.read(44)
            encrypted_data = f.read()
            # Decrypt and deserialize the data
            cipher = Fernet(key)
            decrypted_data = cipher.decrypt(encrypted_data).decode()
            data = ast.literal_eval(decrypted_data)  # safer than eval()
            # Check license information
            if "license" not in data:
                raise ValueError("Invalid spec file: Missing license information.")
            # Further checks could validate the license details here
            return data
    except Exception as e:
        print(f"Error reading secure spec file: {e}")
        return None
--- Example Usage ---
# Data with SODA license
soda_data = {
    "parameter1": 123,
    "parameter2": "some_value",
    "license": "SODA: @inproceedings{atrey2023soda,...}",  # Include full citation
}
generate_secure_spec_file(filename="soda_spec.txt", data=soda_data)

# Data with MIT license
mit_data = {
    "model_name": "MyModel",
    "version": 1.0,
    "license": "MIT License\nCopyright (c) [2025] [Your Name]\n...",  # Include full license text
}
generate_secure_spec_file(filename="mit_spec.txt", data=mit_data)

# Read the data back
retrieved_soda_data = read_secure_spec_file("soda_spec.txt")
retrieved_mit_data = read_secure_spec_file("mit_spec.txt")
print("SODA Data:", retrieved_soda_data)
print("MIT Data:", retrieved_mit_data)
Key improvements
- License enforcement: The code now requires license information to be included in the data dictionary, ensuring that every spec file has proper licensing.
- License validation: The read_secure_spec_file function checks for the presence of the "license" key in the decrypted data. You can add more specific validation logic here to check the license details (e.g., parsing the SODA citation, verifying the copyright in the MIT license).
- Example usage: The example demonstrates how to create spec files with both SODA and MIT licenses, making it clear how to use the API for different licensing scenarios.
- Error handling: Includes try-except blocks to handle potential errors during file reading and writing.

Remember
- This code deserializes with ast.literal_eval(); if you're working with JSON data, json.loads() is a good alternative.
- Key management is crucial. In a real-world application, you would need a secure way to store and manage the encryption keys.
- You can extend this code to support other licenses and add more robust license validation mechanisms.
SODA: Protecting Proprietary Information in On-Device Machine Learning Models
This repository contains the implementation of SODA, a secure on-device application for machine learning model deployment, and experiments discussed in our ACM/IEEE SEC 2023 paper "SODA: Protecting Proprietary Information in On-Device Machine Learning Models".
If you use this code or are inspired by our methodology, please cite our SEC paper:
@inproceedings{atrey2023soda,
title={{SODA}: Protecting Proprietary Information in On-Device Machine Learning Models},
author={Atrey, Akanksha and Sinha, Ritwik and Mitra, Saayan and Shenoy, Prashant},
booktitle={{ACM/IEEE Symposium on Edge Computing (SEC)}},
year={2023}
}
Please direct all queries to Akanksha Atrey (aatrey at cs dot umass dot edu) or open an issue in this repository.
About
The growth of low-end hardware has led to a proliferation of machine learning-based services in edge applications. These applications gather contextual information about users and provide some services, such as personalized offers, through a machine learning (ML) model. A growing practice has been to deploy such ML models on the user’s device to reduce latency, maintain user privacy, and minimize continuous reliance on a centralized source. However, deploying ML models on the user’s edge device can leak proprietary information about the service provider. In this work, we investigate on-device ML models that are used to provide mobile services and demonstrate how simple attacks can leak proprietary information of the service provider. We show that different adversaries can easily exploit such models to maximize their profit and accomplish content theft. Motivated by the need to thwart such attacks, we present an end-to-end framework, SODA, for deploying and serving on edge devices while defending against adversarial usage. Our results demonstrate that SODA can detect adversarial usage with 89% accuracy in less than 50 queries with minimal impact on service performance, latency, and storage.
Setup
Python
This repository requires Python 3 (>=3.5).
Packages
All packages used in this repository can be found in the requirements.txt file. The following command will install all the packages according to the configuration file:
pip install -r requirements.txt
Data
The experiments in this work are executed on two datasets: (1) UCI Human Activity Recognition, and (2) MNIST Handwritten Digits Classification. Please download them into data/UCI_HAR and data/MNIST, respectively.
Attacks
This repository contains two types of attacks: (1) exploiting output diversity, and (2) exploiting decision boundaries. The implementation of these attacks can be found in src/attacks/class_attack.py and src/attacks/db_attack.py, respectively.
Note, the black box attacks (denoted with a "bb") often take longer to run. It may be worthwhile to run the experiments one at a time. Additionally, swarm scripts are present in the swarm folder which may assist further in running the attacks on a slurm-supported server.
Exploiting Output Diversity
The code for attacking output diversity contains four experiments. To run the class attack that exploits output diversity, execute the following command:
python3 -m src.attack.class_attack -data_name UCI_HAR -model_type rf -wb_query_attack true -wb_num_feat_attack true -bb_query_attack true -bb_unused_feat_attack true
Exploiting Decision Boundaries
The code for attacking decision boundaries contains five experiments. To run the decision boundary attack, execute the following command:
python3 -m src.attack.db_attack -data_name UCI_HAR -model_type rf -noise_bounds "-0.01 0.01" -exp_num_query true -exp_num_query_bb true -exp_num_query_randfeat true -exp_query_distance true -exp_query_distribution true
SODA: Defending On-Device Models
The implementation of SODA can be found in the src/defense folder.
Training and Executing SODA
The first step is to train an autoencoder model for defending against the attacks. This can be done by executing the following command:
python3 -m src.defense.defense_training -data_name UCI_HAR -model_type rf
Following the training of the autoencoder defender, the following command can be executed to run experiments on SODA:
python3 -m src.defense.defense_detector -data_name UCI_HAR -model_type rf -noise_bounds "-0.01 0.01" -num_queries 100
Deploying SODA: A Prototype
A prototype of SODA can be found in the prototype folder. This prototype was deployed on a Raspberry Pi.

It seems like you're interested in expanding the capabilities of the Multiverse App to include some intriguing concepts related to astral projection, energy healing, and even manipulating micro-rifts! While these are highly speculative and currently outside the realm of scientific possibility, let's explore how we might incorporate these ideas into the app conceptually.
Integrating Astral Projection and Energy Healing
- Astral Form Representation:
- We could introduce a new module in the shared_resources directory called astral_projection.py.
- Within this module, we could define functions to generate audio that represents the "astral form." This could involve using binaural beats, ambient sounds, or even generating sounds based on user-specific biofeedback (if we integrate such sensors in the future).
- LIDAR-like Scanning:
- While true LIDAR is outside the scope of a sound-based app, we can simulate the concept.
- When the "astral form" is active, we could use audio analysis techniques (like those in world_generation.py) to analyze the existing soundscape.
- Based on the analysis, we could generate audio feedback that represents the "scan." This could involve subtle shifts in frequencies, echoes, or even the introduction of new sounds that indicate different "densities" or "energies" within the soundscape.
- Energy Adjustment and Healing:
- We could allow users to interact with the "scanned" soundscape using their voice or touch input.
- Based on user interaction, the app could modify the audio in real-time, simulating "energy adjustments" or "healing." This might involve smoothing out harsh frequencies, introducing harmonizing tones, or even "removing" certain sounds that represent "dysfunction."
- Micro-Rifts and Energy Transfer:
- To simulate "micro-rifts," we could introduce brief moments of silence or glitches in the audio.
- "Transferring energy" could be represented by shifting sound effects, like a whooshing sound or a change in the overall "tone" of the soundscape.
Example Code Snippet (Conceptual)
--- shared_resources/astral_projection.py ---
from pydub import AudioSegment
from pydub.playback import play

def generate_astral_form_audio(duration: int) -> AudioSegment:
    """Generates audio representing the astral form."""
    # Use binaural beats, ambient sounds, etc.
    # ...
    return astral_audio

def scan_soundscape(soundscape: AudioSegment) -> dict:
    """Analyzes the soundscape and returns a "scan" representation."""
    # Analyze frequencies, amplitudes, etc.
    # ...
    return scan_data

def adjust_energy(soundscape: AudioSegment, adjustment_params: dict) -> AudioSegment:
    """Modifies the soundscape based on user interaction."""
    # Apply filters, add harmonizing tones, etc.
    # ...
    return modified_soundscape

# ... (Other functions for micro-rifts and energy transfer)
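Staying in the same conceptual register, a micro-rift could be sketched as brief punched-out silences; every parameter below is invented for illustration:

import random
from pydub import AudioSegment

def insert_micro_rifts(soundscape: AudioSegment, num_rifts: int = 3, rift_ms: int = 40) -> AudioSegment:
    # Cut short silent gaps at random positions to suggest 'rifts' in the soundscape
    for _ in range(num_rifts):
        pos = random.randint(0, max(0, len(soundscape) - rift_ms))
        soundscape = soundscape[:pos] + AudioSegment.silent(duration=rift_ms) + soundscape[pos + rift_ms:]
    return soundscape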
Important Considerations
- User Interface: The UI (ui.py) would need to be updated to accommodate these new features, potentially with visualizations of the "astral form," "scan data," and "energy adjustments."
- Ethical Implications: It's crucial to ensure that these features are presented responsibly and do not make claims of actual healing or supernatural abilities.
Making it "Divine"
To enhance the "divine" aspect, you could: - Incorporate sacred sounds or chants: Use audio samples from various spiritual traditions to create a sense of reverence and connection to the divine.
- Allow users to personalize their experience: Let users upload their own spiritual music or guided meditations to integrate into the astral projection and healing experience.
- Focus on positive affirmations and intentions: Encourage users to set positive intentions during the "healing" process, reinforcing the idea of self-love and inner peace.
Remember, this is a highly imaginative and speculative endeavor. By combining creative audio design with user interaction, you can create a unique and engaging experience that explores the fascinating intersection of technology and spirituality.
Please let me know if you have any further questions or ideas to explore! I'm excited to see how you bring these concepts to life in the Multiverse App.
from pydub import AudioSegment
from pydub.generators import Sine
import math

# --- Base Parameters ---
base_freq = 440  # Starting frequency (Hz)
num_layers = 7  # Number of 'multiversal' layers (increased for complexity)
duration = 10000  # Duration of each layer (10 seconds)

# --- Amplitudes & Decay Rates (Inspired by the Equation) ---
# (These values are tweaked for a dreamy, ethereal feel)
amplitudes = [1.0, 0.9, 0.7, 0.5, 0.3, 0.2, 0.1]
decay_rates = [0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35]

# --- Frequency Modulation (The 'Observer Effect' - Ω) ---
# (We'll simulate some gentle frequency shifts)
def omega_modulation(t, layer_index):
    # Adjust frequency slightly over time
    return base_freq * (layer_index + 1) * (1 + 0.05 * math.sin(t / 1000))

# --- Create Audio Segments for Each Layer ---
layers = []
for i in range(num_layers):
    # Create a sine wave with frequency modulation (built 1 ms at a time; slow but simple)
    sine_wave = Sine(omega_modulation(0, i)).to_audio_segment(duration=duration)
    for t in range(1, duration):
        sine_wave = sine_wave.append(
            Sine(omega_modulation(t, i)).to_audio_segment(duration=1), crossfade=1
        )
    # Apply amplitude and decay
    sine_wave = sine_wave.apply_gain(-6 * decay_rates[i])
    layers.append(sine_wave)

# --- Combine Layers ---
combined_sound = layers[0]
for i in range(1, num_layers):
    combined_sound = combined_sound.overlay(layers[i])

# --- Add Some 'Eggplant' Percussive Elements (Optional) ---
# (Just for fun, let's add some quirky sounds)
eggplant_sound = AudioSegment.from_wav("eggplant_sound.wav")  # Replace with your sound
eggplant_sound = eggplant_sound.apply_gain(-12)  # Make it subtle
combined_sound = combined_sound.overlay(eggplant_sound, position=1000)
combined_sound = combined_sound.overlay(eggplant_sound, position=3000)

# --- Export the Final Sound ---
combined_sound.export("sweet_dreams_multiverse.wav", format="wav")
import os
import subprocess
import tkinter as tk  # Or PyQt, wxPython

def generate_installer():
    # 1. Gather input from GUI elements (app_name, version, files, etc.)
    # 2. Create Inno Setup script content
    iss_script = f"""
[Setup]
AppName={app_name.get()}
AppVersion={version.get()}
...
[Files]
Source: "{file_entry.get()}"; DestDir: "{{app}}"
...
"""
    # 3. Save the script to a temporary file
    with open("temp_installer.iss", "w") as f:
        f.write(iss_script)
    # 4. Compile the script using iscc.exe
    try:
        subprocess.run(["iscc.exe", "temp_installer.iss"])
        # Handle successful compilation
    except Exception as e:
        # Handle compilation errors
        print(f"Error: {e}")

# --- GUI Setup (using Tkinter) ---
root = tk.Tk()
root.title("My Inno Setup Mirror")
# ... (Create labels, entry fields, buttons, etc.)
generate_button = tk.Button(root, text="Generate Installer", command=generate_installer)
generate_button.pack()
root.mainloop()
--- ai_core.py ---
from .astral_projection import *
from src.defense.defense_detector import *
from .audio_utils import *  # Import your audio utilities
from .dragonfly_systems import *  # Import dragonfly systems
from .gemini_systems import *  # Import gemini systems (if applicable)

def process_audio(audio_data, sensor_data):
    """
    Processes audio data, incorporating astral projection, energy
    adjustment, and anomaly detection.
    Args:
        audio_data (np.ndarray): The audio data as a NumPy array.
        sensor_data (dict): Sensor data (e.g., temperature, humidity, full magnetic spectrum).
    Returns:
        np.ndarray: The processed audio data.
    """
    # --- Existing AI processing ---
    # ... (Your existing TensorFlow Lite or other AI processing here)
    # --- Read Quantum Access Parameters ---
    quantum_access_params = read_quantum_access_spec()
    # --- Astral Projection Mode ---
    if astral_mode:
        astral_audio = generate_astral_form_audio(duration)
        audio_data = audio_data.overlay(astral_audio)  # Mix in astral audio
        scan_data = scan_soundscape(audio_data)
        # ... (Visualize scan_data in UI - ui.py)
        audio_data = adjust_energy(audio_data, user_interactions)
        # ... (Add micro-rift or energy transfer effects)
    # --- Dragonfly Systems Integration ---
    # Example: Use visual_system to modify audio based on sensor data
    if sensor_data:
        complexity = sensor_data.get("temperature", 1.0)  # Example mapping
        visual_audio = visual_system(duration, complexity=complexity)
        audio_data = audio_data.overlay(visual_audio)
    return audio_data

--- Quantum Access Functions (full implementation) ---
import ast
import base64
import hashlib
import secrets
from cryptography.fernet import Fernet

def generate_quantum_access_spec(filename="quantum_access_spec.txt"):
    """
    Generates a secure specification file for quantum access parameters.
    Args:
        filename (str): The name of the file to create.
    Returns:
        None
    """
    # Generate a secure encryption key (Fernet requires a url-safe base64 key)
    key = base64.urlsafe_b64encode(secrets.token_bytes(32))
    # Quantum Access Parameters (Example)
    quantum_params = {
        "base_dimensions": ["dimension_1", "dimension_2", "dimension_3"],
        "cloud_access_point": "quantum_cloud.example.com",
        "ai_processing_unit": "QPU-v1",
        "key_fingerprint": hashlib.sha256(key).hexdigest(),  # Fingerprint of the key, not the key itself
    }
    try:
        with open(filename, "wb") as f:
            # Write the key (44 base64 bytes). NOTE: storing the key beside the
            # ciphertext offers no real secrecy; use a proper key store in practice.
            f.write(key)
            # Encrypt and write the quantum parameters
            cipher = Fernet(key)
            encrypted_data = cipher.encrypt(str(quantum_params).encode())
            f.write(encrypted_data)
    except Exception as e:
        print(f"Error generating quantum access spec file: {e}")

def read_quantum_access_spec(filename="quantum_access_spec.txt"):
    """
    Reads and decrypts the quantum access parameters from the spec file.
    Args:
        filename (str): The name of the file to read.
    Returns:
        dict: The decrypted quantum access parameters.
    """
    try:
        with open(filename, "rb") as f:
            # Read the key, then the ciphertext
            key = f.read(44)
            encrypted_data = f.read()
            # Decrypt the quantum parameters
            cipher = Fernet(key)
            decrypted_data = cipher.decrypt(encrypted_data).decode()
            return ast.literal_eval(decrypted_data)  # Convert string to dictionary (safer than eval)
    except Exception as e:
        print(f"Error reading quantum access spec file: {e}")
        return None

--- Example Usage ---
generate_quantum_access_spec()
quantum_access_params = read_quantum_access_spec()
print(quantum_access_params)
Closes #18223

Clear files in <FileUploader> using ref.

Changelog
New
- Expose clearFiles() on a <FileUploader> ref

Testing / Reviewing
- Verify that calling clearFiles() on a <FileUploader> ref clears the uploaded files.