Skip to content

Commit

Permalink
bug fixes, security additions
Browse files Browse the repository at this point in the history
  • Loading branch information
tbraun96 committed Nov 30, 2020
1 parent 2bae1dd commit fac7e9c
Show file tree
Hide file tree
Showing 54 changed files with 774 additions and 1,384 deletions.
7 changes: 7 additions & 0 deletions hyxe_crypt/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,13 @@ bitvec = "0.17.4"
env_logger = "0.7.1"
parking_lot = "0.11.0"
bigdecimal = "0.2.0"
zeroize = "1.1.1"

[target.'cfg(not(target_os = "windows"))'.dependencies]
libc = "0.2.80"

[target.'cfg(target_os = "windows")'.dependencies]
kernel32-sys = "0.2.2"

[dev-dependencies]
criterion = "*"
9 changes: 6 additions & 3 deletions hyxe_crypt/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,8 @@ pub mod prelude {
pub use crate::random::HyperRandom;
pub use crate::misc::CryptError;
pub use crate::drill_algebra::PacketVector;
pub use crate::sec_string::SecString;
pub use crate::sec_bytes::SecBuffer;
pub use ::async_trait::async_trait;
pub use ez_pqcrypto::{PostQuantumContainer, algorithm_dictionary};
pub use zerocopy::{ByteSlice, ByteSliceMut};
Expand Down Expand Up @@ -56,6 +58,7 @@ pub mod aes_gcm;

/// Error type
pub mod misc;

/// If debug is on, then instead of spending time downloading new bytes each time, a thread-local rng is executed to fill the empty byte array. TURN OFF for production
pub const DEBUG: bool = true;
/// A secure mutable string type
pub mod sec_string;
/// A memory-secure byte-buffer type that is mlocked and zeroized on drop
pub mod sec_bytes;
49 changes: 36 additions & 13 deletions hyxe_crypt/src/misc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ use byteorder::ByteOrder;
use crate::drill_update::DrillUpdateObject;
use rand::{Rng, thread_rng};
use std::fmt::Formatter;
//use std::os::raw::c_void;
use std::os::raw::c_void;
use rand::prelude::SliceRandom;

/// Default Error type for this crate
Expand Down Expand Up @@ -177,33 +177,56 @@ pub(crate) fn get_indices<Rnd: Rng>(count: u8, rng: &mut Rnd) -> ([usize; E_OF_X
(idx_outer, idx_inner)
}

/*
#[cfg(any(target_os = "macos", target_os = "linux"))]

#[cfg(not(target_os = "windows"))]
#[allow(unused_results)]
/// Locks-down the memory location, preventing it from being read until unlocked
/// For linux, returns zero if successful
pub unsafe fn mlock(ptr: *const u8, len: usize) {
assert_eq!(libc::mlock(ptr as *const c_void, len), 0);
libc::mlock(ptr as *const c_void, len);
#[cfg(any(target_os = "freebsd", target_os = "dragonfly"))]
libc::madvise(ptr as *mut c_void, len, libc::MADV_NOCORE);
#[cfg(target_os = "linux")]
libc::madvise(ptr as *mut c_void, len, libc::MADV_DONTDUMP);
}

#[cfg(target_os = "windows")]
#[allow(unused_results)]
/// Locks-down the memory location, preventing it from being swapped to disk
/// until unlocked. The `VirtualLock` result is deliberately ignored: locking
/// is best-effort hardening, not a hard guarantee.
/// NOTE(review): `VirtualLock` takes a SIZE_T; `len as u64` assumes a 64-bit
/// target — confirm against the kernel32-sys signature if 32-bit is supported.
pub unsafe fn mlock(ptr: *const u8, len: usize) {
    kernel32::VirtualLock(ptr as *mut c_void, len as u64);
}

#[cfg(not(target_os = "windows"))]
#[allow(unused_results)]
/// Unlocks the memory location so the OS may swap it again. The `munlock`
/// result is deliberately ignored (best-effort, mirrors `mlock` above).
/// Where supported, the pages are made dumpable again.
pub unsafe fn munlock(ptr: *const u8, len: usize) {
    libc::munlock(ptr as *const c_void, len);
    // Re-enable core dumps for these pages where the OS supports it
    #[cfg(any(target_os = "freebsd", target_os = "dragonfly"))]
    libc::madvise(ptr as *mut c_void, len, libc::MADV_CORE);
    #[cfg(target_os = "linux")]
    libc::madvise(ptr as *mut c_void, len, libc::MADV_DODUMP);
}

#[cfg(target_os = "windows")]
#[allow(unused_results)]
/// Unlocks the memory location so the OS may swap it again. The result is
/// deliberately ignored: `VirtualUnlock` returns an error (158) if the region
/// is already unlocked, which is harmless here.
/// Windows unlocks a whole page at once.
pub unsafe fn munlock(ptr: *const u8, len: usize) {
    kernel32::VirtualUnlock(ptr as *mut c_void, len as u64);
}

/// General `memset`: fills `n` bytes starting at `s` with `c` using volatile
/// writes, so the compiler cannot optimize the stores away (which would defeat
/// the purpose of wiping secrets before deallocation).
///
/// This is a stable-Rust replacement for the unstable
/// `core::intrinsics::volatile_set_memory` intrinsic, which requires a
/// nightly compiler and a feature gate.
///
/// # Safety
/// `s` must be valid for writes of `n` bytes.
#[inline(never)]
unsafe fn memset(s: *mut u8, c: u8, n: usize) {
    for i in 0..n {
        // SAFETY: caller guarantees `s..s+n` is writable; `i < n`
        core::ptr::write_volatile(s.add(i), c);
    }
    // Keep later operations (e.g. the free) ordered after the wipe
    core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
}

/// General `memzero`: volatile-zeroes `n` bytes beginning at `dest`.
///
/// Takes `*const u8` for caller convenience (matches `as_ptr()`).
/// NOTE(review): writing through a pointer derived from `as_ptr()` on a
/// shared reference is dubious under Rust's aliasing rules — consider
/// switching call sites to `as_mut_ptr()` and a `*mut u8` parameter.
///
/// # Safety
/// `dest` must point to writable memory of at least `n` bytes.
#[inline]
pub unsafe fn zeroize(dest: *const u8, n: usize) {
    let writable = dest as *mut u8;
    memset(writable, 0u8, n);
}
51 changes: 6 additions & 45 deletions hyxe_crypt/src/net/crypt_splitter.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
use std::collections::HashMap;
use std::ops::{Deref, DerefMut, Range};
use std::ops::Range;
use std::time::{Duration, Instant};

use bitvec::vec::BitVec;
Expand All @@ -14,6 +14,7 @@ use crate::drill_algebra::{generate_packet_coordinates_inv, generate_packet_vect
use crate::prelude::{CryptError, PostQuantumContainer, SecurityLevel};
use rayon::prelude::*;
use rayon::iter::IndexedParallelIterator;
use std::sync::Arc;

/// The maximum bytes per group
pub const MAX_BYTES_PER_GROUP: usize = 1024 * 1024 * 10;
Expand Down Expand Up @@ -72,8 +73,7 @@ pub struct PacketCoordinate {

/// header_size_bytes: This size (in bytes) of each packet's header
#[allow(unused_results)]
pub fn scramble_encrypt_group<T: AsRef<[u8]>>(plain_text: T, security_level: SecurityLevel, drill: &Drill, quantum_container: &PostQuantumContainer, header_size_bytes: usize, target_cid: u64, object_id: u32, group_id: u64, header_inscriber: impl Fn(&PacketVector, &Drill, u32, u64, &mut BytesMut) + Send + Sync) -> Result<GroupSenderDevice, CryptError<String>> {
let quantum_container = AssertSendSyncSafe::wrap(quantum_container);
pub fn scramble_encrypt_group<T: AsRef<[u8]>>(plain_text: T, security_level: SecurityLevel, drill: &Drill, quantum_container: &Arc<PostQuantumContainer>, header_size_bytes: usize, target_cid: u64, object_id: u32, group_id: u64, header_inscriber: impl Fn(&PacketVector, &Drill, u32, u64, &mut BytesMut) + Send + Sync) -> Result<GroupSenderDevice, CryptError<String>> {
let plain_text = plain_text.as_ref();
let max_packet_payload_size = get_max_packet_size(MAX_WAVEFORM_PACKET_SIZE, security_level);
let max_packets_per_wave = drill.get_multiport_width();
Expand Down Expand Up @@ -118,7 +118,7 @@ pub fn scramble_encrypt_group<T: AsRef<[u8]>>(plain_text: T, security_level: Sec
}

let packets = plain_text.chunks(max_plaintext_bytes_per_wave).enumerate().map(|(wave_idx, bytes_to_encrypt_for_this_wave)| {
let mut packets = drill.aes_gcm_encrypt(calculate_nonce_version(wave_idx, group_id), quantum_container.for_ref(), bytes_to_encrypt_for_this_wave).unwrap()
let mut packets = drill.aes_gcm_encrypt(calculate_nonce_version(wave_idx, group_id), quantum_container, bytes_to_encrypt_for_this_wave).unwrap()
.chunks(max_packet_payload_size).enumerate().map(|(relative_packet_idx, ciphertext_packet_bytes)| {
debug_assert_ne!(ciphertext_packet_bytes.len(), 0);
let mut packet = BytesMut::with_capacity(ciphertext_packet_bytes.len() + header_size_bytes);
Expand All @@ -139,49 +139,10 @@ pub fn scramble_encrypt_group<T: AsRef<[u8]>>(plain_text: T, security_level: Sec
Ok(GroupSenderDevice::new(group_receiver_config, packets))
}

/// Asserts an entity is safe to share and send between threads
pub struct AssertSendSyncSafe<T> {
inner: T
}

unsafe impl<T> Send for AssertSendSyncSafe<T> {}
unsafe impl<T> Sync for AssertSendSyncSafe<T> {}

impl<T> Deref for AssertSendSyncSafe<T> {
type Target = T;

fn deref(&self) -> &Self::Target {
&self.inner
}
}

impl<T> DerefMut for AssertSendSyncSafe<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}

impl<T> AssertSendSyncSafe<T> {
/// Wraps the entity T
pub fn wrap(inner: T) -> Self {
Self { inner }
}

/// Obtains a ref of the object
pub fn for_ref(&self) -> &T {
&self.inner
}
/// Obtains a mut ref of the object
pub fn for_mut(&mut self) -> &mut T {
&mut self.inner
}
}

/// header_size_bytes: This size (in bytes) of each packet's header
/// the feed order into the header_inscriber is first the target_cid, and then the object ID
#[allow(unused_results)]
pub fn par_scramble_encrypt_group<T: AsRef<[u8]>>(plain_text: T, security_level: SecurityLevel, drill: &Drill, quantum_container: &PostQuantumContainer, header_size_bytes: usize, target_cid: u64, object_id: u32, group_id: u64, header_inscriber: impl Fn(&PacketVector, &Drill, u32, u64, &mut BytesMut) + Send + Sync) -> Result<GroupSenderDevice, CryptError<String>> {
let quantum_container = AssertSendSyncSafe::wrap(quantum_container);
pub fn par_scramble_encrypt_group<T: AsRef<[u8]>>(plain_text: T, security_level: SecurityLevel, drill: &Drill, quantum_container: &Arc<PostQuantumContainer>, header_size_bytes: usize, target_cid: u64, object_id: u32, group_id: u64, header_inscriber: impl Fn(&PacketVector, &Drill, u32, u64, &mut BytesMut) + Send + Sync) -> Result<GroupSenderDevice, CryptError<String>> {
let plain_text = plain_text.as_ref();
let max_packet_payload_size = get_max_packet_size(1024*8, security_level);
let max_packets_per_wave = drill.get_multiport_width();
Expand Down Expand Up @@ -226,7 +187,7 @@ pub fn par_scramble_encrypt_group<T: AsRef<[u8]>>(plain_text: T, security_level:
}

let packets = plain_text.par_chunks(max_plaintext_bytes_per_wave).enumerate().map(|(wave_idx, bytes_to_encrypt_for_this_wave)| {
let mut packets = drill.aes_gcm_encrypt(calculate_nonce_version(wave_idx, group_id), quantum_container.for_ref(), bytes_to_encrypt_for_this_wave).unwrap()
let mut packets = drill.aes_gcm_encrypt(calculate_nonce_version(wave_idx, group_id), quantum_container, bytes_to_encrypt_for_this_wave).unwrap()
.chunks(max_packet_payload_size).enumerate().map(|(relative_packet_idx, ciphertext_packet_bytes)| {
debug_assert_ne!(ciphertext_packet_bytes.len(), 0);
let mut packet = BytesMut::with_capacity(ciphertext_packet_bytes.len() + header_size_bytes);
Expand Down
64 changes: 64 additions & 0 deletions hyxe_crypt/src/sec_bytes.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
use crate::sec_string::SecString;

/// A memory-secure wrapper for shipping around Bytes.
///
/// The backing allocation is `mlock`ed on construction (best-effort: not
/// swapped to disk, and excluded from core dumps where the OS supports it)
/// and zeroized before the memory is returned to the allocator on drop.
pub struct SecBuffer {
    inner: Vec<u8>
}

impl SecBuffer {
    /// Creates a new, empty SecBytes container (already locked)
    pub fn new() -> Self {
        Self::from(Vec::new())
    }

    /// Returns the inner buffer without zeroizing it. The pages are
    /// munlocked first, since the returned `Vec` leaves this type's
    /// protection and the caller assumes responsibility for the contents.
    pub fn into_buffer(mut self) -> Vec<u8> {
        self.unlock();
        std::mem::take(&mut self.inner)
    }

    /// returns the length of the buffer
    pub fn len(&self) -> usize {
        self.inner.len()
    }

    /// Returns true if the buffer contains no bytes
    pub fn is_empty(&self) -> bool {
        self.inner.is_empty()
    }

    fn lock(&self) {
        unsafe { crate::misc::mlock(self.inner.as_ptr(), self.inner.len()) }
    }

    fn unlock(&self) {
        unsafe { crate::misc::munlock(self.inner.as_ptr(), self.inner.len()) }
    }

    fn zeroize(&mut self) {
        // Wipe the whole allocation (capacity, not just len) in case spare
        // capacity retains stale secret bytes
        unsafe { crate::misc::zeroize(self.inner.as_ptr(), self.inner.capacity()) }
    }
}

impl AsRef<[u8]> for SecBuffer {
    fn as_ref(&self) -> &[u8] {
        self.inner.as_ref()
    }
}

impl<T: Into<Vec<u8>>> From<T> for SecBuffer {
    fn from(inner: T) -> Self {
        let this = Self { inner: inner.into() };
        this.lock();
        this
    }
}

impl From<SecString> for SecBuffer {
    fn from(inner: SecString) -> Self {
        Self::from(inner.into_buffer().into_bytes())
    }
}

impl Clone for SecBuffer {
    /// Deep-copies the buffer, routing through `From` so the copy's
    /// allocation is mlocked like the original. A `#[derive(Clone)]` here
    /// would be a bug: the clone's memory would never be locked, yet its
    /// `Drop` would still munlock and zeroize it.
    fn clone(&self) -> Self {
        Self::from(self.inner.clone())
    }
}

impl Default for SecBuffer {
    fn default() -> Self {
        Self::new()
    }
}

impl Drop for SecBuffer {
    fn drop(&mut self) {
        // Wipe first, while the pages are still (best-effort) excluded from
        // core dumps; only then release the lock back to the OS
        self.zeroize();
        self.unlock();
    }
}
118 changes: 118 additions & 0 deletions hyxe_crypt/src/sec_string.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,118 @@
use std::fmt::{Debug, Display, Formatter};
use std::ops::Deref;

/// Allows mutable access
pub struct SecString {
inner: String
}

impl SecString {
/// Creates a new instance SecString
pub fn new() -> Self {
Self::from(String::new())
}

/// Safely pushes a new character
pub fn push(&mut self, val: char) {
self.unlock();
self.inner.push(val);
self.lock();
}

/// Clears and zeroizes the vector. Keeps the allocation in-tact
pub fn clear(&mut self) {
self.unlock();
self.zeroize();
self.inner.clear();
self.lock();
}

/// Inserts a char at `pos`
pub fn insert(&mut self, pos: usize, val: char) {
self.unlock();
self.inner.insert(pos, val);
self.lock();
}

/// removes a char at `pos`
pub fn remove(&mut self, pos: usize) -> char {
self.unlock();
let val = self.inner.remove(pos);
self.lock();
val
}

/// Gets the inner string
pub fn into_buffer(mut self) -> String {
self.unlock();
std::mem::take(&mut self.inner)
}

fn lock(&self) {
let (ptr, len) = decompose(&self.inner);
unsafe { crate::misc::mlock(ptr, len) }
}

fn unlock(&self) {
let (ptr, len) = decompose(&self.inner);
unsafe { crate::misc::munlock(ptr, len) }
}

fn zeroize(&mut self) {
unsafe { crate::misc::zeroize(self.inner.as_ptr(), self.inner.len()) }
}
}

impl From<String> for SecString {
fn from(inner: String) -> Self {
let this = Self { inner };
this.lock();
this
}
}

impl Drop for SecString {
fn drop(&mut self) {
self.unlock();
self.zeroize();
}
}

impl Clone for SecString {
fn clone(&self) -> Self {
Self::from(self.inner.clone())
}
}

impl Default for SecString {
fn default() -> Self {
Self::new()
}
}

impl Deref for SecString {
type Target = String;

fn deref(&self) -> &Self::Target {
&self.inner
}
}

impl Debug for SecString {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "***SECRET***")
}
}

impl Display for SecString {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
Debug::fmt(self, f)
}
}

/// Splits a `String` into the raw parts the mlock/munlock/zeroize calls need:
/// the base pointer of its heap allocation and its *capacity* — not its
/// length — so the entire allocation (including spare bytes) is covered.
/// `&String` (rather than `&str`) is required because `capacity()` only
/// exists on the owning type.
fn decompose(input: &String) -> (*const u8, usize) {
    let ptr = input.as_ptr();
    let capacity = input.capacity();
    (ptr, capacity)
}
Loading

0 comments on commit fac7e9c

Please sign in to comment.