diff --git a/bits/bits_challanges.py b/bits/bits_challanges.py new file mode 100755 index 0000000..42572fa --- /dev/null +++ b/bits/bits_challanges.py @@ -0,0 +1,73 @@ +#! /usr/bin/env -S /usr/bin/time /usr/bin/python3.9.5 -i + +# -*- coding: utf-8 -*- + +# Some other needed imports +import datetime +import dill +import gzip +import os +import pdb +import re +import sys +import traceback + +import numpy as np +import pandas as pd +import multiprocessing as mp + +from collections import defaultdict +from copy import deepcopy, copy +from dotmap import DotMap +from functools import reduce +from hashlib import sha256 +from io import BytesIO +from memory_tempfile import MemoryTempfile +from shutil import copyfile +from pprint import pprint +from typing import List, Set, Tuple, Dict, Union, Any +from PIL import Image + +CURRENT_WORKING_DIR = os.getcwd() +PATH_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) +HOME_DIR = os.path.expanduser("~") +TEMP_DIR = MemoryTempfile().gettempdir() +PYTHON_PROGRAMS_DIR = os.path.join(HOME_DIR, 'git/python_programs') + +# set the relative/absolute path where the utils_load_module.py file is placed! +sys.path.append(PYTHON_PROGRAMS_DIR) +from utils_load_module import load_module_dynamically + +var_glob = globals() +load_module_dynamically(**dict(var_glob=var_glob, name='utils', path=os.path.join(PYTHON_PROGRAMS_DIR, "utils.py"))) +load_module_dynamically(**dict(var_glob=var_glob, name='utils_multiprocessing_manager', path=os.path.join(PYTHON_PROGRAMS_DIR, "utils_multiprocessing_manager.py"))) + +mkdirs = utils.mkdirs +MultiprocessingManager = utils_multiprocessing_manager.MultiprocessingManager + +OBJS_DIR_PATH = os.path.join(PATH_ROOT_DIR, 'objs') +mkdirs(OBJS_DIR_PATH) + +PLOTS_DIR_PATH = os.path.join(PATH_ROOT_DIR, 'plots') +mkdirs(PLOTS_DIR_PATH) + +def bit_kth_remove(n, k): + print('Hello There') + + +def find_and_execute_function(): + with open('bits_challanges.py', 'r') as f: + content = f.read() + + a = re.search(r'def bit_kth_remove\(n, k\):\n( .*\n)+', content) + f_str = a.string[a.start():a.end()] + + return content, f_str + +if __name__ == '__main__': + print("Hello World!") + + n = 123654 + print("n: {}".format(n)) + n_bits = bin(n)[2:] + print("n_bits: {}".format(n_bits)) diff --git a/cpp_programs/Makefile_template b/cpp_programs/Makefile_template new file mode 100644 index 0000000..af8b89a --- /dev/null +++ b/cpp_programs/Makefile_template @@ -0,0 +1,10 @@ +.DEFAULT_GOAL := all + +CC=g++ +CFLAGS=-Wall -std=c++20 + +utils: + $(CC) $(CFLAGS) utils.cpp -c +main: utils + $(CC) $(CFLAGS) main.cpp utils.o -o main.o +all: main diff --git a/cpp_programs/main_template.cpp b/cpp_programs/main_template.cpp new file mode 100644 index 0000000..cd30ae3 --- /dev/null +++ b/cpp_programs/main_template.cpp @@ -0,0 +1,12 @@ +#include + +using namespace std; + +using std::cout; +using std::endl; + +int main(int argc, char* argv[]) { + cout << "Hello World!" 
<< endl; + + return 0; +} diff --git a/cpp_programs/multi_linear_sequences/Makefile b/cpp_programs/multi_linear_sequences/Makefile new file mode 100644 index 0000000..4311132 --- /dev/null +++ b/cpp_programs/multi_linear_sequences/Makefile @@ -0,0 +1,12 @@ +.DEFAULT_GOAL := all + +CC=g++ +CFLAGS=-Werror -Wall -std=c++20 -g +# CFLAGS=-Werror -Wall -std=c++20 -O2 -g +# CFLAGS=-Wall -std=c++20 + +utils: utils.cpp + $(CC) $(CFLAGS) utils.cpp -c +main: main.cpp utils + $(CC) $(CFLAGS) main.cpp utils.o -o main.o +all: main diff --git a/cpp_programs/multi_linear_sequences/main.cpp b/cpp_programs/multi_linear_sequences/main.cpp new file mode 100644 index 0000000..6937a29 --- /dev/null +++ b/cpp_programs/multi_linear_sequences/main.cpp @@ -0,0 +1,315 @@ +#include +#include +#include +#include +#include +#include +#include + +#include "utils.h" + +using namespace std; + +using std::cout; +using std::endl; + +using std::vector; +using std::map; +using std::fill; + +typedef struct GenerateAllCombVec_ { + vector arr; + u32 count; + u32 n; + u32 m; + bool isFinished; + GenerateAllCombVec_(const u32 n_, const u32 m_) : + arr(n_, 0), count(0), n(n_), m(m_), isFinished(false) + {} + GenerateAllCombVec_(const u32 n_, const u32 m_, const u32 count_) : + GenerateAllCombVec_(n_, m_) + { + u32 c = count_; + for (u32 i = 0; i < this->n; ++i) { + this->arr[i] = c % this->m; + c /= this->m; + } + if (c > 0) { + this->isFinished = true; + } + } + const bool next() { + if (isFinished) { + return true; + } + + count += 1; + for (u32 i = 0; i < this->n; ++i) { + if ((this->arr[i] += 1) < this->m) { + return false; + } + this->arr[i] = 0; + } + + isFinished = true; + return true; + } + void reset() { + count = 0; + isFinished = false; + fill(this->arr.begin(), this->arr.end(), 0); + } + inline const u64 calcIdx() { + u64 mult = 1ull; + u64 s = 0; + for (u32 i = 0; i < this->n; ++i) { + s += (u64)(this->arr[i]) * mult; + mult *= (u64)(this->m); + } + + return s; + } +} GenerateAllCombVec; + +typedef struct ArrPrepand_ { + const GenerateAllCombVec* arr_a; + u32 n; + u32 n_2; + u32 m; + vector arr; + ArrPrepand_(const GenerateAllCombVec* arr_a_) : + arr_a(arr_a_), n(arr_a_->n), n_2(pow(arr_a_->n, 2)), m(arr_a_->m), arr(n_2, 0) + { + this->arr[this->n_2 - 1] = 1; + }; + void prepand() { + switch (this->n) { + case 1: + this->arr[0] = this->arr_a->arr[0]; + break; + case 2: + this->arr[0] = (this->arr_a->arr[0] * this->arr_a->arr[1]) % this->m; + this->arr[1] = this->arr_a->arr[0]; + this->arr[2] = this->arr_a->arr[1]; + break; + case 3: + this->arr[0] = (this->arr_a->arr[0] * this->arr_a->arr[1] * this->arr_a->arr[2]) % this->m; + this->arr[1] = (this->arr_a->arr[0] * this->arr_a->arr[1]) % this->m; + this->arr[2] = (this->arr_a->arr[0] * this->arr_a->arr[2]) % this->m; + this->arr[3] = (this->arr_a->arr[1] * this->arr_a->arr[2]) % this->m; + this->arr[4] = this->arr_a->arr[0]; + this->arr[5] = this->arr_a->arr[1]; + this->arr[6] = this->arr_a->arr[2]; + break; + default: + assert(false && "Not implemented for n > 3!"); + break; + } + } + inline const u64 calcIdx() { + u64 mult = 1ull; + u64 s = 0; + for (u32 i = 0; i < this->n; ++i) { + s += (u64)(this->arr[i]) * mult; + mult *= (u64)(this->m); + } + + return s; + } +} ArrPrepand; + +typedef struct VecTempIter_ { + vector arr; + vector arr_mult; + u32 n; + u32 n_2; + u32 m; + const ArrPrepand* arr_prep; + const GenerateAllCombVec* arr_k; + VecTempIter_(const ArrPrepand* arr_prep_, const GenerateAllCombVec* arr_k_) : + arr(arr_prep_->n, 0), arr_mult(arr_prep_->n_2, 0), + 
n(arr_prep_->n), n_2(arr_prep_->n_2), m(arr_prep_->m), + arr_prep(arr_prep_), arr_k(arr_k_) + {} + inline void multiply() { + for (u32 i = 0; i < this->n_2; ++i) { + this->arr_mult[i] = (this->arr_prep->arr[i] * this->arr_k->arr[i]) % this->m; + } + } + inline void shift() { + for (u32 i = 0; i < this->n - 1; ++i) { + this->arr[i] = this->arr_prep->arr_a->arr[i + 1]; + } + } + inline const u32 sum() { + this->multiply(); + u32 s = 0; + for (u32 i = 0; i < this->n_2; ++i) { + s += this->arr_mult[i]; + } + this->shift(); + return s; + } + inline void iterate() { + this->arr[this->n - 1] = this->sum() % this->m; + } + inline const u64 calcIdxNext() { + this->iterate(); + + u64 mult = 1ull; + u64 s = 0; + for (u32 i = 0; i < this->n; ++i) { + s += (u64)(this->arr[i]) * mult; + mult *= (u64)(this->m); + } + + return s; + } +} VecTempIter; + +// typedef struct CyclesOfKIdx_ { +// vector> vec_cycles; +// u64 k_idx; +// CyclesOfKIdx_(const vector>& vec_cycles_, const u32 k_idx_) : +// vec_cycles(vec_cycles_), k_idx(k_idx_) +// {} +// } CyclesOfKIdx; + +int main(int argc, char* argv[]) { + cout << "Hello World!" << endl; + + const u32 n = 2; + const u32 m = 5; + GenerateAllCombVec vec_a = GenerateAllCombVec(n, m); + GenerateAllCombVec vec_k = GenerateAllCombVec(pow(n, 2), m); + + ArrPrepand arr_prep = ArrPrepand(&vec_a); + + VecTempIter vec_temp_iter = VecTempIter(&arr_prep, &vec_k); + + vector arr_idx_to_idx_next(pow(m, n)); + + map> map_k_idx_to_map_a_idx_to_idx_next; + map>> map_k_idx_to_vec_cycles; + // vector> vec_all_cycles; + map map_len_cycle_to_count; + + while (!vec_k.isFinished) { // && vec_k.count < 1000) { + const u64 k_idx = vec_k.calcIdx(); + vec_a.reset(); + while (!vec_a.isFinished) { // && vec_a.count < 300) { + arr_prep.prepand(); + + const u64 idx_now = vec_a.calcIdx(); + const u64 idx_next = vec_temp_iter.calcIdxNext(); + + arr_idx_to_idx_next[idx_now] = idx_next; + + vec_a.next(); + } + + unordered_set set_idx_used; + vector> vec_cycles; + + unordered_set set_one_cycle; + vector vec_one_cycle; + map map_a_idx_to_idx_next; + for (u64 i = 0; i < arr_idx_to_idx_next.size(); ++i) { + map_a_idx_to_idx_next[i] = arr_idx_to_idx_next[i]; + } + + map_k_idx_to_map_a_idx_to_idx_next.emplace(k_idx, map_a_idx_to_idx_next); + + cout << "k_idx: " << k_idx << endl; + cout << "map_a_idx_to_idx_next: " << map_a_idx_to_idx_next << endl; + + //if (vec_k.count > 231) { + // exit(0); + //} + + while (map_a_idx_to_idx_next.size() > 0) { + set_one_cycle.clear(); + vec_one_cycle.clear(); + + const auto t_1 = map_a_idx_to_idx_next.begin(); + const u64 idx_now_1 = t_1->first; + const u64 idx_next_1 = t_1->second; + map_a_idx_to_idx_next.erase(t_1); + + if (idx_now_1 == idx_next_1) { + vec_one_cycle.emplace_back(idx_now_1); + vec_cycles.emplace_back(vec_one_cycle); + + set_idx_used.insert(idx_now_1); + continue; + } + + if (set_idx_used.find(idx_next_1) != set_idx_used.end()) { + continue; + } + + set_one_cycle.insert(idx_now_1); + vec_one_cycle.push_back(idx_now_1); + + set_one_cycle.insert(idx_next_1); + vec_one_cycle.push_back(idx_next_1); + + bool is_not_cycle = true; + u64 idx_now = idx_next_1; + while (true) { + const u64 idx_next = map_a_idx_to_idx_next[idx_now]; + map_a_idx_to_idx_next.erase(idx_now); + + if (set_idx_used.find(idx_next) != set_idx_used.end()){ + break; + } + + if (set_one_cycle.find(idx_next) != set_one_cycle.end()){ + const auto iter = std::find(vec_one_cycle.begin(), vec_one_cycle.end(), idx_next); + + vector vec_one_cycle_true; + for (auto it = iter; it != vec_one_cycle.end(); 
++it) { + const u32 idx = *it; + vec_one_cycle_true.push_back(idx); + set_idx_used.insert(idx); + } + + vec_cycles.emplace_back(vec_one_cycle_true); + // find the cycle! and extract it + break; + } + + set_one_cycle.insert(idx_next); + vec_one_cycle.push_back(idx_next); + + idx_now = idx_next; + } + } + + // if ((vec_k.count % 10000) == 0) { + cout << "vec_k: " << vec_k.arr << endl; + cout << "vec_cycles: " << vec_cycles << endl; + // } + // break; + + // vec_cylces_of_k.emplace_back(vec_cycles, vec_k.calcIdx()); + + for (const vector& vec_cycle : vec_cycles) { + const u32 len = (u32)vec_cycle.size(); + if (map_len_cycle_to_count.find(len) == map_len_cycle_to_count.end()) { + map_len_cycle_to_count.emplace(len, 0); + } + map_len_cycle_to_count[len] += 1; + } + + map_k_idx_to_vec_cycles.emplace(k_idx, vec_cycles); + // cout << "- arr_idx_to_idx_next: " << arr_idx_to_idx_next << endl; + vec_k.next(); + } + + // cout << "map_k_idx_to_vec_cycles: " << map_k_idx_to_vec_cycles << endl; + cout << "map_len_cycle_to_count: " << map_len_cycle_to_count << endl; + // cout << "map_k_idx_to_map_a_idx_to_idx_next: " << map_k_idx_to_map_a_idx_to_idx_next << endl; + + return 0; +} diff --git a/cpp_programs/multi_linear_sequences/utils.cpp b/cpp_programs/multi_linear_sequences/utils.cpp new file mode 100644 index 0000000..a730d7b --- /dev/null +++ b/cpp_programs/multi_linear_sequences/utils.cpp @@ -0,0 +1,40 @@ +#include "utils.h" + +template +std::ostream& operator<<(std::ostream& os, const std::vector& obj) { + os << "["; + std::for_each(obj.begin(), obj.end() - 1, [&os](const T& elem) { + os << elem << ", "; + }); + os << obj.back(); + os << "]"; + return os; +} +template ostream& operator<<(ostream& os, const std::vector& obj); +template ostream& operator<<(ostream& os, const std::vector& obj); +template ostream& operator<<(ostream& os, const std::vector>& obj); + +template +ostream& operator<<(ostream& os, const unordered_set& obj) { + os << "{"; + for (auto itr = obj.begin(); itr != obj.end(); ++itr) { + cout << *itr << ", "; + } + os << "}"; + return os; +} +template ostream& operator<<(ostream& os, const unordered_set& obj); + +template +ostream& operator<<(ostream& os, const std::map& obj) { + os << "{"; + for (auto itr = obj.begin(); itr != obj.end(); ++itr) { + cout << "" << itr->first << ": " << itr->second << ", "; + } + os << "}"; + return os; +} +template ostream& operator<<(ostream& os, const std::map& obj); +template ostream& operator<<(ostream& os, const std::map& obj); +template ostream& operator<<(ostream& os, const std::map>>& obj); +template ostream& operator<<(ostream& os, const std::map>& obj); diff --git a/cpp_programs/multi_linear_sequences/utils.h b/cpp_programs/multi_linear_sequences/utils.h new file mode 100644 index 0000000..46b6bb5 --- /dev/null +++ b/cpp_programs/multi_linear_sequences/utils.h @@ -0,0 +1,35 @@ +#ifndef UTILS_H +#define UTILS_H + +#include + +using u32 = uint32_t; +using u64 = uint64_t; + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace std; + +template +ostream& operator<<(ostream& os, const std::vector& obj); + +template +ostream& operator<<(ostream& os, const unordered_set& obj); + +template +ostream& operator<<(ostream& os, const std::map& obj); + +#endif // UTILS_H diff --git a/javascript/anonymous_function_call.js b/javascript/anonymous_function_call.js new file mode 100644 index 0000000..8b44c8b --- /dev/null +++ b/javascript/anonymous_function_call.js 
@@ -0,0 +1,22 @@ +((d) => { + d.a = 567; + d.b = "Hello World!"; + console.log(d.b + ", " + d.a); + console.log(d); + + const fib = (n) => { + let l = [1, 1]; + let a = 1; + let b = 1; + for (let i = 0; i < n; ++i) { + let c = a + b; + a = b; + b = c; + l.push(b); + } + return l; + }; + + const l = fib(6); + console.log('l: '+l); +})({}); diff --git a/math_numbers/utils_math_numbers.py b/math_numbers/utils_math_numbers.py index 28481c6..d7d018f 100644 --- a/math_numbers/utils_math_numbers.py +++ b/math_numbers/utils_math_numbers.py @@ -7,7 +7,8 @@ def convert_n_to_other_base(n, b): while n>0: l.append(n%b) n //= b - return list(reversed(l)) + return l + # return list(reversed(l)) def convert_base_n_to_num(l, b): diff --git a/modulo_sequences/base_modulo_sequences.py b/modulo_sequences/base_modulo_sequences.py new file mode 100755 index 0000000..102e43c --- /dev/null +++ b/modulo_sequences/base_modulo_sequences.py @@ -0,0 +1,275 @@ +#! /usr/bin/env -S /usr/bin/time /usr/bin/python3.9.5 -i + +# -*- coding: utf-8 -*- + +# Some other needed imports +import datetime +import dill +import gzip +import os +import pdb +import re +import sys +import traceback + +import numpy as np +import pandas as pd +import multiprocessing as mp + +import matplotlib.pyplot as plt + +from collections import defaultdict +from copy import deepcopy, copy +from dotmap import DotMap +from functools import reduce +from hashlib import sha256 +from io import BytesIO +from memory_tempfile import MemoryTempfile +from shutil import copyfile +from pprint import pprint +from typing import List, Set, Tuple, Dict, Union, Any +from PIL import Image + +from multiprocessing.managers import SharedMemoryManager + +CURRENT_WORKING_DIR = os.getcwd() +PATH_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) +HOME_DIR = os.path.expanduser("~") +TEMP_DIR = MemoryTempfile().gettempdir() +PYTHON_PROGRAMS_DIR = os.path.join(HOME_DIR, 'git/python_programs') + +# set the relative/absolute path where the utils_load_module.py file is placed! 
+sys.path.append(PYTHON_PROGRAMS_DIR) +from utils_load_module import load_module_dynamically + +var_glob = globals() +load_module_dynamically(**dict(var_glob=var_glob, name='utils', path=os.path.join(PYTHON_PROGRAMS_DIR, "utils.py"))) +load_module_dynamically(**dict(var_glob=var_glob, name='utils_multiprocessing_manager', path=os.path.join(PYTHON_PROGRAMS_DIR, "utils_multiprocessing_manager.py"))) +load_module_dynamically(**dict(var_glob=var_glob, name='different_combinations', path=os.path.join(PYTHON_PROGRAMS_DIR, "combinatorics/different_combinations.py"))) +load_module_dynamically(**dict(var_glob=var_glob, name='utils_graph_theory', path=os.path.join(PYTHON_PROGRAMS_DIR, "graph_theory/utils_graph_theory.py"))) +load_module_dynamically(**dict(var_glob=var_glob, name='utils_math_numbers', path=os.path.join(PYTHON_PROGRAMS_DIR, "math_numbers/utils_math_numbers.py"))) + +mkdirs = utils.mkdirs +MultiprocessingManager = utils_multiprocessing_manager.MultiprocessingManager +get_all_combinations_repeat = different_combinations.get_all_combinations_repeat +get_cycles_of_1_directed_graph = utils_graph_theory.get_cycles_of_1_directed_graph + +convert_n_to_other_base = utils_math_numbers.convert_n_to_other_base +convert_base_n_to_num = utils_math_numbers.convert_base_n_to_num + +OBJS_DIR_PATH = os.path.join(PATH_ROOT_DIR, 'objs') +mkdirs(OBJS_DIR_PATH) + +PLOTS_DIR_PATH = os.path.join(PATH_ROOT_DIR, 'plots') +mkdirs(PLOTS_DIR_PATH) + +if __name__ == '__main__': + + b_1 = 3 + b_2 = 4 + + l_n = [] + + n = b_1**1000 + print(f"i: 0, n: {n}") + l_n.append(n) + + i = 1 + while n > 0: + l_b = convert_n_to_other_base(n=n, b=b_2) + + n = convert_base_n_to_num(l=(np.array(l_b) % b_1).tolist(), b=b_1) + print(f"i: {i}, n: {n}") + + l_n.append(n) + + + + sys.exit() + + # smm = SharedMemoryManager() + # smm.start() + + PKL_GZ_DIR = os.path.join(TEMP_DIR, 'objs/modulo_linear_algebra') + mkdirs(PKL_GZ_DIR) + + # m = 5 + n = 6 + + l_m_l_cycle_len_count = [] + l_len_l_cycle_len_count = [] + for m in range(1, 6): + arr_combinations = get_all_combinations_repeat(m=m, n=n) + len_arr_combinations = len(arr_combinations) + + d_comb_tpl_to_idx = {tuple(arr.tolist()): idx for idx, arr in enumerate(arr_combinations, 0)} + d_idx_to_comb_tpl = {v: k for k, v in d_comb_tpl_to_idx.items()} + + # mult_proc_mng = MultiprocessingManager(cpu_count=mp.cpu_count(), is_print_on=True) + mult_proc_mng = MultiprocessingManager(cpu_count=mp.cpu_count(), is_print_on=False) + split_amount = mult_proc_mng.worker_amount + + # split evenly if possible into pieces + arr_split_idx_diff = np.ones((split_amount, ), dtype=np.int32) * (len_arr_combinations // split_amount) + # print("arr_split_idx_diff: {}".format(arr_split_idx_diff)) + arr_split_idx_diff[:len_arr_combinations % split_amount] += 1 + + arr_split_idx = np.hstack(((0, ), np.cumsum(arr_split_idx_diff))) + + # l_arr_comb = [arr_combinations[i1:i2] for i1, i2 in zip(arr_split_idx[:-1], arr_split_idx[1:])] + + # # only for testing the responsivness! + # mult_proc_mng.test_worker_threads_response() + + arr_x = np.random.randint(0, m, (n, ), dtype=np.uint16) + + # # to get the reference for the chared memory! 
can be useful later for other projects + # shm_arr_k = smm.SharedMemory(size=n * np.uint16().itemsize) + # arr_k = np.ndarray((n, ), dtype=np.uint16, buffer=shm_arr_k.buf) + # arr_k[:] = np.random.randint(0, m, (n, ), dtype=np.uint16) + + # print("arr_k: {arr_k}") + # + def get_all_combinations_cycles(tpl_arr_k): + arr_next_comb = np.hstack((arr_combinations[:, 1:], (np.sum(arr_combinations*tpl_arr_k, axis=1) % m).reshape((-1, 1)))) + + d_directed_graph = { + d_comb_tpl_to_idx[tuple(arr1.tolist())]: d_comb_tpl_to_idx[tuple(arr2.tolist())] + for arr1, arr2 in zip(arr_combinations, arr_next_comb) + } + + edges_directed = list(d_directed_graph.items()) + l_cycles = get_cycles_of_1_directed_graph(edges_directed) + + try: + arr_k_idx = d_comb_tpl_to_idx[tpl_arr_k] + except: + arr_k_idx = None + + return {'arr_k_idx': arr_k_idx, 'l_cycles': l_cycles} + # return {'arr_k_idx': arr_k_idx, 'd_directed_graph': d_directed_graph, 'l_cycles': l_cycles} + + mult_proc_mng.define_new_func('func_get_all_combinations_cycles', get_all_combinations_cycles) + + l_arguments = [ + # ((1, ) * n, ) + (tuple(arr_k.tolist()), ) for arr_k in arr_combinations + ] + l_ret_l_data = mult_proc_mng.do_new_jobs( + ['func_get_all_combinations_cycles'] * len(l_arguments), + l_arguments, + ) + del mult_proc_mng + + l_cycles_all_orig = [tuple(cycle) for d in l_ret_l_data for cycle in d['l_cycles']] + l_cycles_all_shift = [(lambda i: t[i:]+t[:i])(t.index(min(t))) for t in l_cycles_all_orig] + + d_cycle_count = defaultdict(int) + for cycle in l_cycles_all_shift: + d_cycle_count[cycle] += 1 + + d_cycle_count_count = defaultdict(int) + for cycle_count in d_cycle_count.values(): + d_cycle_count_count[cycle_count] += 1 + + d_cycle_len_count = defaultdict(int) + for cycle in d_cycle_count.keys(): + d_cycle_len_count[len(cycle)] += 1 + + l_cycle_len_count = sorted(d_cycle_len_count.items()) + print(f'n: {n}, m: {m}, l_cycle_len_count: {l_cycle_len_count}') + + len_l_cycle_len_count = len(l_cycle_len_count) + + l_m_l_cycle_len_count.append((m, l_cycle_len_count)) + l_len_l_cycle_len_count.append(len_l_cycle_len_count) + + l_max_cycles_for_m = [l[-1][-1][0] for l in l_m_l_cycle_len_count] + + print() + print(f'n = {n}, max_m: {m}\n- l_max_cycles_for_m = {l_max_cycles_for_m}\n- l_len_l_cycle_len_count: {l_len_l_cycle_len_count}') + + # smm.shutdown() + +''' +were arr_k is any combination +n = 1, max_m: 20, +# OEIS A002322 +- l_max_cycles_for_m = [1, 1, 2, 2, 4, 2, 6, 2, 6, 4, 10, 2, 12, 6, 4, 4, 16, 6, 18, 4] +# OEIS A066800 +- l_len_l_cycle_len_count: [1, 1, 2, 2, 3, 2, 4, 2, 4, 3, 4, 2, 6, 4, 3, 3, 5, 4, 6, 3] + +n = 2, max_m: 20 +# OEIS A316565 +- l_max_cycles_for_m = [1, 3, 8, 6, 24, 24, 48, 12, 24, 60, 120, 24, 168, 48, 60, 24, 288, 24, 360, 60] +# OEIS not found! +- l_len_l_cycle_len_count: [1, 3, 6, 5, 11, 8, 14, 7, 10, 14, 20, 8, 22, 14, 15, 9, 23, 10, 30, 14] + +n = 3, max_m: 15 +# OEIS not found! +- l_max_cycles_for_m = [1, 7, 26, 14, 124, 182, 342, 28, 78, 868, 1330, 182, 2196, 2394, 1612] +# OEIS not found! +- l_len_l_cycle_len_count: [1, 5, 8, 8, 14, 21, 22, 11, 14, 34, 32, 22, 34, 40, 36] + +n = 4, max_m: 10 +# OEIS not found! +- l_max_cycles_for_m = [1, 15, 80, 30, 624, 560, 2400, 60, 240, 4368] +# OEIS not found! +- l_len_l_cycle_len_count: [1, 8, 18, 13, 31, 52, 54, 18, 30, 83] + +n = 5, max_m: 7 +# OEIS not found! +- l_max_cycles_for_m = [1, 31, 242, 62, 3124, 7502, 16806] +# OEIS not found! +- l_len_l_cycle_len_count: [1, 13, 26, 20, 48, 128, 78] + +n = 6, max_m: 5 +# OEIS not found! 
+- l_max_cycles_for_m = [1, 63, 728, 126, 15624] +# OEIS not found! +- l_len_l_cycle_len_count: [1, 18, 42, 27, 95] + + +were arr_k is all 1 +n = 1, max_m: 30 +# OEIS A000012 +- l_max_cycles_for_m = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] +# OEIS A000012 +- l_len_l_cycle_len_count: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] + +n = 2, max_m: 30 +# OEIS A001175 +- l_max_cycles_for_m = [1, 3, 8, 6, 20, 24, 16, 12, 24, 60, 10, 24, 28, 48, 40, 24, 36, 24, 18, 60, 16, 30, 48, 24, 100, 84, 72, 48, 14, 120] +# OEIS A015135 +- l_len_l_cycle_len_count: [1, 2, 2, 3, 3, 4, 2, 4, 3, 6, 3, 5, 2, 4, 5, 5, 2, 4, 3, 7, 3, 6, 2, 6, 4, 4, 4, 5, 3, 10] + +n = 3, max_m: 30 +# OEIS A046738 +- l_max_cycles_for_m = [1, 4, 13, 8, 31, 52, 48, 16, 39, 124, 110, 104, 168, 48, 403, 32, 96, 156, 360, 248, 624, 220, 553, 208, 155, 168, 117, 48, 140, 1612] +# OEIS A106288 +- l_len_l_cycle_len_count: [1, 3, 2, 4, 2, 6, 3, 5, 3, 6, 4, 8, 3, 6, 4, 6, 3, 9, 3, 8, 6, 8, 2, 10, 3, 5, 4, 8, 3, 12] + +n = 4, max_m: 30 +# OEIS A106295 +- l_max_cycles_for_m = [1, 5, 26, 10, 312, 130, 342, 20, 78, 1560, 120, 130, 84, 1710, 312, 40, 4912, 390, 6858, 1560, 4446, 120, 12166, 260, 1560, 420, 234, 1710, 280, 1560] +# OEIS A106289 +- l_len_l_cycle_len_count: [1, 2, 2, 3, 2, 4, 4, 4, 4, 4, 3, 5, 3, 8, 3, 5, 3, 8, 3, 5, 7, 4, 4, 7, 3, 6, 6, 9, 4, 6] + +n = 5, max_m: 20 +# OEIS A106303 +- l_max_cycles_for_m = [1, 6, 104, 12, 781, 312, 2801, 24, 312, 4686, 16105, 312, 30941, 16806, 81224, 48, 88741, 312, 13032, 9372] +# OEIS A106290 +- l_len_l_cycle_len_count: [1, 3, 4, 4, 2, 9, 2, 6, 7, 6, 2, 11, 2, 6, 8, 8, 2, 9, 3, 8] + +n = 6, max_m: 15 +# OEIS not found! +- l_max_cycles_for_m = [1, 7, 728, 14, 208, 728, 342, 28, 2184, 1456, 354312, 728, 9520, 2394, 1456] +# OEIS not found! +- l_len_l_cycle_len_count: [1, 2, 2, 3, 3, 3, 3, 4, 3, 6, 2, 4, 3, 6, 5] + +n = 7, max_m: 11 +# OEIS not found! +- l_max_cycles_for_m = [1, 8, 364, 16, 9372, 728, 137257, 32, 1092, 18744, 161050] +# OEIS not found! +- l_len_l_cycle_len_count: [1, 4, 2, 5, 4, 6, 2, 6, 4, 11, 4] +''' diff --git a/modulo_sequences/modulo_linear_algebra.py b/modulo_sequences/modulo_linear_algebra.py new file mode 100755 index 0000000..355e623 --- /dev/null +++ b/modulo_sequences/modulo_linear_algebra.py @@ -0,0 +1,248 @@ +#! /usr/bin/env -S /usr/bin/time /usr/bin/python3.9.5 -i + +# -*- coding: utf-8 -*- + +# Some other needed imports +import datetime +import dill +import gzip +import os +import pdb +import re +import sys +import traceback + +import numpy as np +import pandas as pd +import multiprocessing as mp + +import matplotlib.pyplot as plt + +from collections import defaultdict +from copy import deepcopy, copy +from dotmap import DotMap +from functools import reduce +from hashlib import sha256 +from io import BytesIO +from memory_tempfile import MemoryTempfile +from shutil import copyfile +from pprint import pprint +from typing import List, Set, Tuple, Dict, Union, Any +from PIL import Image + +from multiprocessing.managers import SharedMemoryManager + +CURRENT_WORKING_DIR = os.getcwd() +PATH_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) +HOME_DIR = os.path.expanduser("~") +TEMP_DIR = MemoryTempfile().gettempdir() +PYTHON_PROGRAMS_DIR = os.path.join(HOME_DIR, 'git/python_programs') + +# set the relative/absolute path where the utils_load_module.py file is placed! 
+sys.path.append(PYTHON_PROGRAMS_DIR) +from utils_load_module import load_module_dynamically + +var_glob = globals() +load_module_dynamically(**dict(var_glob=var_glob, name='utils', path=os.path.join(PYTHON_PROGRAMS_DIR, "utils.py"))) +load_module_dynamically(**dict(var_glob=var_glob, name='utils_multiprocessing_manager', path=os.path.join(PYTHON_PROGRAMS_DIR, "utils_multiprocessing_manager.py"))) +load_module_dynamically(**dict(var_glob=var_glob, name='different_combinations', path=os.path.join(PYTHON_PROGRAMS_DIR, "combinatorics/different_combinations.py"))) +load_module_dynamically(**dict(var_glob=var_glob, name='utils_graph_theory', path=os.path.join(PYTHON_PROGRAMS_DIR, "graph_theory/utils_graph_theory.py"))) + +mkdirs = utils.mkdirs +MultiprocessingManager = utils_multiprocessing_manager.MultiprocessingManager +get_all_combinations_repeat = different_combinations.get_all_combinations_repeat +get_cycles_of_1_directed_graph = utils_graph_theory.get_cycles_of_1_directed_graph + +OBJS_DIR_PATH = os.path.join(PATH_ROOT_DIR, 'objs') +mkdirs(OBJS_DIR_PATH) + +PLOTS_DIR_PATH = os.path.join(PATH_ROOT_DIR, 'plots') +mkdirs(PLOTS_DIR_PATH) + +if __name__ == '__main__': + # smm = SharedMemoryManager() + # smm.start() + + PKL_GZ_DIR = os.path.join(TEMP_DIR, 'objs/modulo_linear_algebra') + mkdirs(PKL_GZ_DIR) + + n = 3 + + l_m_l_cycle_len_count = [] + l_len_l_cycle_len_count = [] + for m in range(4, 5): + # for m in range(1, 11): + arr_combinations = get_all_combinations_repeat(m=m, n=n) + len_arr_combinations = len(arr_combinations) + + d_comb_tpl_to_idx = {tuple(arr.tolist()): idx for idx, arr in enumerate(arr_combinations, 0)} + d_idx_to_comb_tpl = {v: k for k, v in d_comb_tpl_to_idx.items()} + + # mult_proc_mng = MultiprocessingManager(cpu_count=mp.cpu_count(), is_print_on=True) + mult_proc_mng = MultiprocessingManager(cpu_count=mp.cpu_count(), is_print_on=False) + split_amount = mult_proc_mng.worker_amount + + # split evenly if possible into pieces + arr_split_idx_diff = np.ones((split_amount, ), dtype=np.int32) * (len_arr_combinations // split_amount) + # print("arr_split_idx_diff: {}".format(arr_split_idx_diff)) + arr_split_idx_diff[:len_arr_combinations % split_amount] += 1 + + arr_split_idx = np.hstack(((0, ), np.cumsum(arr_split_idx_diff))) + + # l_arr_comb = [arr_combinations[i1:i2] for i1, i2 in zip(arr_split_idx[:-1], arr_split_idx[1:])] + + # # only for testing the responsivness! + # mult_proc_mng.test_worker_threads_response() + + arr_x = np.random.randint(0, m, (n, ), dtype=np.uint16) + + # # to get the reference for the chared memory! 
can be useful later for other projects + # shm_arr_k = smm.SharedMemory(size=n * np.uint16().itemsize) + # arr_k = np.ndarray((n, ), dtype=np.uint16, buffer=shm_arr_k.buf) + # arr_k[:] = np.random.randint(0, m, (n, ), dtype=np.uint16) + + # print("arr_k: {arr_k}") + # + def get_all_combinations_cycles(tpl_arr_k): + arr_next_comb = np.hstack((arr_combinations[:, 1:], (np.sum(arr_combinations*tpl_arr_k, axis=1) % m).reshape((-1, 1)))) + + d_directed_graph = { + d_comb_tpl_to_idx[tuple(arr1.tolist())]: d_comb_tpl_to_idx[tuple(arr2.tolist())] + for arr1, arr2 in zip(arr_combinations, arr_next_comb) + } + + edges_directed = list(d_directed_graph.items()) + l_cycles = get_cycles_of_1_directed_graph(edges_directed) + + try: + arr_k_idx = d_comb_tpl_to_idx[tpl_arr_k] + except: + arr_k_idx = None + + return {'arr_k_idx': arr_k_idx, 'l_cycles': l_cycles} + # return {'arr_k_idx': arr_k_idx, 'd_directed_graph': d_directed_graph, 'l_cycles': l_cycles} + + mult_proc_mng.define_new_func('func_get_all_combinations_cycles', get_all_combinations_cycles) + + l_arguments = [ + # ((1, ) * n, ) + (tuple(arr_k.tolist()), ) for arr_k in arr_combinations + ] + l_ret_l_data = mult_proc_mng.do_new_jobs( + ['func_get_all_combinations_cycles'] * len(l_arguments), + l_arguments, + ) + del mult_proc_mng + + l_cycles_all_orig = [tuple(cycle) for d in l_ret_l_data for cycle in d['l_cycles']] + l_cycles_all_shift = [(lambda i: t[i:]+t[:i])(t.index(min(t))) for t in l_cycles_all_orig] + + d_cycle_count = defaultdict(int) + for cycle in l_cycles_all_shift: + d_cycle_count[cycle] += 1 + + d_cycle_count_count = defaultdict(int) + for cycle_count in d_cycle_count.values(): + d_cycle_count_count[cycle_count] += 1 + + d_cycle_len_count = defaultdict(int) + for cycle in d_cycle_count.keys(): + d_cycle_len_count[len(cycle)] += 1 + + l_cycle_len_count = sorted(d_cycle_len_count.items()) + print(f'n: {n}, m: {m}, l_cycle_len_count: {l_cycle_len_count}') + + len_l_cycle_len_count = len(l_cycle_len_count) + + l_m_l_cycle_len_count.append((m, l_cycle_len_count)) + l_len_l_cycle_len_count.append(len_l_cycle_len_count) + + l_max_cycles_for_m = [l[-1][-1][0] for l in l_m_l_cycle_len_count] + + print() + print(f'n = {n}, max_m: {m}\n- l_max_cycles_for_m = {l_max_cycles_for_m}\n- l_len_l_cycle_len_count: {l_len_l_cycle_len_count}') + + # smm.shutdown() + +''' +were arr_k is any combination +n = 1, max_m: 20, +# OEIS A002322 +- l_max_cycles_for_m = [1, 1, 2, 2, 4, 2, 6, 2, 6, 4, 10, 2, 12, 6, 4, 4, 16, 6, 18, 4] +# OEIS A066800 +- l_len_l_cycle_len_count: [1, 1, 2, 2, 3, 2, 4, 2, 4, 3, 4, 2, 6, 4, 3, 3, 5, 4, 6, 3] + +n = 2, max_m: 20 +# OEIS A316565 +- l_max_cycles_for_m = [1, 3, 8, 6, 24, 24, 48, 12, 24, 60, 120, 24, 168, 48, 60, 24, 288, 24, 360, 60] +# OEIS not found! +- l_len_l_cycle_len_count: [1, 3, 6, 5, 11, 8, 14, 7, 10, 14, 20, 8, 22, 14, 15, 9, 23, 10, 30, 14] + +n = 3, max_m: 15 +# OEIS not found! +- l_max_cycles_for_m = [1, 7, 26, 14, 124, 182, 342, 28, 78, 868, 1330, 182, 2196, 2394, 1612] +# OEIS not found! +- l_len_l_cycle_len_count: [1, 5, 8, 8, 14, 21, 22, 11, 14, 34, 32, 22, 34, 40, 36] + +n = 4, max_m: 10 +# OEIS not found! +- l_max_cycles_for_m = [1, 15, 80, 30, 624, 560, 2400, 60, 240, 4368] +# OEIS not found! +- l_len_l_cycle_len_count: [1, 8, 18, 13, 31, 52, 54, 18, 30, 83] + +n = 5, max_m: 7 +# OEIS not found! +- l_max_cycles_for_m = [1, 31, 242, 62, 3124, 7502, 16806] +# OEIS not found! +- l_len_l_cycle_len_count: [1, 13, 26, 20, 48, 128, 78] + +n = 6, max_m: 5 +# OEIS not found! 
+- l_max_cycles_for_m = [1, 63, 728, 126, 15624] +# OEIS not found! +- l_len_l_cycle_len_count: [1, 18, 42, 27, 95] + + +were arr_k is all 1 +n = 1, max_m: 30 +# OEIS A000012 +- l_max_cycles_for_m = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] +# OEIS A000012 +- l_len_l_cycle_len_count: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] + +n = 2, max_m: 30 +# OEIS A001175 +- l_max_cycles_for_m = [1, 3, 8, 6, 20, 24, 16, 12, 24, 60, 10, 24, 28, 48, 40, 24, 36, 24, 18, 60, 16, 30, 48, 24, 100, 84, 72, 48, 14, 120] +# OEIS A015135 +- l_len_l_cycle_len_count: [1, 2, 2, 3, 3, 4, 2, 4, 3, 6, 3, 5, 2, 4, 5, 5, 2, 4, 3, 7, 3, 6, 2, 6, 4, 4, 4, 5, 3, 10] + +n = 3, max_m: 30 +# OEIS A046738 +- l_max_cycles_for_m = [1, 4, 13, 8, 31, 52, 48, 16, 39, 124, 110, 104, 168, 48, 403, 32, 96, 156, 360, 248, 624, 220, 553, 208, 155, 168, 117, 48, 140, 1612] +# OEIS A106288 +- l_len_l_cycle_len_count: [1, 3, 2, 4, 2, 6, 3, 5, 3, 6, 4, 8, 3, 6, 4, 6, 3, 9, 3, 8, 6, 8, 2, 10, 3, 5, 4, 8, 3, 12] + +n = 4, max_m: 30 +# OEIS A106295 +- l_max_cycles_for_m = [1, 5, 26, 10, 312, 130, 342, 20, 78, 1560, 120, 130, 84, 1710, 312, 40, 4912, 390, 6858, 1560, 4446, 120, 12166, 260, 1560, 420, 234, 1710, 280, 1560] +# OEIS A106289 +- l_len_l_cycle_len_count: [1, 2, 2, 3, 2, 4, 4, 4, 4, 4, 3, 5, 3, 8, 3, 5, 3, 8, 3, 5, 7, 4, 4, 7, 3, 6, 6, 9, 4, 6] + +n = 5, max_m: 20 +# OEIS A106303 +- l_max_cycles_for_m = [1, 6, 104, 12, 781, 312, 2801, 24, 312, 4686, 16105, 312, 30941, 16806, 81224, 48, 88741, 312, 13032, 9372] +# OEIS A106290 +- l_len_l_cycle_len_count: [1, 3, 4, 4, 2, 9, 2, 6, 7, 6, 2, 11, 2, 6, 8, 8, 2, 9, 3, 8] + +n = 6, max_m: 15 +# OEIS not found! +- l_max_cycles_for_m = [1, 7, 728, 14, 208, 728, 342, 28, 2184, 1456, 354312, 728, 9520, 2394, 1456] +# OEIS not found! +- l_len_l_cycle_len_count: [1, 2, 2, 3, 3, 3, 3, 4, 3, 6, 2, 4, 3, 6, 5] + +n = 7, max_m: 11 +# OEIS not found! +- l_max_cycles_for_m = [1, 8, 364, 16, 9372, 728, 137257, 32, 1092, 18744, 161050] +# OEIS not found! +- l_len_l_cycle_len_count: [1, 4, 2, 5, 4, 6, 2, 6, 4, 11, 4] +''' diff --git a/modulo_sequences/modulo_power_numbers.py b/modulo_sequences/modulo_power_numbers.py new file mode 100755 index 0000000..f9ce4d3 --- /dev/null +++ b/modulo_sequences/modulo_power_numbers.py @@ -0,0 +1,251 @@ +#! /usr/bin/env -S /usr/bin/time /usr/bin/python3.9.5 -i + +# -*- coding: utf-8 -*- + +# Some other needed imports +import datetime +import dill +import gzip +import os +import pdb +import re +import sys +import traceback + +import numpy as np +import pandas as pd +import multiprocessing as mp + +import matplotlib.pyplot as plt + +from collections import defaultdict +from copy import deepcopy, copy +from dotmap import DotMap +from functools import reduce +from hashlib import sha256 +from io import BytesIO +from memory_tempfile import MemoryTempfile +from shutil import copyfile +from pprint import pprint +from typing import List, Set, Tuple, Dict, Union, Any +from PIL import Image + +CURRENT_WORKING_DIR = os.getcwd() +PATH_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) +HOME_DIR = os.path.expanduser("~") +TEMP_DIR = MemoryTempfile().gettempdir() +PYTHON_PROGRAMS_DIR = os.path.join(HOME_DIR, 'git/python_programs') + +# set the relative/absolute path where the utils_load_module.py file is placed! 
+sys.path.append(PYTHON_PROGRAMS_DIR) +from utils_load_module import load_module_dynamically + +var_glob = globals() +load_module_dynamically(**dict(var_glob=var_glob, name='utils', path=os.path.join(PYTHON_PROGRAMS_DIR, "utils.py"))) +load_module_dynamically(**dict(var_glob=var_glob, name='utils_multiprocessing_manager', path=os.path.join(PYTHON_PROGRAMS_DIR, "utils_multiprocessing_manager.py"))) + +mkdirs = utils.mkdirs +MultiprocessingManager = utils_multiprocessing_manager.MultiprocessingManager + +OBJS_DIR_PATH = os.path.join(PATH_ROOT_DIR, 'objs') +mkdirs(OBJS_DIR_PATH) + +PLOTS_DIR_PATH = os.path.join(PATH_ROOT_DIR, 'plots') +mkdirs(PLOTS_DIR_PATH) + +if __name__ == '__main__': + PKL_GZ_DIR = os.path.join(TEMP_DIR, 'objs/modulo_power_numbers') + mkdirs(PKL_GZ_DIR) + + N_MAX = 300 + P_MAX = int(N_MAX*1.5) + if P_MAX % 100 != 0: + P_MAX += 100 - P_MAX % 100 + + STEPS = 100 + + def get_l_data(p_start, p_end, n_start, n_end): + file_name = f'p_{p_start}_{p_end-1}_n_{n_start}_{n_end-1}.pkl.gz' + file_path = os.path.join(PKL_GZ_DIR, file_name) + + # TODO: create temp obj creation and loading as an util function! + if not os.path.exists(file_path): + l_data = [] + for p in range(p_start, p_end): + for n in range(n_start, n_end): + l_seq = [pow(i, p, n) for i in range(1, n)] + # l_seq = [i**p % n for i in range(1, n)] + l_data.append({'p': p, 'n': n, 'l_seq': l_seq}) + + with gzip.open(file_path, 'wb') as f: + dill.dump(l_data, f) + else: + with gzip.open(file_path, 'rb') as f: + l_data = dill.load(f) + + return l_data + + def f(x): + return x**2 + + mult_proc_mng = MultiprocessingManager(cpu_count=mp.cpu_count()-1) + + # # only for testing the responsivness! + # mult_proc_mng.test_worker_threads_response() + + print('Define new Function!') + mult_proc_mng.define_new_func('func_get_l_data', get_l_data) + + l_arguments = [ + (p_start, p_start + STEPS, n_start, n_start + STEPS) + for p_start in range(1, P_MAX, STEPS) + for n_start in range(1, N_MAX+1, STEPS) + ] + l_ret_l_data = mult_proc_mng.do_new_jobs( + ['func_get_l_data']*len(l_arguments), + l_arguments, + ) + del mult_proc_mng + + d_p_to_d_n_to_l_seq = {p: {} for p in range(1, P_MAX + 1)} + + for l_data in l_ret_l_data: + for d in l_data: + p_d = d['p'] + n_d = d['n'] + # assert p_d >= p_start and p_d <= p_end + # assert n_d >= n_start and n_d <= n_end + l_seq = d['l_seq'] + d_p_to_d_n_to_l_seq[p_d][n_d] = l_seq + + # for p_start in range(1, P_MAX, STEPS): + # print("p_start: {}".format(p_start)) + + # p_end = p_start + STEPS + + # for n_start in range(1, N_MAX+1, STEPS): + # n_end = n_start + STEPS + + # file_name = f'p_{p_start}_{p_end-1}_n_{n_start}_{n_end-1}.pkl.gz' + # file_path = os.path.join(PKL_GZ_DIR, file_name) + + # # TODO: create temp obj creation and loading as an util function! 
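# A minimal sketch of the utility the TODO above asks for: a generic
# "load the cached object, or create and store it" helper around gzip + dill,
# following the same pattern as get_l_data. The name load_or_create_obj and its
# signature are assumptions, not an existing function of the utils module;
# gzip, os and dill are already imported at the top of this file.
def load_or_create_obj(file_path, func_create_obj):
    # reuse the cached object if it was already computed and written to disk
    if os.path.exists(file_path):
        with gzip.open(file_path, 'rb') as f:
            return dill.load(f)
    # otherwise compute it once and store it compressed for the next run
    obj = func_create_obj()
    with gzip.open(file_path, 'wb') as f:
        dill.dump(obj, f)
    return obj
# illustrative call, matching get_l_data above (file_name and the p/n ranges as defined there):
# l_data = load_or_create_obj(
#     file_path=os.path.join(PKL_GZ_DIR, file_name),
#     func_create_obj=lambda: [
#         {'p': p, 'n': n, 'l_seq': [pow(i, p, n) for i in range(1, n)]}
#         for p in range(p_start, p_end) for n in range(n_start, n_end)
#     ],
# )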
+ # if not os.path.exists(file_path): + # l_data = [] + # for p in range(p_start, p_end): + # for n in range(n_start, n_end): + # l_seq = [i**p % n for i in range(1, n)] + # l_data.append({'p': p, 'n': n, 'l_seq': l_seq}) + + # with gzip.open(file_path, 'wb') as f: + # dill.dump(l_data, f) + # else: + # with gzip.open(file_path, 'rb') as f: + # l_data = dill.load(f) + + # for d in l_data: + # p_d = d['p'] + # n_d = d['n'] + # assert p_d >= p_start and p_d <= p_end + # assert n_d >= n_start and n_d <= n_end + # l_seq = d['l_seq'] + # d_p_to_d_n_to_l_seq[p_d][n_d] = l_seq + + # # d_n_to_l_seq[n] = l_seq + # # d_p_to_d_n_to_l_seq[p] = d_n_to_l_seq + + d_n_to_d_tpl_to_l_p = {} + d_n_to_amount_cycles = {} + # d_n_to_l_tpl_unique_with_zero = {} + d_n_to_l_tpl_unique = {} + for n in range(2, N_MAX+1): + print("n: {}".format(n)) + d_tpl_to_l_p = {} + d_n_to_d_tpl_to_l_p[n] = d_tpl_to_l_p + + for p in range(1, P_MAX+1): + l_seq = d_p_to_d_n_to_l_seq[p][n] + tpl = tuple(l_seq) + + if tpl not in d_tpl_to_l_p: + d_tpl_to_l_p[tpl] = [] + + d_tpl_to_l_p[tpl].append(p) + + amount_cycles = len(d_tpl_to_l_p) + d_n_to_amount_cycles[n] = amount_cycles + + l_tpl = list(d_tpl_to_l_p.keys()) + + # l_tpl_unique_with_zero = [] + l_tpl_unique = [] + len_tpl = len(l_tpl[0]) + for tpl in l_tpl: + u = np.sort(np.unique(tpl)) + if u.shape[0] == len_tpl: + # l_tpl_unique_with_zero.append(tpl) + + if u[0] != 0: + l_tpl_unique.append(tpl) + + # d_n_to_l_tpl_unique_with_zero[n] = l_tpl_unique_with_zero + d_n_to_l_tpl_unique[n] = l_tpl_unique + + l_seq_oeis_A109746 = [d_n_to_amount_cycles[k] for k in sorted(d_n_to_amount_cycles.keys())] + + # maybe an interesting sequence! + # l_seq_len_of_unique_with_zero_cycles = [len(d_n_to_l_tpl_unique_with_zero[k]) for k in sorted(d_n_to_l_tpl_unique_with_zero.keys())] + l_seq_len_of_unique_cycles = [len(d_n_to_l_tpl_unique[k]) for k in sorted(d_n_to_l_tpl_unique.keys())] + + # l_seq_n_len_of_unique_with_zero_cycles = [(k, len(d_n_to_l_tpl_unique_with_zero[k])) for k in sorted(d_n_to_l_tpl_unique_with_zero.keys())] + l_seq_n_len_of_unique_cycles = [(k, len(d_n_to_l_tpl_unique[k])) for k in sorted(d_n_to_l_tpl_unique.keys())] + + l_n, l_a_n = list(zip(*l_seq_n_len_of_unique_cycles)) + arr_n = np.array(l_n) + arr_a_n = np.array(l_a_n) + + arr_n_half_amount_full_cycles = arr_n[(arr_n // arr_a_n) == 2] + + l_seq_oeis_A003627 = list(arr_n_half_amount_full_cycles) + + l_min = [] + l_max = [] + l_mean = [] + + for i in range(1, len(arr_n)+1): + arr_a_n_part = arr_a_n[:i] + + l_min.append(np.min(arr_a_n_part)) + l_mean.append(np.mean(arr_a_n_part)) + l_max.append(np.max(arr_a_n_part)) + + plt.figure() + + plt.title('l_seq_n_len_of_unique_cycles') + plt.plot(arr_n, arr_a_n/arr_n, 'bo') + + plt.xlabel('arr_n') + plt.xlabel('arr_a_n/arr_n') + + + plt.figure() + + plt.title('l_seq_n_len_of_unique_cycles') + plt.plot(arr_n, arr_a_n, 'bo') + + plt.xlabel('arr_n') + plt.xlabel('arr_a_n') + + + plt.figure() + + plt.title('l_seq_n_len_of_unique_cycles min/mean/max') + plt.plot(arr_n, l_min, 'b.') + plt.plot(arr_n, l_mean, 'k.') + plt.plot(arr_n, l_max, 'r.') + + plt.xlabel('arr_n') + plt.xlabel('min/mean/max') + + + plt.show() diff --git a/modulo_sequences/multi_linear_sequences.py b/modulo_sequences/multi_linear_sequences.py new file mode 100755 index 0000000..28c07a5 --- /dev/null +++ b/modulo_sequences/multi_linear_sequences.py @@ -0,0 +1,355 @@ +#! 
/usr/bin/env -S /usr/bin/time /usr/bin/python3.9.5 -i + +# -*- coding: utf-8 -*- + +# Some other needed imports +import datetime +import dill +import gzip +import os +import pdb +import re +import sys +import traceback + +import numpy as np +import pandas as pd +import multiprocessing as mp + +import matplotlib.pyplot as plt + +from collections import defaultdict +from copy import deepcopy, copy +from dotmap import DotMap +from functools import reduce +from hashlib import sha256 +from io import BytesIO +from memory_tempfile import MemoryTempfile +from shutil import copyfile +from pprint import pprint +from typing import List, Set, Tuple, Dict, Union, Any +from PIL import Image + +from multiprocessing.managers import SharedMemoryManager + +CURRENT_WORKING_DIR = os.getcwd() +PATH_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) +HOME_DIR = os.path.expanduser("~") +TEMP_DIR = MemoryTempfile().gettempdir() +PYTHON_PROGRAMS_DIR = os.path.join(HOME_DIR, 'git/python_programs') + +# set the relative/absolute path where the utils_load_module.py file is placed! +sys.path.append(PYTHON_PROGRAMS_DIR) +from utils_load_module import load_module_dynamically + +var_glob = globals() +load_module_dynamically(**dict(var_glob=var_glob, name='utils', path=os.path.join(PYTHON_PROGRAMS_DIR, "utils.py"))) +load_module_dynamically(**dict(var_glob=var_glob, name='utils_multiprocessing_manager', path=os.path.join(PYTHON_PROGRAMS_DIR, "utils_multiprocessing_manager.py"))) +load_module_dynamically(**dict(var_glob=var_glob, name='different_combinations', path=os.path.join(PYTHON_PROGRAMS_DIR, "combinatorics/different_combinations.py"))) +load_module_dynamically(**dict(var_glob=var_glob, name='utils_graph_theory', path=os.path.join(PYTHON_PROGRAMS_DIR, "graph_theory/utils_graph_theory.py"))) + +mkdirs = utils.mkdirs +MultiprocessingManager = utils_multiprocessing_manager.MultiprocessingManager +get_all_combinations_repeat = different_combinations.get_all_combinations_repeat +get_cycles_of_1_directed_graph = utils_graph_theory.get_cycles_of_1_directed_graph + +OBJS_DIR_PATH = os.path.join(PATH_ROOT_DIR, 'objs') +mkdirs(OBJS_DIR_PATH) + +PLOTS_DIR_PATH = os.path.join(PATH_ROOT_DIR, 'plots') +mkdirs(PLOTS_DIR_PATH) + +if __name__ == '__main__': + # smm = SharedMemoryManager() + # smm.start() + + PKL_GZ_DIR = os.path.join(TEMP_DIR, 'objs/multi_linear_sequences') + mkdirs(PKL_GZ_DIR) + + l_amount_full_cycle = [] + l_amount_tk_1_cycle = [] + + # n = 1 + n = 2 + # n = 3 + + # TODO: need to make this multiprocessing able! 
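# Rough sketch (commented out, not wired in) of one way to satisfy the TODO above:
# farm the per-m work out through the repo's MultiprocessingManager, mirroring the
# commented-out per-arr_k variant at the bottom of this file. calc_for_m is a
# hypothetical wrapper around the body of the m-loop below; it does not exist yet.
#
# def calc_for_m(m):
#     ...  # body of the m-loop below for a single m, returning its two per-m results
#     return m, (m, c_len[-1]), (m, len(l_tk_empty_cycles))
#
# mult_proc_mng = MultiprocessingManager(cpu_count=mp.cpu_count(), is_print_on=False)
# mult_proc_mng.define_new_func('func_calc_for_m', calc_for_m)
# l_arguments = [(m, ) for m in range(1, 16)]
# l_ret = mult_proc_mng.do_new_jobs(['func_calc_for_m'] * len(l_arguments), l_arguments)
# del mult_proc_mng
# for _, t_full, t_tk_1 in sorted(l_ret):
#     l_amount_full_cycle.append(t_full)
#     l_amount_tk_1_cycle.append(t_tk_1)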
+ # for m in range(1, 16): + # for m in range(1, 5): + for m in range(5, 6): + # for m in range(1, 101): + + + # method 2 + print('method 2') + arr_a = get_all_combinations_repeat(n=n, m=m).astype(dtype=np.uint32).T + arr_k = get_all_combinations_repeat(n=2**n, m=m).astype(dtype=np.uint32) + arr_mult_a = m**np.arange(arr_a.shape[0]-1, -1, -1, dtype=np.uint32)[::-1] + arr_mult_k = m**np.arange(arr_k.shape[1]-1, -1, -1, dtype=np.uint32)[::-1] + + d_t_to_i = {tuple(a.tolist()): i for i, a in enumerate(arr_a.T, 0)} + d_i_to_t = {v: k for k, v in d_t_to_i.items()} + + arr_a_idx = np.sum(arr_a.T*arr_mult_a, axis=1, dtype=np.uint32) + arr_k_idx = np.sum(arr_k*arr_mult_k, axis=1, dtype=np.uint32) + + arr_temp = np.empty(arr_k.shape, dtype=np.uint32) + arr_sum = np.empty((arr_k.shape[0], ), dtype=np.uint32) + + if n == 1: + arr_a_prep = np.vstack(( + arr_a[0], + np.ones((arr_a.shape[1], ), dtype=np.uint32), + )).T + elif n == 2: + arr_a_prep = np.vstack(( + arr_a[0]*arr_a[1], + arr_a[0], + arr_a[1], + np.ones((arr_a.shape[1], ), dtype=np.uint32), + )).T + elif n == 3: + arr_a_prep = np.vstack(( + arr_a[0]*arr_a[1]*arr_a[2], + arr_a[0]*arr_a[1], + arr_a[0]*arr_a[2], + arr_a[1]*arr_a[2], + arr_a[0], + arr_a[1], + arr_a[2], + np.ones((arr_a.shape[1], ), dtype=np.uint32), + )).T + + arr_a_prep %= m + + arr_k_a_all = np.einsum('ij,kj->ki', arr_a_prep, arr_k) % m + + d_k_idx_to_d_a_idx_to_idx_next = {} + d_a_to_l_cycles = {} + l_tk_empty_cycles = [] + d_len_l_cycles_to_l_k_idx = {} + for k_idx, arr_k_a in zip(arr_k_idx, arr_k_a_all): + edges_directed = list(zip(arr_a_idx, np.sum(np.vstack((arr_a[1:], arr_k_a)).T*arr_mult_a, axis=1))) + d_k_idx_to_d_a_idx_to_idx_next[k_idx] = dict(edges_directed) + + l_cycles = get_cycles_of_1_directed_graph(edges_directed) + + d_a_to_l_cycles[k_idx] = l_cycles + + len_l_cycles = len(l_cycles) + + if len_l_cycles == 1: + l_tk_empty_cycles.append(k_idx) + + if len_l_cycles not in d_len_l_cycles_to_l_k_idx: + d_len_l_cycles_to_l_k_idx[len_l_cycles] = [] + d_len_l_cycles_to_l_k_idx[len_l_cycles].append(k_idx) + + # if k_idx == 60: + # break + + + # # method 1 + # print('method 1') + # arr_a = get_all_combinations_repeat(n=n, m=m).astype(dtype=np.uint32) + # arr_k = get_all_combinations_repeat(n=2**n, m=m).astype(dtype=np.uint32) + # d_t_to_i = {tuple(a.tolist()): i for i, a in enumerate(arr_a, 0)} + # d_i_to_t = {v: k for k, v in d_t_to_i.items()} + + # d_a_to_l_cycles = {} + # l_tk_empty_cycles = [] + + # # for k1, k2, k3, k4, k5, k6, k7, k8 in arr_k: + # # tk = (k1, k2, k3, k4, k5, k6, k7, k8) + + # for k_idx, (k1, k2, k3, k4) in enumerate(arr_k, 0): + # tk = (k1, k2, k3, k4) + + # # for k1, k2 in arr_k: + # # tk = (k1, k2) + # # print("tk: {}".format(tk)) + + # d = {} + # # for a1, a2, a3 in arr_a: + # # t1 = (a1, a2, a3) + # # t2 = (a2, a3, (a1*a2*k1 + a1*k2 + a2*k3 + k4) % m) + + # for a1, a2 in arr_a: + # t1 = (a1, a2) + # t2 = (a2, (a1*a2*k1 + a1*k2 + a2*k3 + k4) % m) + + # # for a1, in arr_a: + # # t1 = (a1, ) + # # t2 = ((a1*k1 + k2) % m, ) + + # d[d_t_to_i[t1]] = d_t_to_i[t2] + + # edges_directed = list(d.items()) + # l_cycles = get_cycles_of_1_directed_graph(edges_directed) + + # d_a_to_l_cycles[tk] = l_cycles + + # if len(l_cycles) == 1: + # l_tk_empty_cycles.append(tk) + + # # if any([len(l)==m**2-1 for l in l_cycles]): + # # print("k_idx: {}".format(k_idx)) + # # break + + + l_cycles_all = [l1 for l in d_a_to_l_cycles.values() for l1 in l] + u, c = np.unique(list(map(len, l_cycles_all)), return_counts=True) + + l_unique_seq_tpl = [(lambda x: 
tuple(l[x:]+l[:x]))(l.index(min(l))) for l in l_cycles_all] + amount_unique_tpl = len(set(l_unique_seq_tpl)) + print("n: {}, m: {}, amount_unique_tpl: {}, len(u): {}".format(n, m, amount_unique_tpl, len(u))) + + l_cycles_all_l_num = [d_i_to_t[l[0]]+tuple(d_i_to_t[i][-1] for i in l[1:]) for l in l_cycles_all] + + u_len, c_len = np.unique([len(l) for l in l_cycles_all_l_num], return_counts=True) + + l_amount_full_cycle.append((m, c_len[-1])) + l_amount_tk_1_cycle.append((m, len(l_tk_empty_cycles))) + + print("n: {}".format(n)) + print("l_amount_full_cycle: {}".format(l_amount_full_cycle)) + print("l_amount_tk_1_cycle: {}".format(l_amount_tk_1_cycle)) + + l_a_n = [a for _, a in l_amount_full_cycle] + print("l_a_n: {}".format(l_a_n)) + l_tk_1_cycle = [a for _, a in l_amount_tk_1_cycle] + print("l_tk_1_cycle: {}".format(l_tk_1_cycle)) + + # for n: 1 + # >>> arr=np.array(l_a_n) + # >>> np.where(arr==np.arange(1, len(arr)+1))[0]+1 + # array([ 1, 8, 18, 36]) + + # n: 1, l_a_n: [1, 1, 2, 2, 4, 2, 6, 8, 18, 4, 10, 4, 12, 6, 8] + # n: 2, l_a_n = [1, 1, 6, 24, 20, 24, 56, 768, 1782, 72] + + # n: 2 + # l_a_n: [1, 1, 6, 24, 20, 24, 56, 768, 1782, 72, 176, 2448, 312, 104, 120, 24576, 816, 3780, 912, 480] + # l_tk_1_cycle: [1, 8, 29, 88, 129, 180, 433, 1280, 999, 752, 1451, 1816, 4081, 2876, 2181, 20480, 10097, 5832, 14401, 7512] + + # n: 2, m: 1, amount_unique_tpl: 1, len(u): 1 + # n: 2, m: 2, amount_unique_tpl: 6, len(u): 4 + # n: 2, m: 3, amount_unique_tpl: 43, len(u): 6 + # n: 2, m: 4, amount_unique_tpl: 102, len(u): 6 + # n: 2, m: 5, amount_unique_tpl: 535, len(u): 13 + # n: 2, m: 6, amount_unique_tpl: 434, len(u): 8 + # n: 2, m: 7, amount_unique_tpl: 3007, len(u): 21 + # n: 2, m: 8, amount_unique_tpl: 3140, len(u): 8 + # n: 2, m: 9, amount_unique_tpl: 8661, len(u): 10 + # n: 2, m: 10, amount_unique_tpl: 5258, len(u): 22 + # n: 2, m: 11, amount_unique_tpl: 27053, len(u): 33 + + # n: 3, m: 1, amount_unique_tpl: 1 + + # n: 3, m: 2, amount_unique_tpl: 19 + + # amount_unique_tpl: 1425, n=3, m=3 + + # len(set(l_unique_seq_tpl)) == 5986, n=3, m=4 + + # n: 3, m: 5, amount_unique_tpl: 289597 + + # n: 3, m: 6, amount_unique_tpl: 55315 + + + # l_m_l_cycle_len_count = [] + # l_len_l_cycle_len_count = [] + # for m in range(4, 5): + # # for m in range(1, 11): + # arr_combinations = get_all_combinations_repeat(m=m, n=n) + # len_arr_combinations = len(arr_combinations) + + # d_comb_tpl_to_idx = {tuple(arr.tolist()): idx for idx, arr in enumerate(arr_combinations, 0)} + # d_idx_to_comb_tpl = {v: k for k, v in d_comb_tpl_to_idx.items()} + + # # mult_proc_mng = MultiprocessingManager(cpu_count=mp.cpu_count(), is_print_on=True) + # mult_proc_mng = MultiprocessingManager(cpu_count=mp.cpu_count(), is_print_on=False) + # split_amount = mult_proc_mng.worker_amount + + # # split evenly if possible into pieces + # arr_split_idx_diff = np.ones((split_amount, ), dtype=np.int32) * (len_arr_combinations // split_amount) + # # print("arr_split_idx_diff: {}".format(arr_split_idx_diff)) + # arr_split_idx_diff[:len_arr_combinations % split_amount] += 1 + + # arr_split_idx = np.hstack(((0, ), np.cumsum(arr_split_idx_diff))) + + # # l_arr_comb = [arr_combinations[i1:i2] for i1, i2 in zip(arr_split_idx[:-1], arr_split_idx[1:])] + + # # # only for testing the responsivness! + # # mult_proc_mng.test_worker_threads_response() + + # arr_x = np.random.randint(0, m, (n, ), dtype=np.uint16) + + # # # to get the reference for the chared memory! 
can be useful later for other projects + # # shm_arr_k = smm.SharedMemory(size=n * np.uint16().itemsize) + # # arr_k = np.ndarray((n, ), dtype=np.uint16, buffer=shm_arr_k.buf) + # # arr_k[:] = np.random.randint(0, m, (n, ), dtype=np.uint16) + + # # print("arr_k: {arr_k}") + # # + # def get_all_combinations_cycles(tpl_arr_k): + # arr_next_comb = np.hstack((arr_combinations[:, 1:], (np.sum(arr_combinations*tpl_arr_k, axis=1) % m).reshape((-1, 1)))) + + # d_directed_graph = { + # d_comb_tpl_to_idx[tuple(arr1.tolist())]: d_comb_tpl_to_idx[tuple(arr2.tolist())] + # for arr1, arr2 in zip(arr_combinations, arr_next_comb) + # } + + # edges_directed = list(d_directed_graph.items()) + # l_cycles = get_cycles_of_1_directed_graph(edges_directed) + + # try: + # arr_k_idx = d_comb_tpl_to_idx[tpl_arr_k] + # except: + # arr_k_idx = None + + # return {'arr_k_idx': arr_k_idx, 'l_cycles': l_cycles} + # # return {'arr_k_idx': arr_k_idx, 'd_directed_graph': d_directed_graph, 'l_cycles': l_cycles} + + # mult_proc_mng.define_new_func('func_get_all_combinations_cycles', get_all_combinations_cycles) + + # l_arguments = [ + # # ((1, ) * n, ) + # (tuple(arr_k.tolist()), ) for arr_k in arr_combinations + # ] + # l_ret_l_data = mult_proc_mng.do_new_jobs( + # ['func_get_all_combinations_cycles'] * len(l_arguments), + # l_arguments, + # ) + # del mult_proc_mng + + # l_cycles_all_orig = [tuple(cycle) for d in l_ret_l_data for cycle in d['l_cycles']] + # l_cycles_all_shift = [(lambda i: t[i:]+t[:i])(t.index(min(t))) for t in l_cycles_all_orig] + + # d_cycle_count = defaultdict(int) + # for cycle in l_cycles_all_shift: + # d_cycle_count[cycle] += 1 + + # d_cycle_count_count = defaultdict(int) + # for cycle_count in d_cycle_count.values(): + # d_cycle_count_count[cycle_count] += 1 + + # d_cycle_len_count = defaultdict(int) + # for cycle in d_cycle_count.keys(): + # d_cycle_len_count[len(cycle)] += 1 + + # l_cycle_len_count = sorted(d_cycle_len_count.items()) + # print(f'n: {n}, m: {m}, l_cycle_len_count: {l_cycle_len_count}') + + # len_l_cycle_len_count = len(l_cycle_len_count) + + # l_m_l_cycle_len_count.append((m, l_cycle_len_count)) + # l_len_l_cycle_len_count.append(len_l_cycle_len_count) + + # l_max_cycles_for_m = [l[-1][-1][0] for l in l_m_l_cycle_len_count] + + # print() + # print(f'n = {n}, max_m: {m}\n- l_max_cycles_for_m = {l_max_cycles_for_m}\n- l_len_l_cycle_len_count: {l_len_l_cycle_len_count}') + + # smm.shutdown() + diff --git a/system_utils/copy_folder_to_tar_gz.py b/system_utils/copy_folder_to_tar_gz.py index 97cb08b..7e4d8f4 100755 --- a/system_utils/copy_folder_to_tar_gz.py +++ b/system_utils/copy_folder_to_tar_gz.py @@ -134,6 +134,11 @@ class RootDirsFiles(RecordClass): for file_name in rootdirfile.l_file_name: file_path = os.path.join(root, file_name) + + if os.path.islink(file_path): + l_file_path_link.append(file_path) + continue + stat = os.stat(file_path) df.loc[row_nr] = [rel_root, file_name, 'f', stat, ''] @@ -204,46 +209,3 @@ class RootDirsFiles(RecordClass): tar_obj.addfile(tarinfo=tarinfo, fileobj=bytes_file) tar_obj.close() - - sys.exit() - - # first attempt - root_first, _, _ = next(os.walk(src_folder_path)) - len_root_first = len(root_first) - for iter_nr, (root, l_dir_name, l_file_name) in enumerate(os.walk(src_folder_path), 0): - root_short = root[len_root_first:] - - for file_name in l_file_name: - src_file_path = os.path.join(root, file_name) - in_tar_file_path = os.path.join(root_short, file_name).lstrip('/') - - if os.path.islink(src_file_path): - print('Skip link 
"{}"'.format(src_file_path)) - l_file_path_link.append(src_file_path) - continue - - print('copy "{}" -> "{}"'.format(src_file_path, in_tar_file_path)) - try: - bytes_file = io.BytesIO() - with open(src_file_path, 'rb') as f: - bytes_file.write(f.read()) - - tarinfo = tarfile.TarInfo(name=in_tar_file_path) - tarinfo.size = bytes_file.tell() - bytes_file.seek(0) - tar.addfile(tarinfo=tarinfo, fileobj=bytes_file) - # tar.addfile(tarinfo=tarinfo, fileobj=f) - except: - print('- Could not copy the file!') - l_file_path_failed.append(src_file_path) - - # if iter_nr >= 5: - # break - - tar.close() - - tar = tarfile.open(name=dst_file_path, mode='r:gz') - members = tar.getmembers() - # tar.close() - - # TODO: create class for root and l_dir_name and l_file_name diff --git a/template_file_ultra.py b/template_file_ultra.py index 245f5c0..594592c 100644 --- a/template_file_ultra.py +++ b/template_file_ultra.py @@ -20,6 +20,8 @@ from copy import deepcopy, copy from dotmap import DotMap from functools import reduce +from hashlib import sha256 +from io import BytesIO from memory_tempfile import MemoryTempfile from shutil import copyfile from pprint import pprint diff --git a/test_programs/call_anonymous_function_recursively.py b/test_programs/call_anonymous_function_recursively.py new file mode 100755 index 0000000..f207e15 --- /dev/null +++ b/test_programs/call_anonymous_function_recursively.py @@ -0,0 +1,63 @@ +#! /usr/bin/env -S /usr/bin/time /usr/bin/python3.9.5 -i + +# -*- coding: utf-8 -*- + +# Some other needed imports +import datetime +import dill +import gzip +import os +import pdb +import re +import sys +import traceback + +import numpy as np +import pandas as pd +import multiprocessing as mp + +from collections import defaultdict +from copy import deepcopy, copy +from dotmap import DotMap +from functools import reduce +from hashlib import sha256 +from io import BytesIO +from memory_tempfile import MemoryTempfile +from shutil import copyfile +from pprint import pprint +from typing import List, Set, Tuple, Dict, Union, Any +from PIL import Image + +CURRENT_WORKING_DIR = os.getcwd() +PATH_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) +HOME_DIR = os.path.expanduser("~") +TEMP_DIR = MemoryTempfile().gettempdir() +PYTHON_PROGRAMS_DIR = os.path.join(HOME_DIR, 'git/python_programs') + +# set the relative/absolute path where the utils_load_module.py file is placed! 
+sys.path.append(PYTHON_PROGRAMS_DIR) +from utils_load_module import load_module_dynamically + +var_glob = globals() +load_module_dynamically(**dict(var_glob=var_glob, name='utils', path=os.path.join(PYTHON_PROGRAMS_DIR, "utils.py"))) +load_module_dynamically(**dict(var_glob=var_glob, name='utils_multiprocessing_manager', path=os.path.join(PYTHON_PROGRAMS_DIR, "utils_multiprocessing_manager.py"))) + +mkdirs = utils.mkdirs +MultiprocessingManager = utils_multiprocessing_manager.MultiprocessingManager + +OBJS_DIR_PATH = os.path.join(PATH_ROOT_DIR, 'objs') +mkdirs(OBJS_DIR_PATH) + +PLOTS_DIR_PATH = os.path.join(PATH_ROOT_DIR, 'plots') +mkdirs(PLOTS_DIR_PATH) + +def main(): + d = {} + d['f'] = lambda x: (print(f'x: {x}'), d['f'](x-1))[-1] if x > 0 else print('finished!') + + return d + +if __name__ == '__main__': + d = main() + + (lambda g: (g.__setitem__('f', lambda x: (print(f'x: {x}'), g['f'](x-1))[-1] if x > 0 else print('finished!')), g['f'](5), 'YES')[-1])({}) diff --git a/utils_multiprocessing_manager.py b/utils_multiprocessing_manager.py index 036ecf3..d70b018 100644 --- a/utils_multiprocessing_manager.py +++ b/utils_multiprocessing_manager.py @@ -35,9 +35,10 @@ def f(x): MANAGER_SLEEP_TIME = 0.001 class MultiprocessingManager(Exception): - def __init__(self, cpu_count): + def __init__(self, cpu_count, is_print_on=True): self.cpu_count = cpu_count self.worker_amount = self.cpu_count - 1 + self.is_print_on = is_print_on # 1 proc for manager (class itself) # cpu_count-1 procs for the worker threads (processes) @@ -69,7 +70,8 @@ def _worker_thread(self, worker_nr, pipe_in, pipe_out): try: ret_val = d_func[func_name](*func_args) except: - print('Fail for func_name: {}, func_args: {}, at worker_nr: {}'.format(func_name, func_args, worker_nr)) + if self.is_print_on: + print('Fail for func_name: {}, func_args: {}, at worker_nr: {}'.format(func_name, func_args, worker_nr)) ret_val = None ret_tpl = (worker_nr, ret_val) pipe_out.send(ret_tpl) @@ -86,7 +88,8 @@ def define_new_func(self, name, func): for pipe_recv in self.pipes_recv_main: ret = pipe_recv.recv() worker_nr, text = ret - print("worker_nr: {}, text: {}".format(worker_nr, text)) + if self.is_print_on: + print("worker_nr: {}, text: {}".format(worker_nr, text)) def test_worker_threads_response(self): @@ -96,7 +99,9 @@ def test_worker_threads_response(self): for pipe_recv in self.pipes_recv_main: ret = pipe_recv.recv() assert ret == 'IS WORKING!' 
- # print('IS WORKING OK!!!') + if self.is_print_on: + pass + # print('IS WORKING OK!!!') def do_new_jobs(self, l_func_name, l_func_args): @@ -106,7 +111,8 @@ def do_new_jobs(self, l_func_name, l_func_args): if len_l_func_name <= self.worker_amount: for worker_nr, (pipe_send, func_name, func_args) in enumerate(zip(self.pipes_send_main, l_func_name, l_func_args), 0): pipe_send.send(('func_def_exec', (func_name, func_args))) - print("Doing job: worker_nr: {}".format(worker_nr)) + if self.is_print_on: + print("Doing job: worker_nr: {}".format(worker_nr)) finished_works = 0 dq_pipe_i = deque(range(0, len_l_func_name)) @@ -124,11 +130,13 @@ def do_new_jobs(self, l_func_name, l_func_args): l_ret.append(ret_val) finished_works += 1 - print("Finished: {:2}/{:2}, worker_nr: {}".format(finished_works, len_l_func_name, worker_nr)) + if self.is_print_on: + print("Finished: {:2}/{:2}, worker_nr: {}".format(finished_works, len_l_func_name, worker_nr)) else: for worker_nr, (pipe_send, func_name, func_args) in enumerate(zip(self.pipes_send_main, l_func_name[:self.worker_amount], l_func_args[:self.worker_amount]), 0): pipe_send.send(('func_def_exec', (func_name, func_args))) - print("Doing job: worker_nr: {}".format(worker_nr)) + if self.is_print_on: + print("Doing job: worker_nr: {}".format(worker_nr)) finished_works = 0 pipe_i = 0 @@ -146,12 +154,14 @@ def do_new_jobs(self, l_func_name, l_func_args): l_ret.append(ret_val) finished_works += 1 - print("Finished: {:2}/{:2}, worker_nr: {}".format(finished_works, len_l_func_name, worker_nr)) + if self.is_print_on: + print("Finished: {:2}/{:2}, worker_nr: {}".format(finished_works, len_l_func_name, worker_nr)) pipe_send = self.pipes_send_main[pipe_i] pipe_send.send(('func_def_exec', (func_name, func_args))) - print("Doing job: worker_nr: {}".format(worker_nr)) + if self.is_print_on: + print("Doing job: worker_nr: {}".format(worker_nr)) pipe_i = (pipe_i+1) % self.worker_amount @@ -170,7 +180,8 @@ def do_new_jobs(self, l_func_name, l_func_args): l_ret.append(ret_val) finished_works += 1 - print("Finished: {:2}/{:2}, worker_nr: {}".format(finished_works, len_l_func_name, worker_nr)) + if self.is_print_on: + print("Finished: {:2}/{:2}, worker_nr: {}".format(finished_works, len_l_func_name, worker_nr)) return l_ret
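The is_print_on flag added to utils_multiprocessing_manager.py above only suppresses the per-job progress prints; the job API itself is unchanged. A minimal usage sketch, assuming utils_multiprocessing_manager.py is on sys.path and using a trivial worker function as a stand-in for the real per-job work:

import multiprocessing as mp

from utils_multiprocessing_manager import MultiprocessingManager

def square(x):
    # trivial stand-in for a real worker function
    return x * x

if __name__ == '__main__':
    # is_print_on=False silences the "Doing job" / "Finished" progress lines guarded in this diff
    mult_proc_mng = MultiprocessingManager(cpu_count=mp.cpu_count(), is_print_on=False)
    mult_proc_mng.define_new_func('func_square', square)

    # each argument tuple is unpacked into the worker function
    l_arguments = [(i, ) for i in range(20)]
    l_ret = mult_proc_mng.do_new_jobs(['func_square'] * len(l_arguments), l_arguments)
    print("l_ret: {}".format(l_ret))

    del mult_proc_mng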