Merge pull request #58 from DiracInstitute/dev

Merge release candidate v0.3 to master

PWhiddy authored Oct 5, 2017
2 parents 84bfa71 + 9c67180 · commit 6c52810

Showing 133 changed files with 8,249 additions and 29,020 deletions.
27 changes: 15 additions & 12 deletions .gitignore
@@ -1,12 +1,15 @@
-# Byte-compiled / optimized / DLL files
-__pycache__/
-*.py[cod~]
-
-# Apple filesystem
-.DS_Store
-
-# Jupyter Notebook Checkpoints
-notebooks/.ipynb_checkpoints/*
-
-# Emacs backups
-*~
+# git ls-files --others --exclude-from=.git/info/exclude
+# Lines that start with '#' are comments.
+# For a project mostly in C, the following would be a good set of
+# exclude patterns (uncomment them if you want to use them):
+# *.[oa]
+# *~
+*.fits
+*.o
+*.d
+*.pyc
+*.dat
+.ipynb_checkpoints
+__pycache__
+CMakeCache.txt
+CMakeFiles
3 changes: 3 additions & 0 deletions .gitmodules
@@ -0,0 +1,3 @@
[submodule "search/pybinds/pybind11"]
	path = search/pybinds/pybind11
	url = https://github.com/pybind/pybind11.git
36 changes: 23 additions & 13 deletions README.md
@@ -1,35 +1,45 @@
 # KBMOD (Kernel-Based Moving Object Detection)
+<img src="https://gist.githubusercontent.com/PWhiddy/d42e66a9dd8e4af205a706f388a90ed4/raw/ae5bb87ada12538289852b58ba8e54b564a81584/kbmod.svg?sanitize=true" alt="logo" width="400" height="160"/>
 
-A kernel-Based Moving Object Detection image processing framework with CUDA
-Based on a Maximum Likelihood detection algorithm for moving astronomical objects.
 
 [![Build Status](https://travis-ci.org/DiracInstitute/kbmod.svg?branch=master)](https://travis-ci.org/DiracInstitute/kbmod) [![License](https://img.shields.io/badge/License-BSD%202--Clause-orange.svg)](https://opensource.org/licenses/BSD-2-Clause)
 
+A Maximum Likelihood detection algorithm for moving astronomical objects.
+
 KBMOD is a set of Python tools to search astronomical images for moving
 objects based upon the method of maximum likelihood detection.
 
-## Requirements
+## Setup
 
-The packages required to run the code are:
+**Requirements**
+
+The packages required to build the code are:
 
-* Numpy
-* Scipy
+* python3-dev
+* Scipy (Numpy, Matplotlib)
 * Scikit-learn
-* Matplotlib
 * Astropy
 * Cuda 8.0
 * CMake
 
-## Setup
+**To install:**
+```source install.sh```
+This will build the python library and run the tests.
 
-Build the python module by running ```cmake ./``` followed by ```make``` from inside the search/pybinds folder
+If you log out, next time run
+```source setup.bash```
+to re-append the library to the python path.
 
-Then in the root directory run ```source setup.bash```
-to append the library to the python path
+## Usage
+
+[Short Demonstration](notebooks/Quick_Test.ipynb)
+
+[Processing Real Images](notebooks/HITS_Main_Belt_Comparison.ipynb)
 
-## Example
+## Reference
 
-See the example [ipython notebook](https://github.com/jbkalmbach/kbmod/blob/master/notebooks/kbmod_demo.ipynb).
+[API Reference](notebooks/Kbmod_Reference.ipynb)
 
 ## License
Expand Down
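For orientation, the workflow these notebooks document can be sketched from the `kbmodpy` calls that appear in `analysis/filter_results.py` later in this commit. This is a minimal sketch only, not the notebooks' contents; the file names and the numeric search arguments are placeholders, not tested values:

from kbmodpy import kbmod as kb

# Placeholder file list; any time-ordered set of FITS images works here.
files = ["img_000.fits", "img_001.fits", "img_002.fits"]
images = [kb.layered_image(f) for f in files]
stack = kb.image_stack(images)

# Gaussian PSF width in pixels (1.4 is the value used in analysis/filter_results.py).
psf = kb.psf(1.4)

# Launch the GPU grid search; these bounds are illustrative only.
search = kb.stack_search(stack, psf)
search.gpu(1, 2, -0.04, 0.74, 1920.0, 4032.0, 3)
psi = search.get_psi()
phi = search.get_phi()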
3 changes: 3 additions & 0 deletions analysis/.gitignore
@@ -0,0 +1,3 @@
*.txt
*.py~
old
57 changes: 57 additions & 0 deletions analysis/batchSearch.py
@@ -0,0 +1,57 @@
from kbmodpy import kbmod as kb
import fullSearch

# A known real object in this data set, used to validate each search run
real_result = kb.trajectory()
real_result.flux = 5300
real_result.x = 3123
real_result.y = 3043
real_result.x_v = 2425
real_result.y_v = 1050

parameters = {
    "path": '../../HITS/test_35/4,6tempExp/new_header/',
    "max_images": 5,
    "psf_width": 1.5,
    "object_count": 4,
    "x_range": (5, 3650),
    "y_range": (5, 3650),
    "angle_range": (0.2, 0.5),
    "velocity_range": (2100, 2800),
    "flux_range": (250, 7000),
    "min_observations": 3,
    "angle_steps": 170,
    "velocity_steps": 100,
    "search_margin": 1.05,
    "real_results": [real_result],
    "flags": ~0,
    "flag_exceptions": [32, 39],
    "master_flags": int('100111', 2),
    "master_threshold": 2,
    "results_count": 1800000,
    "cluster_eps": 0.004,
    "match_v": 0.02,
    "match_coord": 1
}

total_matched = []
total_unmatched = []
all_stamps = []

# Record the parameter set at the head of each output file
mf = open("matched.txt", "a")
mf.write(str(parameters) + '\n::::::\n')
mf.close()
umf = open("unmatched.txt", "a")
umf.write(str(parameters) + '\n::::::\n')
umf.close()

runs = 5000

for i in range(runs):
    print("running search iteration " + str(i) + '\n')
    r_m, r_um, stmp = fullSearch.run_search(parameters)
    with open("matched.txt", "a") as matched_file:
        for res in r_m:
            matched_file.write(str(res) + '\n')
    with open("unmatched.txt", "a") as unmatched_file:
        for res in r_um:
            unmatched_file.write(str(res) + '\n')
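`fullSearch` is not part of this diff, so only its contract is visible here: `run_search(parameters)` performs one randomized search and returns matched results, unmatched results, and stamps. The sketch below is hypothetical; the helper name `match_results` and the exact semantics of the `match_v` and `match_coord` tolerances are assumptions inferred from the parameter dict above, not taken from the repository.

# Hypothetical sketch; see the caveats in the paragraph above.
def match_results(found, real_results, match_v, match_coord):
    """Split recovered trajectories into matched and unmatched sets."""
    matched, unmatched = [], []
    for t in found:
        is_match = False
        for r in real_results:
            # Fractional velocity error and absolute positional error (pixels)
            v_err = (abs(t.x_v - r.x_v) + abs(t.y_v - r.y_v)) / (abs(r.x_v) + abs(r.y_v))
            p_err = max(abs(t.x - r.x), abs(t.y - r.y))
            if v_err <= match_v and p_err <= match_coord:
                is_match = True
                break
        (matched if is_match else unmatched).append(t)
    return matched, unmatched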
61 changes: 61 additions & 0 deletions analysis/batchStamps.py
@@ -0,0 +1,61 @@
from kbmodpy import kbmod as kb
import makeStamps
import scipy.misc

# A known real object in this data set, used to separate positive from negative stamps
real_result = kb.trajectory()
real_result.flux = 5300
real_result.x = 3123
real_result.y = 3043
real_result.x_v = 2425
real_result.y_v = 1050

parameters = {
    "path": '../../HITS/test_35/4,6tempExp/new_header/',
    "max_images": 5,
    "psf_width": 1.5,
    "object_count": 100,
    "x_range": (5, 3650),
    "y_range": (5, 3650),
    "angle_range": (0.2, 0.5),
    "velocity_range": (2100, 2800),
    "flux_range": (200, 8000),
    "min_observations": 3,
    "angle_steps": 120,
    "velocity_steps": 80,
    "search_margin": 1.05,
    "real_results": [real_result],
    "flags": ~0,
    "flag_exceptions": [32, 39],
    "master_flags": int('100111', 2),
    "master_threshold": 2,
    "results_count": 15000,
    "cluster_eps": 0.004,
    "match_v": 0.01,
    "match_coord": 0,
    "stamp_dim": 21
}

#mf = open("matched.txt", "a")
#mf.write(str(parameters)+'\n::::::\n')
#mf.close()
#umf = open("unmatched.txt", "a")
#umf.write(str(parameters)+'\n::::::\n')
#umf.close()

runs = 5000

for i in range(runs):
    print("running search iteration " + str(i) + '\n')
    matched_stamps, bad_stamps = makeStamps.run_search(parameters)
    # Save matched stamps as positive training examples, the rest as negatives
    for s in range(len(matched_stamps)):
        scipy.misc.imsave('stamps/positive/R' + str(i+1) + 'M' + str(s+1) + '.png',
                          matched_stamps[s])
    for s in range(len(bad_stamps)):
        scipy.misc.imsave('stamps/negative/R' + str(i+1) + 'M' + str(s+1) + '.png',
                          bad_stamps[s])
    #with open("matched.txt", "a") as matched_file:
    #    for res in r_m:
    #        matched_file.write(str(res)+'\n')
    #with open("unmatched.txt", "a") as unmatched_file:
    #    for res in r_um:
    #        unmatched_file.write(str(res)+'\n')
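Note that the loop writes into `stamps/positive/` and `stamps/negative/` but never creates those directories, and `scipy.misc.imsave` additionally requires Pillow to be installed. A small setup sketch, assuming the directories may not exist yet:

import os

# Create the output directories up front so the first imsave call cannot fail.
for d in ("stamps/positive", "stamps/negative"):
    os.makedirs(d, exist_ok=True)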
144 changes: 144 additions & 0 deletions analysis/filter_results.py
@@ -0,0 +1,144 @@
import os
import pickle
import sys
import shutil
import numpy as np
import pandas as pd
import multiprocessing as mp
from kbmodpy import kbmod as kb
from astropy.io import fits
from astropy.wcs import WCS
from skimage import measure
from filter_utils import *

if __name__ == "__main__":

    image_folder = sys.argv[1]
    results_file = sys.argv[2]

    # The following sets up the ability to create psi/phi images and is taken
    # from HITS_Main_Belt_Example.ipynb
    flags = ~0
    flag_exceptions = [32, 39]
    master_flags = int('100111', 2)

    image_file_list = [str(image_folder + '/' + filename) for filename in os.listdir(image_folder)]
    image_file_list.sort()
    images = [kb.layered_image(f) for f in image_file_list]
    p = kb.psf(1.4)
    stack = kb.image_stack(images)
    stack.apply_mask_flags(flags, flag_exceptions)
    stack.apply_master_mask(master_flags, 2)

    image_array = stack.get_images()
    search = kb.stack_search(stack, p)

    search.gpu(1, 2, -0.0442959674533, 0.741102195944, 1920.0, 4032.0, 3)

    psi = search.get_psi()
    phi = search.get_phi()

    image_times = np.array(stack.get_times())

    file_len, header_len = file_and_header_length(results_file)

    chunk_size = 250000
    total_chunks = 20
    print(total_chunks)

    results_keep = []
    results_ps = []
    results_curves = []
    results_likes = []

    for i in range(int(total_chunks)):
        print(i)
        results_arr = load_chunk(results_file, chunk_size*i + header_len, chunk_size)
        # Chunks arrive in decreasing-likelihood order, so stop at the 5.0 cut
        if results_arr['likelihood'][0] < 5.0:
            print('Likelihood now below 5.0. Breaking Loop.')
            break
        psi_lc, phi_lc = get_likelihood_lcs(results_arr, psi, phi, image_times)

        # Kalman-filter the psi/phi light curves in parallel, 100 curves per task
        print('Kalman Filtering')
        pool = mp.Pool(processes=16)
        results = [pool.apply_async(create_filter_curve,
                                    args=(psi_lc[j*100:(j*100)+100],
                                          phi_lc[j*100:(j*100)+100],
                                          j)) for j in range(int(len(psi_lc)/100))]
        pool.close()
        pool.join()
        results = [r.get() for r in results]
        results.sort()
        new_nu = [x for y in results for x in y[1]]

        # Re-rank by the filtered likelihoods and keep those still above 5.0
        print('Reordering')
        reorder = np.argsort(new_nu)[::-1]
        reorder_keep = reorder[np.where(np.array(new_nu)[reorder] > 5.0)[0]]

        print('Clustering %i objects' % len(reorder_keep))
        if len(reorder_keep) > 0:
            db_cluster, top_vals = clusterResults(results_arr[reorder_keep])
            keep_details = results_arr[reorder_keep[top_vals]]
            keep_new_nu = np.array(new_nu)[reorder_keep[top_vals]]
            keep_ps = []
            keep_lc = []
            for idx in range(len(top_vals)):
                obj_num = reorder_keep[top_vals[idx]]
                kalman_curve, kalman_idx = return_filter_curve(psi_lc[obj_num], phi_lc[obj_num])

                ps = createPostageStamp(np.array(image_array)[kalman_idx],
                                        results_arr[['t0_x', 't0_y']][obj_num],
                                        results_arr[['v_x', 'v_y']][obj_num],
                                        image_times[kalman_idx], [21., 21.])

                # Zero out masked pixels, then co-add the stamps along the trajectory
                ps_1 = np.array(ps[1])
                ps_1[ps_1 < -9000] = 0.
                keep_ps.append(np.sum(np.array(ps_1), axis=0))
                keep_lc.append([kalman_idx, kalman_curve])

            results_keep.append(keep_details)
            results_ps.append(keep_ps)
            results_curves.append(keep_lc)
            results_likes.append(keep_new_nu)
            print('Returning %i objects' % len(keep_details))

    # Get the results out of lists of lists and into single lists
    final_results = [x for y in results_keep for x in y]
    final_ps = [x for y in results_ps for x in y]
    final_curves = [x for y in results_curves for x in y]
    final_likelihoods = [x for y in results_likes for x in y]

    new_f = np.array(final_results, dtype=[('t0_x', np.float), ('t0_y', np.float), ('v_x', np.float),
                                           ('v_y', np.float), ('likelihood', np.float), ('est_flux', np.float)])

    # Cluster again on the full set of returned results
    db_cluster, top_vals = clusterResults(new_f)

    final_results = new_f[top_vals]
    final_ps = np.array(final_ps)[top_vals]
    final_curves = np.array(final_curves)[top_vals]
    final_likelihoods = np.array(final_likelihoods)[top_vals]

    # Filtering step: reject stamps whose central image moments look non-point-like
    mom_array = []
    for s in final_ps:
        s[s < -9000] = 0.
        # Normalize flux in each stamp
        s -= np.min(s)
        s /= np.sum(s)
        s = np.array(s, dtype=np.float)
        mom = measure.moments_central(s, cr=10, cc=10)
        mom_array.append([mom[2, 0], mom[0, 2], mom[1, 1], mom[1, 0], mom[0, 1]])
    mom_array = np.array(mom_array)

    # These are the filtering parameters, but they could still be fine-tuned
    mom_array_test = np.where((mom_array[:, 0] < 32.) & (mom_array[:, 1] < 32.) &
                              (np.abs(mom_array[:, 3]) < .3) & (np.abs(mom_array[:, 4]) < .3))[0]

    final_results = final_results[mom_array_test]
    final_ps = np.array(final_ps)[mom_array_test]
    final_curves = np.array(final_curves)[mom_array_test]
    final_likelihoods = np.array(final_likelihoods)[mom_array_test]

    # Save results
    np.savetxt('final_results.csv', final_results)
    np.savetxt('final_ps.dat', final_ps.reshape(len(final_results), 21*21))
    with open('final_curves.pkl', 'wb') as fi:
        pickle.dump(final_curves, fi)
    np.savetxt('final_likelihoods.dat', final_likelihoods)
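The 5.0 threshold applied twice above is a cut on the trajectory likelihood that KBMOD builds from the psi and phi images: the sum of a trajectory's psi samples divided by the square root of the sum of its phi samples. A minimal sketch of that statistic (the real `create_filter_curve`/`return_filter_curve` additionally drop Kalman-flagged observations before summing):

import numpy as np

def trajectory_likelihood(psi_curve, phi_curve):
    """nu = sum(psi) / sqrt(sum(phi)) over the unmasked observations."""
    psi_curve = np.asarray(psi_curve, dtype=float)
    phi_curve = np.asarray(phi_curve, dtype=float)
    valid = phi_curve > 0.0  # skip masked or zero-variance samples
    if not valid.any():
        return 0.0
    return psi_curve[valid].sum() / np.sqrt(phi_curve[valid].sum())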