Mathijs's addition of user-chosen sn_thr and bic_thr #1

Draft · wants to merge 5 commits into base: master
260 changes: 260 additions & 0 deletions .ipynb_checkpoints/testing_shadow_star-checkpoint.ipynb
@@ -0,0 +1,260 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "5dc84dc9",
"metadata": {},
"outputs": [],
"source": [
"import star_shadow as sts\n"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "28b06dcd",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"2023-10-30 18:23:40 - INFO - Start of analysis\n",
"2023-10-30 18:23:40 - INFO - Frequency extraction done. Total time elapsed: 0.3s.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Loaded analysis file with identifier: sim_002_lc_analysis_1, created on 2023-10-30 17:14:14.619250. \n",
"data_id: 2. Description: Frequency extraction results. \n",
"\n",
"Loaded analysis file with identifier: sim_002_lc_analysis_2, created on 2023-10-30 17:14:38.836586. \n",
"data_id: 2. Description: Multi-sinusoid NL-LS optimisation results. \n",
"\n",
"Loaded analysis file with identifier: sim_002_lc_analysis_2, created on 2023-10-30 17:14:38.836586. \n",
"data_id: 2. Description: Multi-sinusoid NL-LS optimisation results. \n",
"\n",
"Loaded analysis file with identifier: sim_002_lc_analysis_3, created on 2023-10-30 17:16:13.270897. \n",
"data_id: 2. Description: Harmonic frequencies coupled. \n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"2023-10-30 18:23:41 - INFO - Harmonic analysis done. Total time elapsed: 0.9s.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Loaded analysis file with identifier: sim_002_lc_analysis_4, created on 2023-10-30 17:16:17.110115. \n",
"data_id: 2. Description: Additional non-harmonic extraction. \n",
"\n",
"Loaded analysis file with identifier: sim_002_lc_analysis_5, created on 2023-10-30 17:16:40.121536. \n",
"data_id: 2. Description: Multi-sine NL-LS optimisation results with coupled harmonics. \n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"2023-10-30 18:23:42 - INFO - End of analysis. Total time elapsed: 3.3s.\n"
]
}
],
"source": [
"location = '/STER/mathijsv/star_shadow/data/'\n",
"file = 'sim_002_lc.dat'\n",
"sts.analyse_lc_from_file(location + file, p_orb=0, i_sectors=None, stage='harmonics', method='fitter', data_id='2', \n",
" save_dir=None, overwrite=False, verbose=True)\n"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "8fdfec26",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Help on function analyse_lc_from_file in module star_shadow.main_functions:\n",
"\n",
"analyse_lc_from_file(file_name, p_orb=0, i_sectors=None, stage='all', method='fitter', data_id='none', save_dir=None, overwrite=False, verbose=False)\n",
" Do all steps of the analysis for a given light curve file\n",
" \n",
" Parameters\n",
" ----------\n",
" file_name: str\n",
" Path to a file containing the light curve data, with\n",
" timestamps, normalised flux, error values as the\n",
" first three columns, respectively.\n",
" p_orb: float\n",
" Orbital period of the eclipsing binary in days\n",
" i_sectors: numpy.ndarray[int]\n",
" Pair(s) of indices indicating the separately handled timespans\n",
" in the piecewise-linear curve. These can indicate the TESS\n",
" observation sectors, but taking half the sectors is recommended.\n",
" If only a single curve is wanted, set\n",
" i_sectors = np.array([[0, len(times)]]).\n",
" stage: str\n",
" Which analysis stages to do: 'all'/'a' for everything\n",
" 'frequencies'/'freq'/'f' for just the iterative prewhitening\n",
" 'harmonics'/'harm'/'h' for up to and including the harmonic coupling only\n",
" 'timings'/'t' for up to and including finding the eclipse timings\n",
" method: str\n",
" Method of EB light curve model optimisation. Can be 'sampler' or 'fitter'.\n",
" Sampler gives extra error estimates on the eclipse parameters\n",
" Fitter is much faster and still accurate\n",
" data_id: int, str\n",
" User defined identification for the dataset used\n",
" save_dir: str\n",
" Path to a directory for saving the results. Also used to load\n",
" previous analysis results.\n",
" overwrite: bool\n",
" If set to True, overwrite old results in the same directory as\n",
" save_dir, or (if False) to continue from the last save-point.\n",
" verbose: bool\n",
" If set to True, this function will print some information\n",
" \n",
" Returns\n",
" -------\n",
" None\n",
" \n",
" Notes\n",
" -----\n",
" If save_dir is not given, results are saved in the same directory as\n",
" the given light curve file (will create a subfolder)\n",
" \n",
" The input text files are expected to have three columns with in order:\n",
" times (bjd), signal (flux), signal_err (flux error)\n",
" And the timestamps should be in ascending order.\n",
" The expected text file format is space separated.\n",
"\n"
]
}
],
"source": [
"help(sts.analyse_lc_from_file)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "01de6e98",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 4,
"id": "b0c02e31",
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "2855a23b",
"metadata": {},
"outputs": [
{
"ename": "ValueError",
"evalue": "Dataset(s) incompatible with Pandas data types, not table, or no datasets found in HDF5 file.",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m/tmp/ipykernel_141129/2602489043.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread_hdf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'/STER/mathijsv/star_shadow/data/sim_002_lc_analysis/sim_002_lc_analysis_3.hdf5'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m~/anaconda3/envs/python37/lib/python3.7/site-packages/pandas/io/pytables.py\u001b[0m in \u001b[0;36mread_hdf\u001b[0;34m(path_or_buf, key, mode, errors, where, start, stop, columns, iterator, chunksize, **kwargs)\u001b[0m\n\u001b[1;32m 437\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgroups\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 438\u001b[0m raise ValueError(\n\u001b[0;32m--> 439\u001b[0;31m \u001b[0;34m\"Dataset(s) incompatible with Pandas data types, \"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 440\u001b[0m \u001b[0;34m\"not table, or no datasets found in HDF5 file.\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 441\u001b[0m )\n",
"\u001b[0;31mValueError\u001b[0m: Dataset(s) incompatible with Pandas data types, not table, or no datasets found in HDF5 file."
]
}
],
"source": [
"pd.read_hdf('/STER/mathijsv/star_shadow/data/sim_002_lc_analysis/sim_002_lc_analysis_3.hdf5')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8aaeb9b0",
"metadata": {},
"outputs": [],
"source": [
"pd.read_csv('/STER/mathijsv/star_shadow/data/sim_002_lc_analysis/sim_002_lc_analysis_6_ecl_indices.csv')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d5dac03e",
"metadata": {},
"outputs": [],
"source": [
"pd.read_hdf('/STER/mathijsv/star_shadow/data/sim_002_lc_analysis/sim_002_lc_analysis_7_dists.hdf5')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "02d6b063",
"metadata": {},
"outputs": [],
"source": [
"pd.read_csv('/STER/mathijsv/star_shadow/data/sim_002_lc_analysis/sim_002_lc_analysis_summary.csv')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ed40bc2a",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "b3754e28",
"metadata": {},
"outputs": [],
"source": [
"pd.__version__"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.16"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
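For readers following along outside the notebook, here is a minimal sketch of the same analysis call, built only from the cell and the `help()` docstring shown in the checkpoint notebook above. The file path is a placeholder, and the explicit single-interval `i_sectors` follows the docstring's own suggestion (`np.array([[0, len(times)]])`) rather than the notebook's `i_sectors=None`.

```python
# Sketch of the analysis call from the notebook diff above.
# The path is a placeholder; data format (space-separated times, flux, flux_err)
# is taken from the docstring's Notes section.
import numpy as np
import star_shadow as sts

file_name = '/path/to/sim_002_lc.dat'
times = np.loadtxt(file_name, usecols=(0,))  # only needed here to build i_sectors

# Per the docstring: one interval covering all timestamps if no sector split is wanted
i_sectors = np.array([[0, len(times)]])

sts.analyse_lc_from_file(file_name, p_orb=0, i_sectors=i_sectors, stage='harmonics',
                         method='fitter', data_id='2', save_dir=None,
                         overwrite=False, verbose=True)
```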
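The `pd.read_hdf` cell in the notebook raises a `ValueError` because the analysis `.hdf5` files are apparently not pandas `HDFStore` tables. As a workaround, a generic HDF5 reader such as h5py can at least list what the file contains; this is a sketch under the assumption that h5py is installed, and it makes no assumptions about the dataset names star_shadow writes. The path is copied from the notebook.

```python
# Inspect a star_shadow analysis HDF5 file without pandas.
# No dataset names are assumed; we only enumerate what is present.
import h5py

path = '/STER/mathijsv/star_shadow/data/sim_002_lc_analysis/sim_002_lc_analysis_3.hdf5'
with h5py.File(path, 'r') as f:
    # print every group/dataset name in the file
    f.visit(print)
    # print any file-level attributes (e.g. identifier, description, if stored)
    for key, value in f.attrs.items():
        print(key, '=', value)
```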
4 changes: 4 additions & 0 deletions MANIFEST.in
@@ -0,0 +1,4 @@
include *.txt
include data/*
include LICENCE.md
include README.md
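MANIFEST.in only controls which extra files go into the source distribution. For the `data/*` files inside package directories to also be installed, setuptools typically needs `include_package_data=True` (or `package_data`) as well; this is a hypothetical excerpt, since the repository's actual setup configuration is not shown in this diff.

```python
# Hypothetical setup.py excerpt pairing with the MANIFEST.in above (assumption,
# not the repository's actual configuration).
from setuptools import setup, find_packages

setup(
    name='star_shadow',
    packages=find_packages(),
    include_package_data=True,  # install MANIFEST.in files that live inside packages
)
```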
13 changes: 13 additions & 0 deletions build/lib/star_shadow/__init__.py
@@ -0,0 +1,13 @@
"""STAR SHADOW __init__ file

Code written by: Luc IJspeert
"""

from . import main_functions
from .main_functions import *
from . import timeseries_functions as tsf
from . import timeseries_fitting as tsfit
from . import analysis_functions as af
from . import mcmc_functions as mcf
from . import utility as ut
from . import visualisation as vis
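This `__init__.py` star-imports `main_functions` at package level and exposes the submodules under short aliases. A brief sketch of how those names surface to user code, using only names visible in the file above plus `analyse_lc_from_file`, which appears in the notebook diff:

```python
# How the aliases defined in __init__.py are reached from user code.
import star_shadow as sts

# main_functions is star-imported, so its public functions sit at package level
sts.analyse_lc_from_file  # same function the notebook calls

# submodules are reachable under their short aliases
print(sts.tsf)    # timeseries_functions
print(sts.tsfit)  # timeseries_fitting
print(sts.af)     # analysis_functions
print(sts.mcf)    # mcmc_functions
print(sts.ut)     # utility
print(sts.vis)    # visualisation
```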