Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add support for Galileo #8

Open
wants to merge 20 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -2,4 +2,6 @@
Icon?
__pycache__
*.csv
desktop.ini
desktop.ini
.venv/
.vscode/
40 changes: 27 additions & 13 deletions column_casting.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
import sqlalchemy


# Default database datatype for all non-derived data is text
# This file specifies columns to cast and what the python and database datatypes should be is before importing to database

Expand All @@ -11,12 +10,12 @@
# But that first step is currently disabled, thought to be unnecessary, so conversion goes:
# str -> db_dataype in dfs_to_db()
# DateTime, boolean, and string data values are handled correctly automatically
# Conversion to db_dataype disregards the sentence_type, so if column db_datatypes are different, column names must be uniquw
# Conversion to db_dataype disregards the sentence_type, so if column db_datatypes are different, column names must be unique

# Database datatypes reference: https://www.tutorialspoint.com/postgresql/postgresql_data_types.htm


datatype_dict = {} # Must contain (key, value) pair for all destination datatypes
# Must contain (key, value) pair for all destination datatypes
datatype_dict = {}
datatype_dict['Int16'] = sqlalchemy.types.SmallInteger()
datatype_dict['Int32'] = sqlalchemy.types.Integer()
datatype_dict['float32'] = sqlalchemy.types.Float(precision=6)
Expand All @@ -25,7 +24,15 @@

# This db_datatypes dictionary is completed in dfs_to_db()
db_datatypes = {}
db_datatypes['cycle_id'] = sqlalchemy.types.Integer()
db_datatypes['unique_id'] = 'varchar(32)'
db_datatypes['cycle_id'] = 'integer'
db_datatypes['datetime'] = 'timestamp'
db_datatypes['datetime_is_interpolated'] = 'boolean'
db_datatypes['sentence_is_merged_from_multiple'] = 'boolean'
db_datatypes['talker'] = 'varchar(2)'
db_datatypes['sentence_type'] = 'varchar(4)'
db_datatypes['latitude'] = 'decimal'
db_datatypes['longitude'] = 'decimal'


columns_to_cast = {}
Expand All @@ -46,19 +53,22 @@
'sv_prn_num_13', 'elevation_deg_13', 'azimuth_13', 'snr_13',
'sv_prn_num_14', 'elevation_deg_14', 'azimuth_14', 'snr_14',
'sv_prn_num_15', 'elevation_deg_15', 'azimuth_15', 'snr_15',
'sv_prn_num_16', 'elevation_deg_16', 'azimuth_16', 'snr_16',]
'sv_prn_num_16', 'elevation_deg_16', 'azimuth_16', 'snr_16']

columns_to_cast['RMC', 'Int32'] = ['datestamp']
columns_to_cast['RMC', 'float32'] = ['timestamp', 'lat', 'lon', 'spd_over_grnd', 'true_course', 'mag_variation']
columns_to_cast['RMC', 'text'] = ['status', 'lat_dir', 'lon_dir', 'mode', 'mag_var_dir']
columns_to_cast['RMC', 'text'] = ['status', 'lat_dir', 'lon_dir', 'mode', 'mag_var_dir', 'nav_status', 'mode_indicator']
# For RMC, added 'nav_status' and 'mode_indicator'

columns_to_cast['GGA', 'float32'] = ['timestamp', 'lat', 'lon', 'horizontal_dil', 'altitude', 'geo_sep']
columns_to_cast['GGA', 'float32'] = ['timestamp', 'lat', 'lon', 'horizontal_dil', 'altitude', 'geo_sep', 'age_gps_data', 'ref_station_id']
columns_to_cast['GGA', 'Int16'] = ['gps_qual', 'num_sats']
columns_to_cast['GGA', 'text'] = ['lat_dir', 'altitude_units', 'geo_sep_units']
# TODO: For GGA, unsure about 'age_gps_data' and 'ref_station_id'
columns_to_cast['GGA', 'text'] = ['lat_dir', 'lon_dir', 'altitude_units', 'geo_sep_units']
# For GGA, unsure about 'age_gps_data' and 'ref_station_id', can be 'Int32' or 'float32'
# For GGA, added 'lon_dir'

columns_to_cast['GLL', 'float32'] = ['lat', 'lon']
columns_to_cast['GLL', 'float32'] = ['timestamp', 'lat', 'lon']
columns_to_cast['GLL', 'text'] = ['lat_dir', 'lon_dir', 'status', 'faa_mode']
# For GLL, added 'timestamp'

columns_to_cast['VTG', 'float32'] = ['true_track', 'mag_track', 'spd_over_grnd_kts', 'spd_over_grnd_kmph']
columns_to_cast['VTG', 'text'] = ['true_track_sym', 'mag_track_sym', 'spd_over_grnd_kts_sym', 'spd_over_grnd_kmph_sym', 'faa_mode']
Expand All @@ -68,8 +78,12 @@
'gp_sv_id09', 'gp_sv_id10', 'gp_sv_id11', 'gp_sv_id12',
'gl_sv_id01', 'gl_sv_id02', 'gl_sv_id03', 'gl_sv_id04',
'gl_sv_id05', 'gl_sv_id06', 'gl_sv_id07', 'gl_sv_id08',
'gl_sv_id09', 'gl_sv_id10', 'gl_sv_id11', 'gl_sv_id12',]
'gl_sv_id09', 'gl_sv_id10', 'gl_sv_id11', 'gl_sv_id12',
'ga_sv_id01', 'ga_sv_id02', 'ga_sv_id03', 'ga_sv_id04',
'ga_sv_id05', 'ga_sv_id06', 'ga_sv_id07', 'ga_sv_id08',
'ga_sv_id09', 'ga_sv_id10', 'ga_sv_id11', 'ga_sv_id12']
columns_to_cast['GSA', 'float32'] = ['pdop', 'hdop', 'vdop']
columns_to_cast['GSA', 'text'] = ['mode']
# For GSA, added 'ga_sv_id01' .. 'ga_sv_id12'

columns_to_cast['GST', 'float32'] = ['rms', 'std_dev_latitude', 'std_dev_longitude', 'std_dev_altitude']
columns_to_cast['GST', 'float32'] = ['rms', 'std_dev_latitude', 'std_dev_longitude', 'std_dev_altitude']
4 changes: 2 additions & 2 deletions db_creds.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
DB_USER = "postgres"
DB_PASSWORD = "postgres"
DB_PASSWORD = "postgres"
DB_HOST = "localhost"
DB_PORT = "5432"
DB_NAME = "nmea_data"
DB_NAME = "nmea_data"
24 changes: 12 additions & 12 deletions db_data_import.py
Original file line number Diff line number Diff line change
@@ -1,19 +1,18 @@
IF_EXISTS_OPT = 'append' # 'fail', 'replace', or 'append', see https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_sql.html


import os
import sys
import sqlalchemy #import create_engine

import pandas as pd
import psycopg2
import sqlalchemy # import create_engine
import sqlalchemy.exc

# Local modules/library files:
import db_creds

# 'fail', 'replace', or 'append', see https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_sql.html
IF_EXISTS_OPT = 'append'

def send_data_to_db(log_file_path, dfs, table_name_base, table_name_suffixes=None, dtypes=None):

log_file_name = os.path.basename(log_file_path)

def send_data_to_db(dfs: list[pd.DataFrame], table_name_base: str, table_name_suffixes=None, dtypes=None):
db_access_str = f'postgresql://{db_creds.DB_USER}:{db_creds.DB_PASSWORD}@{db_creds.DB_HOST}:{db_creds.DB_PORT}/{db_creds.DB_NAME}'
engine = sqlalchemy.create_engine(db_access_str)

Expand All @@ -30,12 +29,13 @@ def send_data_to_db(log_file_path, dfs, table_name_base, table_name_suffixes=Non

try:
df.to_sql(table_name, engine, method='multi', if_exists=if_exists_opt_loc, index=False, dtype=dtypes)
except (sqlalchemy.exc.OperationalError, psycopg2.OperationalError) as e:
sys.exit(f"\n\n\033[1m\033[91mERROR writing to database:\n {e}\033[0m\n\nExiting.\n\n") # Print error text bold and red
except (sqlalchemy.exc.OperationalError, psycopg2.OperationalError) as ex:
# Print error text bold and red
sys.exit(f"\n\n\033[1m\033[91mERROR writing to database:\n {ex}\033[0m\n\nExiting.\n\n")

table_names.append(table_name)

return table_names

# TODO: Create separate table for log file IDs and names. Check what the current largest ID is, then append a column to
# the dfs with that ID + 1, and a row to the log file table with that ID and the log file name, or something like that
# the dfs with that ID + 1, and a row to the log file table with that ID and the log file name, or something like that
34 changes: 23 additions & 11 deletions db_table_lists.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,23 @@
nmea_tables = [
'nmea_gl_gsv',
'nmea_gn_gga',
'nmea_gn_gll',
'nmea_gn_gns',
'nmea_gn_gsa',
'nmea_gn_gst',
'nmea_gn_rmc',
'nmea_gn_vtg',
'nmea_gp_gsv',
]
NMEA_TABLES = [
# GPS
'nmea_gp_gga',
'nmea_gp_gsa',
'nmea_gp_gsv',
'nmea_gp_rmc',
'nmea_gp_vtg',
# GLONASS
'nmea_gl_gsv',
'nmea_gn_gga',
'nmea_gn_gll',
'nmea_gn_gns',
'nmea_gn_gsa',
'nmea_gn_gst',
'nmea_gn_rmc',
'nmea_gn_vtg',
# Galileo
'nmea_ga_gga',
'nmea_ga_gsa',
'nmea_ga_gsv',
'nmea_ga_rmc',
'nmea_ga_vtg',
]
89 changes: 43 additions & 46 deletions db_utils.py
Original file line number Diff line number Diff line change
@@ -1,87 +1,84 @@
# TODO: Overriding the print function isn't a good way to handle this, replace with a custom library that does this
import functools
import sys

import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
import sqlalchemy
import sqlalchemy.exc

# Local modules/library files:
import db_creds
import db_table_lists

# TODO: Overriding the print function isn't a good way to handle this, replace with a custom library that does this
import functools
print = functools.partial(print, flush=True) # Prevent print statements from buffering till end of execution
# Prevent print statements from buffering till end of execution
print = functools.partial(print, flush=True)


def drop_db_tables(tables_to_drop: list[str], verbose=False):
[psql_con, psql_cursor] = setup_db_connection()

def drop_db_tables(tables_to_drop, verbose=False):

[psqlCon, psqlCursor] = setup_db_connection()

# Drop tables
tableList = ""
for idx, tableName in enumerate(tables_to_drop):
tableList = tableList + tableName
if idx < len(tables_to_drop)-1: # Don't append comma after last table name
tableList = tableList + ", "
table_list = ""
for idx, table_name in enumerate(tables_to_drop):
table_list = table_list + table_name
# Don't append a comma after last table name
if idx < len(tables_to_drop) - 1:
table_list = table_list + ", "
if verbose:
print(f"Dropping database table {tableName} (and any dependent objects) if it exists.")
print(f"Dropping database table {table_name} (and any dependent objects) if it exists.")

dropTableStmt = f"DROP TABLE IF EXISTS \"{tableName}\" CASCADE;" # Quotes arouund table names are required for case sensitivity
psqlCursor.execute(dropTableStmt);
# Quotes around table names are required for case sensitivity
drop_table_stmt = f"DROP TABLE IF EXISTS \"{table_name}\" CASCADE;"
psql_cursor.execute(drop_table_stmt)

free_db_connection(psqlCon, psqlCursor)
free_db_connection(psql_con, psql_cursor)


def create_table(table_name, columns=None):

db_command = f"""
CREATE TABLE IF NOT EXISTS "{table_name}" (
"""
def create_table(table_name: str, columns=None):
db_command = f"CREATE TABLE IF NOT EXISTS \"{table_name}\" ("

if columns:
for idx, column in enumerate(columns):
db_command = db_command + '"' + column['name'] + '" ' + column['datatype']
if idx < len(columns)-1: # Don't append a comman after the last column declaration
db_command = db_command + ','
# Don't append a comma after the last column declaration
if idx < len(columns) - 1:
db_command = db_command + ", "

db_command = db_command + ')'
db_command = db_command + ");"

run_db_command(db_command)


def run_db_command(db_command):

[psqlCon, psqlCursor] = setup_db_connection()
def run_db_command(db_command: str):
[psql_con, psql_cursor] = setup_db_connection()

# Run command on database
psqlCursor.execute(db_command);
psql_cursor.execute(db_command)

# print(psqlCon.notices)
# print(psqlCon.notifies)
# print(psql_con.notices)
# print(psql_con.notifies)

free_db_connection(psqlCon, psqlCursor)
free_db_connection(psql_con, psql_cursor)


def setup_db_connection():

db_access_str = f'postgresql://{db_creds.DB_USER}:{db_creds.DB_PASSWORD}@{db_creds.DB_HOST}:{db_creds.DB_PORT}/{db_creds.DB_NAME}'
db_access_str = f"postgresql://{db_creds.DB_USER}:{db_creds.DB_PASSWORD}@{db_creds.DB_HOST}:{db_creds.DB_PORT}/{db_creds.DB_NAME}"

# Start a PostgreSQL database session
try:
psqlCon = psycopg2.connect(db_access_str);
except (sqlalchemy.exc.OperationalError, psycopg2.OperationalError) as e:
sys.exit(f"\n\033[1m\033[91mERROR connecting to database:\n {e}\033[0m\n\nExiting.\n\n") # Print error text bold and red

psqlCon.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT);
psql_con = psycopg2.connect(db_access_str)
except (sqlalchemy.exc.OperationalError, psycopg2.OperationalError) as ex:
# Print error text bold and red
sys.exit(f"\n\033[1m\033[91mERROR connecting to database:\n {ex}\033[0m\n\nExiting.\n\n")

# Open a database cursor
psqlCursor = psqlCon.cursor();
psql_con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)

return [psqlCon, psqlCursor]
# Open a database cursor
psql_cursor = psql_con.cursor()

return [psql_con, psql_cursor]

def free_db_connection(psqlCon, psqlCursor):

def free_db_connection(psql_con, psql_cursor):
# Free the resources
psqlCursor.close();
psqlCon.close();
psql_cursor.close()
psql_con.close()
49 changes: 49 additions & 0 deletions fix_nmea_log.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
#!/bin/bash
# Reorder NMEA sentences in a log so that each cycle starts with its GPRMC
# sentence, writing the result to <output_file>
# (default: <input>_fixed.<ext>). A cycle is terminated by a $GAGSA sentence.

usage() {
    echo "Usage: $0 <input_file> [output_file]"
}

# Sentence type that marks the end of a cycle
CYCLE_END="GAGSA"

# check if args are given ('-gt' is a numeric comparison; '>' would compare lexically)
[[ $# -gt 0 ]] && file="$1" || { usage; exit 0; }
# check args
[[ "${file}" == "-h" || "${file}" == "--help" ]] && { usage; exit 0; }
# check if first arg is a readable file; error goes to stderr (>&2), not stdout
[[ ! -r "${file}" ]] && { echo "Error: File ${file} not found!" >&2; usage; exit 1; }
# check if second arg exists, else construct output filename based on input filename
[[ -n "$2" ]] && output="$2" || output="${file%.*}_fixed.${file##*.}"

# Truncate any pre-existing output file so reruns don't append duplicate data
: > "${output}"

# list of sentences in the current cycle
declare -a cycle
RMC=""

# -r: keep backslashes in the data literal instead of treating them as escapes
while read -r line; do
    # skip empty lines
    [[ -z "${line}" ]] && continue

    # get sentence type: strip everything after the first comma, then the leading "$"
    cs="${line%%,*}"
    cs="${cs#*$}"

    if [[ "${cs}" == "GPRMC" ]]; then
        # this sentence type should be placed at the start of the cycle
        RMC="${line}"
    elif [[ "${cs}" == "${CYCLE_END}" ]]; then
        # write sentences to output file in corrected order;
        # skip RMC if none was seen this cycle (avoids a blank line)
        [[ -n "${RMC}" ]] && echo "${RMC}" >> "${output}"
        for item in "${cycle[@]}"; do
            echo "${item}" >> "${output}"
        done
        echo "${line}" >> "${output}"

        # clear list and RMC so a cycle without GPRMC doesn't reuse a stale one
        cycle=()
        RMC=""
    else
        # add sentence to list
        cycle+=("${line}")
    fi
done < "${file}"

echo "Output written to ${output}"
Loading