diff --git a/Create_PLEXOS_database/1-parse-matpower/.Rhistory b/Create_PLEXOS_database/1-parse-matpower/.Rhistory
deleted file mode 100644
index d8d9691..0000000
--- a/Create_PLEXOS_database/1-parse-matpower/.Rhistory
+++ /dev/null
@@ -1,512 +0,0 @@
-# other.inputs <- "create_other_inputs.R" # old
-other.inputs <- "create_other_inputs_rts2016.R" # new
-output.dir <- "outputs"
-#------------------------------------------------------------------------------|
-#------------------------------------------------------------------------------|
-# nesta opf format parser - based on NESTA v0.6.0
-# -----------------------------------------------
-#
-# format requirements:
-#   - one function that creates and returns a matlab struct of
-#     tables and strings
-#   - comments that precede tables are two lines.
-#     first: %% data_description
-#     second: % col1 col2 col3, etc
-#   - colnames have no spaces
-#   - function definition has an output, an equal sign, and no arguments
-#   - fields are either one-liner strings or tables that begin with
-#     'structname.name = [' and end with '];'
-#
-#------------------------------------------------------------------------------|
-#------------------------------------------------------------------------------|
-#------------------------------------------------------------------------------|
-# helper functions ----
-#------------------------------------------------------------------------------|
-func.obj.init <- function(dt) {
-  # args: data.table with a column called orig.text
-  # use: finds where the function is defined
-  # result: global environment variables: case.name, struct.name (to be used
-  #         for building the list of elements), and struct.list (empty list to
-  #         be populated as the rest of the file is parsed)
-  # find function definition
-  funct.decl <- which(grepl("^function.*=", dt$orig.text))
-  if (length(funct.decl) == 0) {
-    stop(paste("It looks like there is either no function declaration",
-               "in this file or that no function declarations define",
-               sprintf("an output. Please check your file, %s", file.p)))
-  } else if (length(funct.decl) > 1) {
-    stop(paste("It looks like there is more than one function definition",
-               "in this file. This script cannot handle that at the moment.",
-               sprintf("Please check your file, %s", file.p)))
-  }
-  # initialize global variables to be accessed by other functions
-  case.name <- dt[funct.decl, tstrsplit(orig.text, "=")[[2]]]
-  case.name <<- gsub(" ", "", case.name)
-  struct.name <- dt[funct.decl, tstrsplit(orig.text, "=")[[1]]]
-  struct.name <<- gsub("function| ", "", struct.name)
-  struct.list <<- list()
-}
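
[Editor's note] To make the format requirements above concrete, here is a minimal sketch of the kind of .m input this parser expects, with a hypothetical case name and field, and the same declaration regex applied (assumes data.table is loaded):

library(data.table)
# hypothetical minimal case file in the NESTA/MATPOWER style described above
m.lines <- c("function mpc = case3_example",
             "mpc.version = '2';",
             "%% bus data",
             "% bus_i type Pd",
             "mpc.bus = [",
             "1 3 0;",
             "2 1 90;",
             "];")
dt <- data.table(orig.text = m.lines)
which(grepl("^function.*=", dt$orig.text))  # 1: the single declaration line
tstrsplit(dt$orig.text[1], "=")[[2]]        # " case3_example" -> case.name after gsub
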
-func.find.objs <- function(dt, strct.strng) {
-  # find indices for all fields added to the structure
-  # assumes the file has only one-liners and tables.
-  # one.lines: index will be the index of the line where everything is defined
-  # tables: begin index is the line with the table name; column names are one up;
-  #         data starts one down
-  #         end index is the line after the last line of data
-  dt[grepl(paste0("^[^%]*", strct.strng, "\\..*="), orig.text), obj.new := TRUE]
-  dt[grepl("=.*\\[", orig.text) & obj.new == TRUE, tag := "table.beg"]
-  dt[grepl("\\]", orig.text) & is.na(obj.new), tag := "table.end"]
-  dt[obj.new & is.na(tag), tag := "one.line"]
-  # grab info
-  dt[, index := .I]
-  field.loc <- dt[!is.na(tag), .(tag, index)]
-  # clean up
-  dt[, c("obj.new", "index", "tag") := NULL]
-  return(field.loc)
-}
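
[Editor's note] A toy run of the tagging logic above, on three representative lines (the struct name "mpc" is hypothetical):

library(data.table)
toy <- data.table(orig.text = c("mpc.version = '2';", "mpc.bus = [", "];"))
toy[grepl("^[^%]*mpc\\..*=", orig.text), obj.new := TRUE]
toy[grepl("=.*\\[", orig.text) & obj.new == TRUE, tag := "table.beg"]
toy[grepl("\\]", orig.text) & is.na(obj.new), tag := "table.end"]
toy[obj.new & is.na(tag), tag := "one.line"]
toy[, .(orig.text, tag)]  # one.line / table.beg / table.end
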
-func.build.list <- function(dt, obj.locs) {
-  # from already created tables, pull out elements and add them to a list
-  # of all objects in the .m file
-  if (nrow(obj.locs[!(tag %in% c("one.line", "table.beg", "table.end"))])) {
-    stop(paste("Input obj.locs table has at least one tag that isn't",
-               "one.line, table.beg, or table.end. Don't know how to handle",
-               "that. Please check the table"))
-  }
-  for (i in seq(nrow(obj.locs))) {
-    # get object name
-    i.str <- dt[obj.locs[i, index], orig.text]
-    i.name <- tstrsplit(i.str, "=")[[1]]
-    i.name <- gsub(paste0(" |", struct.name, "\\."), "", i.name)
-    # get value, which changes based on type
-    if (obj.locs[i, tag] == "one.line") {
-      i.value <- tstrsplit(i.str, "=")[[2]]
-      i.value <- gsub(" |;|\\'", "", i.value)
-      struct.list[[i.name]] <- i.value
-    } else if (obj.locs[i, tag] == "table.beg") {
-      # get and clean table (strip semicolons, split on whitespace)
-      i.table <- dt[obj.locs[i, index + 1]:obj.locs[i + 1, index - 1],
-                    tstrsplit(gsub(";", "", orig.text), "\t|\\s+")]
-      # deal with nans if needed
-      for (j in names(i.table)) {
-        set(i.table, which(i.table[[j]] == "nan"), j, NA)
-      }
-      # remove first col if it is blank
-      if (all(i.table[[1]] == "" | is.na(i.table[[1]]))) {
-        i.table[, (1) := NULL]
-      }
-      # get previous line, assuming it holds the column names
-      i.colnames <- dt[obj.locs[i, index - 1]]
-      if (!grepl("%", i.colnames)) {
-        message(paste0("couldn't find column names in line above ",
-                       "table beginning. please check format of table ",
-                       struct.name, ".", i.name, ". In the meantime, ",
-                       "labelling columns with generic names."))
-        i.colnames <- paste("V", 1:ncol(i.table), sep = "", collapse = "\t")
-      }
-      # set new column names
-      i.colnames <- unlist(tstrsplit(i.colnames, "\t|\\s+"))
-      i.colnames <- i.colnames[!grepl("%", i.colnames)]
-      # treat gencost as a special case
-      if (i.name == "gencost") {
-        # set names for first four (non-variable) columns
-        setnames(i.table,
-                 names(i.table[, .SD, .SDcols = 1:4]),
-                 c("model", "startup", "shutdown", "n"))
-        # determine if cost is piecewise linear or polynomial
-        if (length(unique(i.table$model)) > 1) {
-          stop("I don't know how to handle multiple gen cost types yet")
-        }
-        if (unique(i.table$model) == "1") {
-          # name using model "1" scheme (n = number of piecewise
-          # linear segments and subsequent cols are named
-          # p0, f0, ..., p(n-1), f(n-1))
-          tab.names <- names(i.table)
-          tab.names <- tab.names[grepl("^V", tab.names)]
-          if (length(tab.names) != 2 * as.numeric(i.table[, max(n)])) {
-            stop(paste0("2 * max(n) is not equal to the number of extra ",
-                        "columns in gen.cost. Please verify data is ",
-                        "in the right format"))
-          }
-          new.names.p <- paste0("p", 0:(length(tab.names)/2 - 1))
-          new.names.f <- paste0("f", 0:(length(tab.names)/2 - 1))
-          new.names <- c(rbind(new.names.p, new.names.f))
-          setnames(i.table, tab.names, new.names)
-        }
-        if (unique(i.table$model) == "2") {
-          # name using model "2" scheme (n = number of coefficients
-          # in the polynomial and subsequent cols are named c(n-1) - c0)
-          tab.names <- names(i.table)
-          tab.names <- tab.names[grepl("^V", tab.names)]
-          if (length(tab.names) != i.table[, max(n)]) {
-            stop(paste0("max(n) is not equal to the number of extra ",
-                        "columns in gen.cost. Please verify data is ",
-                        "in the right format"))
-          }
-          new.names <- paste0("c", (length(tab.names) - 1):0)
-          setnames(i.table, tab.names, new.names)
-        }
-      } else { # any table but gencost
-        # adjust name setting if there is an error
-        tryCatch(setnames(i.table, colnames(i.table), i.colnames),
-                 error = function(e) {
-                   if (length(i.colnames) < length(colnames(i.table))) {
-                     message(paste0("not enough new colnames for table ",
-                                    struct.name, ".", i.name,
-                                    ". leaving some cols unnamed. ",
-                                    "please check table."))
-                     setnames(i.table,
-                              colnames(i.table)[seq_along(i.colnames)],
-                              i.colnames)
-                   } else {
-                     message(paste0("too many new colnames for table ",
-                                    struct.name, ".", i.name,
-                                    ". excluding extras. ",
-                                    "please check table."))
-                     setnames(i.table,
-                              colnames(i.table),
-                              i.colnames[seq_along(colnames(i.table))])
-                   }
-                 })
-      }
-      # now that the table is created and renamed, add it to the list
-      struct.list[[i.name]] <- i.table
-    } else if (obj.locs[i, tag] == "table.end") {
-      next
-    }
-  }
-  return(struct.list)
-}
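
[Editor's note] The interleaved p/f renaming above relies on c(rbind(...)) reading a two-row matrix column-major; a quick standalone check:

new.names.p <- paste0("p", 0:2)
new.names.f <- paste0("f", 0:2)
c(rbind(new.names.p, new.names.f))
# "p0" "f0" "p1" "f1" "p2" "f2"
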
-#------------------------------------------------------------------------------|
-# read in and parse file ----
-#------------------------------------------------------------------------------|
-# read in file
-file.dt <- data.table(orig.text = readLines(file.p))
-# get rid of blanks at beginnings of lines and blank lines
-file.dt[, orig.text := gsub("^\\s+|^\t", "", orig.text)]
-file.dt <- file.dt[orig.text != ""]
-# pull out case name, object name
-# creates vars case.name, struct.name, struct.list
-func.obj.init(file.dt)
-# prep for real parsing
-if (!(exists("case.name") & exists("struct.name") & exists("struct.list"))) {
-  stop(paste("At least one of case.name, struct.name, and struct.list",
-             "doesn't exist. Please initialize these by running",
-             "func.obj.init"))
-}
-# id where new objects are
-field.loc <- func.find.objs(file.dt, struct.name)
-# for all objects, grab and put in list
-struct.list <- func.build.list(file.dt, field.loc)
-#------------------------------------------------------------------------------|
-# reformat for use in scripts ----
-#------------------------------------------------------------------------------|
-# regions
-region.refnode.data <- struct.list$areas[, .(Region = area,
-                                             `Region.Reference Node` = refbus)]
-# nodes (node lpf TODO)
-node.data <- struct.list$bus[, .(Node = bus_i, Voltage = baseKV, Region = area,
-                                 Zone = zone)]
-# generators
-generator.data <- struct.list$gen[, .(Node = bus, Units = status,
-                                      `Max Capacity` = Pmax)]
-generator.data[, id := 1:.N, by = Node]
-generator.data[, Generator := paste0(Node, "_", id)]
-generator.data[, id := NULL]
-# change synchronous condenser max capacity from NA to 0
-generator.data[is.na(`Max Capacity`), `Max Capacity` := '0']
-gencost <- struct.list$gencost
-# lines
-line.data <- struct.list$branch[, .(`Node From` = fbus, `Node To` = tbus,
-                                    Resistance = r, Reactance = x,
-                                    `Max Flow` = rateA, rateA, rateB, rateC,
-                                    Units = status)]
-line.data[, id := 1:.N, by = .(`Node To`, `Node From`)]
-line.data[, Line := paste0(`Node From`, "_", `Node To`, "_", id)]
-line.data[, id := NULL]
-line.data[, `Min Flow` := -1 * as.numeric(`Max Flow`)]
-setcolorder(line.data, unique(c("Line", names(line.data))))
-all.tabs <- c()
-if (!exists("all.tabs")) all.tabs <- c()
-gen.params = fread('../../RTS_Data/gen_params.csv')
-gen.id = fread('../../RTS_Data/gen_id.csv')
-node.data[, Zone := as.numeric(Region)]
-node.data[, Region := substr(Region, 1, 1)] # just the hundreds digit of the bus number
-# add to all.tabs
-all.tabs <- c(all.tabs, "node.data")
-#------------------------------------------------------------------------------|
-# add load datafile pointers to nodes that have load on them ----
-#------------------------------------------------------------------------------|
-# hardcode regional file pointers
-region.load.da <- data.table(Region = c(1:3),
-                             Load = "timeseries_data_files\\load\\DA_hourly.csv")
-region.load.rt <- data.table(Region = c(1:3),
-                             Load = "timeseries_data_files\\load\\RT_5min.csv")
-# add to all.tabs
-all.tabs <- c(all.tabs, "region.load.da", "region.load.rt")
-#------------------------------------------------------------------------------|
-# add load participation factor for load nodes ----
-#------------------------------------------------------------------------------|
-# can leave node load in MW; psse2plexos will normalize
-node.lpf <- struct.list$bus[, .(Node = bus_i, Load = Pd, Status = 1)]
-# add to all.tabs
-all.tabs <- c(all.tabs, "node.lpf")
-#------------------------------------------------------------------------------|
-# fuels and fuel price ----
-#------------------------------------------------------------------------------|
-gen.fuel <- gen.id[, Generator := paste0(Bus, "_", ID)]
-gen.fuel <- gen.fuel[, .(Generator, Type = Unit)]
-# fuel type to unit type
-fuels <- gen.params[, .(Type = Unit, Fuel)]
-# fuel type to generator
-gen.fuel <- merge(gen.fuel, fuels, all.x = TRUE, by = 'Type')
-# leave only Generator and Fuel columns
-gen.fuel[, Type := NULL]
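
[Editor's note] The Node_id naming used for generators and lines above is data.table's per-group row counter; a standalone illustration with made-up buses:

library(data.table)
gens <- data.table(Node = c("101", "101", "102"))  # hypothetical buses
gens[, id := 1:.N, by = Node]                      # 1, 2, 1
gens[, Generator := paste0(Node, "_", id)]
gens$Generator  # "101_1" "101_2" "102_1"
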
-# add fuel to synchronous condensers
-gen.fuel[is.na(Fuel), Fuel := 'SynchCond']
-# get fuel price (2010$/MMBtu)
-fuel.price <- fread("../../RTS_Data/fuel_prices.csv")
-fuel.price[, Price := round(Price, 3)]
-# add to get written out
-all.tabs <- c(all.tabs, "gen.fuel", "fuel.price")
-#------------------------------------------------------------------------------|
-# generator outages by type ----
-#------------------------------------------------------------------------------|
-# read gen types
-gen.type = gen.id[, Generator := paste0(Bus, "_", ID)]
-gen.type = gen.type[, .(Generator, Unit)]
-# read generator outage info
-gen.outages = gen.params[, .(Unit, `Forced Outage Rate` = Outage,
-                             `Mean Time to Repair` = MTTR)]
-# combine gens to outage
-gen.outages = merge(gen.type, gen.outages, all.x = TRUE, by = 'Unit')
-gen.outages = gen.outages[, .(Generator, `Forced Outage Rate`,
-                              `Mean Time to Repair`)]
-# set outage for synchronous condensers to 0
-gen.outages[is.na(`Forced Outage Rate`),
-            ":=" (`Forced Outage Rate` = 0, `Mean Time to Repair` = 0)]
-# add to get written out
-all.tabs <- c(all.tabs, "gen.outages")
-#------------------------------------------------------------------------------|
-# cost data ----
-#------------------------------------------------------------------------------|
-gen.cost.data <- cbind(generator.data[, .(Generator)],
-                       struct.list$gencost)
-# remove unneeded cols
-model <- gen.cost.data[1, model]
-gen.cost.data[, c("model", "startup", "shutdown", "n") := NULL]
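
[Editor's note] The model-1 branch below assumes heat rates arrive as $/hr under a $1/MMBtu dummy fuel price and divides by load point; the arithmetic, checked on hypothetical numbers:

cost.dollar.per.hr <- 850  # hypothetical gencost value at the dummy price
load.point.mw <- 100       # hypothetical load point
dummy.price <- 1           # $/MMBtu
cost.dollar.per.hr / (load.point.mw * dummy.price)  # 8.5 MMBtu/MWh
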
-if (model == 1) {
-  # assumes heat rates are in $/hr and calculated using a dummy fuel price
-  # of $1/MMBtu
-  # converts from $/hr to MMBtu/MWh by dividing by load point
-  # ($/hr * 1 MMBtu/$1 * 1 hr/[loadpoint MW * hr = MWh])
-  # reset names to be load point and heat rate for plexos
-  names.to.convert <- names(gen.cost.data)
-  names.to.convert <- names.to.convert[names.to.convert != "Generator"]
-  names.converted <- gsub("p", "Load Point", names.to.convert)
-  names.converted <- gsub("f", "Heat Rate", names.converted)
-  setnames(gen.cost.data, names.to.convert, names.converted)
-  # names are almost plexos names, but have band id numbers at the end
-  # now, melt down, get band id, convert to real MMBtu/MWh
-  # there is probably a fancier/more efficient way to do this
-  gen.cost.data <- melt(gen.cost.data, id.vars = "Generator",
-                        variable.factor = FALSE, value.factor = FALSE)
-  gen.cost.data[, Band := as.numeric(substr(variable,
-                                            nchar(variable),
-                                            nchar(variable))) + 1]
-  gen.cost.data[, variable := substr(variable, 1, nchar(variable) - 1)]
-  # recast to get all Load Point and Heat Rate back as column names
-  gen.cost.data[, value := as.numeric(value)]
-  gen.cost.data <- data.table(dcast(gen.cost.data, Generator + Band ~ variable))
-  gen.cost.data = merge(gen.cost.data,
-                        merge(gen.fuel,
-                              fuel.price, by = 'Fuel')[, .(Generator, Price)],
-                        by = 'Generator')
-  # heat rate is in $/hr, assuming fuel is $1/MMBtu. Convert to MMBtu/MWh
-  gen.cost.data[, `Heat Rate` := `Heat Rate`/(`Load Point`*`Price`)][, Price := NULL]
-  # TODO HMMM SOMETHING IS WRONG WITH THESE HEAT RATE NUMBERS
-} else if (model == 2) {
-  # polynomial function coefficients
-  # reset names to be plexos properties
-  setnames(gen.cost.data, "c0", "Heat Rate Base")
-  # reset numbering of c to index from 1
-  names.to.convert <- names(gen.cost.data)
-  names.converted <- names.to.convert[grepl("^c", names.to.convert)]
-  names.converted <- gsub("c", "", names.converted)
-  names.converted <- as.character(as.numeric(names.converted) + 1)
-  names.converted <- paste("Heat Rate Incr", names.converted)
-  setnames(gen.cost.data, names.to.convert, names.converted)
-} else {
-  stop("heat rate model is not 1 or 2. not sure how to treat this data.")
-}
-# round values to 1 decimal place
-gen.cost.data[, c('Heat Rate', 'Load Point') := list(round(`Heat Rate`, 1),
-                                                     round(`Load Point`, 1))]
-all.tabs <- c(all.tabs, "gen.cost.data")
-#------------------------------------------------------------------------------|
-# start costs ----
-#------------------------------------------------------------------------------|
-# requires that fuel.price and gen.fuel tables have been read in (see section
-# "fuels and fuel price")
-gen.startshut <- cbind(generator.data[, .(Generator)],
-                       struct.list$gencost[, .(startup, shutdown)])
-# start and shutdown costs are given in MMBtu. convert this to $ by
-# adding gen fuel type and fuel prices, then multiplying by fuel price
-gen.startshut <- merge(gen.startshut, gen.fuel, by = "Generator", all.x = TRUE)
-gen.startshut <- merge(gen.startshut, fuel.price, by = "Fuel", all.x = TRUE)
-gen.startshut[, Price := as.numeric(Price)]
-# use Oil/Steam price to calculate coal gen start and shutdown costs
-gen.startshut[Fuel == "Coal/Steam", Price := fuel.price[Fuel == "Oil/Steam", Price]]
-# calculate and add start and shutdown costs based on input heat and fuel price
-gen.startshut[, `Start Cost` := Price * as.numeric(startup)]
-gen.startshut[, `Shutdown Cost` := Price * as.numeric(shutdown)]
-# save only plexos property columns
-gen.startshut <- gen.startshut[, .(Generator, `Start Cost`, `Shutdown Cost`)]
-# round values to integers
-gen.startshut[, c('Start Cost', 'Shutdown Cost') := list(round(`Start Cost`),
                                                          round(`Shutdown Cost`))]
-all.tabs <- c(all.tabs, "gen.startshut")
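
[Editor's note] A one-line check of the start-cost conversion above, with hypothetical values: a 500 MMBtu start at $3/MMBtu costs $1,500.

startup.mmbtu <- 500  # hypothetical start energy from gencost
price <- 3            # hypothetical fuel price, $/MMBtu
price * startup.mmbtu # 1500 ($)
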
-#------------------------------------------------------------------------------|
-# attach VG ----
-#------------------------------------------------------------------------------|
-# two things: 1. pass through file pointers to VG rating files (these files
-# are created manually based on site selection) 2. add a new generator for all
-# the VG gens
-# assumes generator.data has already been made
-# assumes gen.fuel is already made
-# read in profiles to read out again (inefficient but at least treated the
-# same as all other property files...)
-gen.da.vg <- fread("../../RTS_Data/vg_gens_DA.csv")
-gen.rt.vg <- fread("../../RTS_Data/vg_gens_RT.csv")
-gen.csp <- fread("../../RTS_Data/csp_gens.csv")
-storage.csp <- fread("../../RTS_Data/storage.csp.csv")
-storage.props <- fread("../../RTS_Data/storage.props.csv")
-storage.props.rt <- fread("../../RTS_Data/storage.props.rt.csv")
-# get vg max cap and add to the total generator table
-vg.gens <- fread("../../RTS_Data/vg_gens_maxMW.csv", colClasses = "character")
-# add node
-vg.gens[, Node := tstrsplit(Generator, "_")[[1]]]
-vg.gens[, `Min Stable Level` := "0"]
-generator.data <- merge(generator.data,
-                        vg.gens,
-                        by = c("Generator", "Max Capacity", "Node"),
-                        all = TRUE)
-# add units since we don't have this in the mpc file
-generator.data[, Units := "1"]
-# get rid of some PV/Wind units
-disappear.units <- c("101_pv", "101_pv_2", "101_pv_3", "101_pv_4", "102_pv",
-                     "103_pv", "104_pv", "119_pv", "310_pv", "310_pv_2",
-                     "312_pv", "314_pv", "314_pv_2", "314_pv_3", "314_pv_4",
-                     "319_pv", "324_pv", "324_pv_2", "324_pv_3", "118_rtpv",
-                     "118_rtpv_8", "308_rtpv", "313_rtpv_11", "320_rtpv",
-                     "320_rtpv_2", "320_rtpv_3", "320_rtpv_4", "320_rtpv_5",
-                     "320_rtpv_6", "314_pv_5", "303_wind", "317_wind", "212_csp")
-generator.data[Generator %in% disappear.units, Units := "0"]
-# add fuel types to these gens
-vg.gen.fuel <- vg.gens[, .(Generator)]
-vg.gen.fuel[grepl("_pv", Generator), Fuel := "PV"]
-vg.gen.fuel[grepl("_csp", Generator), Fuel := "CSP"]
-vg.gen.fuel[grepl("_rtpv", Generator), Fuel := "RTPV"]
-vg.gen.fuel[grepl("_wind", Generator), Fuel := "Wind"]
-gen.fuel <- rbind(gen.fuel, vg.gen.fuel)
-# add these to all.tabs to be written out at the end
-all.tabs <- c(all.tabs, "gen.da.vg", "gen.rt.vg", "gen.csp", "storage.csp",
-              "storage.props", "storage.props.rt")
-#------------------------------------------------------------------------------|
-# min gen ----
-#------------------------------------------------------------------------------|
-# have to match min gen to individual unit, because these are by size and fuel
-gen.mingen <- merge(gen.id[, .(Generator, Unit, PG)],
-                    gen.params[, .(Unit, MinGen)],
-                    by = "Unit",
-                    all.x = TRUE)
-# keep only relevant columns
-gen.mingen <- gen.mingen[, .(Generator, `Min Stable Level` = MinGen)]
-gen.mingen.rtpv <- gen.fuel[Fuel == 'RTPV']
-gen.mingen.rtpv[, Fuel := NULL]
-gen.mingen.rtpv0 <- copy(gen.mingen.rtpv)
-gen.mingen.rtpv0[, `Min Stable Level` := 0]
-gen.mingen.rtpv = merge(gen.mingen.rtpv,
-                        generator.data[, .(Generator, `Max Capacity`)],
-                        by = 'Generator')
-gen.mingen.rtpv[, `Min Stable Level` := as.numeric(`Max Capacity`) * 0.6]
-gen.mingen.rtpv[, `Max Capacity` := NULL]
-gen.mingen.rtpv[, `Min Stable Level` := round(`Min Stable Level`)]
-gen.mingen = rbind(gen.mingen, gen.mingen.rtpv)
-# add to all.tabs to be written out
-all.tabs <- c(all.tabs, "gen.mingen", "gen.mingen.rtpv0")
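
[Editor's note] The copy() above matters: plain assignment of a data.table creates an alias, so a later := would have written the RTPV min-gen values into gen.mingen.rtpv0 as well. A minimal demonstration of the reference semantics:

library(data.table)
a <- data.table(x = 1)
b <- a          # alias: both names point at the same table
b[, x := 2]
a$x             # 2 -- a changed too
d <- copy(a)    # true copy
d[, x := 3]
a$x             # still 2
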
-#------------------------------------------------------------------------------|
-# attach hydro ----
-#------------------------------------------------------------------------------|
-# just like vg profiles, just read in to write out (to keep in the same format
-# as everything else and also in case we need to do something with it eventually)
-gen.hydro <- fread("../../RTS_Data/hydro_profiles.csv")
-# add to all.tabs
-all.tabs <- c(all.tabs, "gen.hydro")
-#------------------------------------------------------------------------------|
-eligible.gens <- c("Oil/Steam", "Coal/Steam", "Oil/CT", "NG/CC", "NG/CT",
-                   "PV", "Wind", "CSP")
-# add reserve(s) which is x% of load in each region and what gens can provide it
-l.reserve <- c("Spin Up")
-l.is.enabled <- c(-1)
-l.reserve.type <- c(1)
-l.reserve.percent <- c(3.0)
-l.scenario.name <- c("Add Spin Up")
-l.reserve.violation <- c(4000.0)
-l.reserve.timeframe.sec <- c(600.0)
-l.mutually.exclusive <- c(1)
-# add reserve(s) in which risk is defined with a data file
-d.reserve <- c("Flex Up", "Flex Down", "Reg Up", "Reg Down")
-d.is.enabled <- c(-1, -1, -1, -1)
-d.reserve.type <- c(1, 2, 1, 2)
-d.scenario.name <- c("Add Flex Reserves", "Add Flex Reserves",
-                     "Add Regulation Reserves", "Add Regulation Reserves")
-d.reserve.violation <- c(4100, 4100, 3900, 3900)
-d.reserve.timeframe.sec <- c(1200.0, 1200.0, 300.0, 300.0)
-d.mutually.exclusive <- c(1, 1, 1, 1)
-reserve.data <- data.table('Reserve' = c(l.reserve, d.reserve),
-                           'Is Enabled' = c(l.is.enabled, d.is.enabled),
-                           'Type' = c(l.reserve.type, d.reserve.type),
-                           'Scenario' = c(l.scenario.name, d.scenario.name),
-                           'Timeframe' = c(l.reserve.timeframe.sec, d.reserve.timeframe.sec),
-                           'VoRS' = c(l.reserve.violation, d.reserve.violation),
-                           'Mutually Exclusive' = c(l.mutually.exclusive, d.mutually.exclusive))
-reserve.regions <- region.refnode.data[]
-reserve.regions <- reserve.regions[, .(Reserve = l.reserve, Region,
-                                       `Load Risk` = l.reserve.percent)]
-reserve.generators <- gen.fuel[Fuel %in% eligible.gens, ]
-reserve.generators.noCSP <- reserve.generators[!Fuel == 'CSP', ]
-reserve.generators <- reserve.generators[, .(Reserve = c(rep(l.reserve, length(Generator) * length(l.reserve)),
-                                                         rep(d.reserve, each = length(Generator))),
-                                             Generator = c(rep(Generator, times = length(l.reserve) + length(d.reserve))))]
-reserve.generators.noCSP <- reserve.generators.noCSP[, .(Reserve = c(rep(l.reserve, length(Generator) * length(l.reserve)),
-                                                                     rep(d.reserve, each = length(Generator))),
-                                                         Generator = c(rep(Generator, times = length(l.reserve) + length(d.reserve))))]
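
[Editor's note] The rep() construction above is effectively a cross join of reserves and eligible generators; a small check with two hypothetical gens:

l.reserve <- "Spin Up"
d.reserve <- c("Flex Up", "Flex Down")
gens <- c("101_1", "102_1")  # hypothetical generator names
data.frame(Reserve = c(rep(l.reserve, length(gens) * length(l.reserve)),
                       rep(d.reserve, each = length(gens))),
           Generator = rep(gens, times = length(l.reserve) + length(d.reserve)))
# 6 rows: each reserve paired with each generator
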
diff --git a/Create_PLEXOS_database/2-create-excel/.Rhistory b/Create_PLEXOS_database/2-create-excel/.Rhistory
deleted file mode 100644
index b4f2c5a..0000000
--- a/Create_PLEXOS_database/2-create-excel/.Rhistory
+++ /dev/null
@@ -1,70 +0,0 @@
-sessionInfo()
-pacman::p_load(lubridate, data.table)
-# build a year of 5-minute timestamps; by = 5*60 steps a POSIXct in seconds
-x = seq(as.POSIXct("2024-01-01 00:00:00"), as.POSIXct("2024-12-31 23:55:00"), by = 5*60)
-# periods run 1-288 per day: 12 five-minute periods per hour
-ts = data.table(Year = year(x), Month = month(x), Day = day(x),
-                Period = (minute(x)/5 + 1) + (12*hour(x)), value = 0)
-max(ts$Period)
-12*24
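
[Editor's note] The Period arithmetic the session converged on, checked at the day's boundaries (lubridate assumed loaded):

library(lubridate)
p <- function(t) (minute(t)/5 + 1) + 12*hour(t)
p(as.POSIXct("2024-01-01 00:00:00"))  # 1   (first 5-minute period)
p(as.POSIXct("2024-01-01 23:55:00"))  # 288 (last 5-minute period)
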
-write.csv(ts, file = 'Documents/repos/RTS-GMLC/RTS_Data/timeseries_data_files/hydro/region1_Hydro_RT.csv', row.names = F, quote = F)
-write.csv(ts, file = 'Documents/repos/RTS-GMLC/RTS_Data/timeseries_data_files/hydro/region2_Hydro_RT.csv', row.names = F, quote = F)
-write.csv(ts, file = 'Documents/repos/RTS-GMLC/RTS_Data/timeseries_data_files/hydro/region3_Hydro_RT.csv', row.names = F, quote = F)
-setwd('Documents/repos/RTS-GMLC/')
-setwd('Create_PLEXOS_database/')
-source('create_RTS_spreadsheet.R')
-source('1-parse-matpower/parse_matpower.R')
-setwd('../2-create-excel')
-source('run_PSSE2PLEXOS.R')
-Properties.sheet[property == 'Heat Rate']
diff --git a/RTS_Data/timeseries_data_files/load/.Rhistory b/RTS_Data/timeseries_data_files/load/.Rhistory
deleted file mode 100644
index 8708cdc..0000000
--- a/RTS_Data/timeseries_data_files/load/.Rhistory
+++ /dev/null
@@ -1,512 +0,0 @@
-library(rplexos)
-library(reshape2)
-db = plexos_open('//plexossql/data/moconnel/CA Flexibility Paper/Mark_runs/html')
-props = query_property(db)
-prices = query_interval(db, 'Region', 'Price')
-# long to wide: one column of prices per region
-region.prices = dcast(prices, time ~ name, value.var = 'value')
-# count zero-price hours, overall and per region column (column 1 is time)
-sum(region.prices[, 2:44] == 0)
-zero.price.hours = data.frame(matrix(1, 43))
-for (i in 1:(ncol(region.prices) - 1)) {
-  zero.price.hours[i, 1] = sum(region.prices[, i + 1] == 0)
-}
-write.csv(region.prices, 'region_prices.csv', row.names = FALSE)
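
[Editor's note] The loop bound above needs the parentheses; in R, 1:n-1 parses as (1:n)-1, which silently starts the index at 0:

n <- 4
1:n - 1    # 0 1 2 3  -- off-by-one range
1:(n - 1)  # 1 2 3    -- intended range
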
-library(lubridate)
-library(dplyr)
-dc = read.csv('//plexossql/data/moconnel/RTS-2016 Stuff/Davis_PLEXOSformat.csv')
-replace = read.csv('//plexossql/data/moconnel/RTS-2016 Stuff/HydroProfiles/DevilCanyon_2024_RTS2016_Hydro.csv', stringsAsFactors = FALSE)
-new = replace
-date = as.data.frame(strptime(new[, 1], '%m/%d/%Y %H:%M'))
-new['Year'] = year(date[, 1])
-new['Month'] = month(date[, 1])
-new['Day'] = day(date[, 1])
-new['Hour'] = hour(date[, 1])
-# 2024-03-10 02:00 doesn't exist in local time (DST spring-forward), so
-# strptime returns NA there; fill that row by hand
-new[1658, 'Year'] = 2024
-new[1658, 'Month'] = 3
-new[1658, 'Day'] = 10
-new[1658, 'Hour'] = 2
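
[Editor's note] The Hour recode plus dcast below gives the 24-column day layout used by the formatted files; a two-day toy version:

library(reshape2)
toy <- data.frame(Year = 2024, Month = 1, Day = rep(1:2, each = 2),
                  Hour = c(0, 1, 0, 1), Value = 1:4)  # hypothetical values
toy$Hour[toy$Hour == 0] <- 24  # midnight becomes period 24 of the same day
dcast(toy, Year + Month + Day ~ Hour, value.var = 'Value')
#   Year Month Day 1 24
#   2024     1   1 2  1
#   2024     1   2 4  3
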
-new = select(new, Year, Month, Day, Hour, Value)
-new[, 'Hour'][new$Hour == 0] = 24
-sum(new$Value)  # check: total should be unchanged by the reshape
-new = dcast(new, Year + Month + Day ~ Hour, value.var = 'Value')
-write.csv(new, 'DC.csv', row.names = FALSE)
-library(plyr)
-mingen = read.csv('C:/users/moconnel/documents/RTS-2016/gen_updates/mingen.csv')
-# average min-gen fraction by unit type
-avg_mingen = dcast(mingen, Type ~ ., value.var = 'Percent.of.Max.Cap', fun.aggregate = mean)
-# total max capacity by unit type
-sum_mingen = dcast(mingen, Type ~ ., value.var = 'MaxCap', fun.aggregate = sum)
-mingen = join(mingen, sum_mingen, by = "Type")
-# capacity-weighted min stable level by type: weight each unit's
-# Min.Stable/MaxCap by its share of the type's total capacity
-mingen$PercentTotalCap = mingen$MaxCap / mingen$.
-mingen$WeightedMaxCapPercent = mingen$Min.Stable / mingen$MaxCap * mingen$PercentTotalCap
-weighted_mingen = dcast(mingen, Type ~ ., value.var = "WeightedMaxCapPercent", fun.aggregate = sum)
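
[Editor's note] The two-step weighting above is just a capacity-weighted mean; base R's weighted.mean on hypothetical numbers gives the same result:

max.cap <- c(100, 300)   # hypothetical unit sizes (MW)
min.stable <- c(40, 90)  # hypothetical min stable levels (MW)
share <- max.cap / sum(max.cap)
sum(min.stable / max.cap * share)             # 0.325
weighted.mean(min.stable / max.cap, max.cap)  # 0.325, same thing
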
-library(data.table)
-library(dplyr)
-library(reshape2)
-library(plyr)
-setwd('//nrelqnap01d/PLEXOS/Projects/IM3')
-lpf = data.table(read.csv('region_lpf.csv'))
-node_zone = data.table(read.csv('node_zone.csv'))
-lpf_spring = join(filter(lpf, Time == '2024_Spring'), node_zone, by = 'Node')
-lpf_summer = join(filter(lpf, Time == '2024_Summer'), node_zone, by = 'Node')
-lpf_autumn = join(filter(lpf, Time == '2024_Autumn'), node_zone, by = 'Node')
-lpf_winter = join(filter(lpf, Time == '2024_Winter'), node_zone, by = 'Node')
-# check seasonal LPF totals by region
-region_sum_spring = lpf_spring[, .(LPF_region_sum = sum(LPF)), by = .(Region)]
-region_sum_summer = lpf_summer[, .(LPF_region_sum = sum(LPF)), by = .(Region)]
-region_sum_autumn = lpf_autumn[, .(LPF_region_sum = sum(LPF)), by = .(Region)]
-region_sum_winter = lpf_winter[, .(LPF_region_sum = sum(LPF)), by = .(Region)]
-sum(region_sum_spring$LPF_region_sum)
-sum(region_sum_summer$LPF_region_sum)
-sum(region_sum_autumn$LPF_region_sum)
-sum(region_sum_winter$LPF_region_sum)
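
[Editor's note] The session later experiments with attaching these group sums back onto the node-level table; with data.table's := the sum-and-join collapses to one grouped assignment (illustrative values):

library(data.table)
lpf <- data.table(Node = 1:4, Region = c("A", "A", "B", "B"),
                  LPF = c(0.1, 0.2, 0.3, 0.4))  # hypothetical LPFs
lpf[, LPF_region_sum := sum(LPF), by = Region]  # no merge needed
lpf
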
-county_sum_spring = lpf_spring[, .(LPF_county_sum = sum(LPF)), by = .(County, State, Country)]
-county_sum_summer = lpf_summer[, .(LPF_county_sum = sum(LPF)), by = .(County, State, Country)]
-county_sum_autumn = lpf_autumn[, .(LPF_county_sum = sum(LPF)), by = .(County, State, Country)]
-county_sum_winter = lpf_winter[, .(LPF_county_sum = sum(LPF)), by = .(County, State, Country)]
-unique(lpf_autumn[, c('County', 'State', 'Country')])
-write.csv(lpf_autumn, 'test.csv', row.names = FALSE)
-# attach the group sums back onto the node-level table
-lpf_spring = join(lpf_spring, lpf_spring[, .(LPF_region_sum = sum(LPF)), by = Region], by = 'Region')
-lpf_spring = join(lpf_spring, lpf_spring[, .(LPF_county_sum = sum(LPF)), by = .(County, State, Country)], by = c('County', 'State', 'Country'))
-library(data.table)
-setwd('C:/users/moconnel/documents/RTS-2016')
-setwd('RTS2016_test_plexos_database/')
-setwd('data_files/load')
-# DA load: one melted file per region; drop index/time columns, add a
-# 1-24 Period counter per day, and name the load column after the region
-da.reg1 <- fread("Melted_APS_2020.csv")
-da.reg1[, c("V1", "Hour", "Minutes") := NULL]
-da.reg1[, Period := 1:24, by = .(Year, Month, Day)]
-setnames(da.reg1, "Load", "1")
-da.reg2 <- fread("Melted_NEVP_2020.csv")
-da.reg2[, c("V1", "Hour", "Minutes") := NULL]
-da.reg2[, Period := 1:24, by = .(Year, Month, Day)]
-setnames(da.reg2, "Load", "2")
-da.reg3 <- fread("Melted_LDWP_2020.csv")
-da.reg3[, c("V1", "Hour", "Minutes") := NULL]
-da.reg3[, Period := 1:24, by = .(Year, Month, Day)]
-setnames(da.reg3, "Load", "3")
-# DA: combine
-da.load <- Reduce(function(...) merge(..., all = TRUE),
-                  list(da.reg1, da.reg2, da.reg3))
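
[Editor's note] Reduce folds the pairwise merge across the list of region tables; a minimal check with two hypothetical tables (data.table's merge defaults to the shared columns when the tables are unkeyed):

library(data.table)
a <- data.table(Year = 2020, Month = 1, Day = 1, Period = 1:2, "1" = c(10, 11))
b <- data.table(Year = 2020, Month = 1, Day = 1, Period = 1:2, "2" = c(20, 21))
Reduce(function(...) merge(..., all = TRUE), list(a, b))
# merges on (Year, Month, Day, Period), keeping all rows from both tables
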