diff --git a/.gitignore b/.gitignore index b362b37..807ea25 100644 --- a/.gitignore +++ b/.gitignore @@ -1,20 +1,3 @@ -<<<<<<< HEAD -<<<<<<< HEAD -======= -.DS_Store ->>>>>>> a6f59f203de28cf40b2ba12cb8e33b68b6052274 .Rproj.user .Rhistory .RData -======= -.Rhistory -.RData -*.swp -*.csv -*.png -*.rst -*.rdb -*.rdx -*.o -*.dll ->>>>>>> 600f742dfa647ab4bb65c5f3b29bd6222ae6a4ae diff --git a/README.md b/README.md index c448e8d..7f86d08 100644 --- a/README.md +++ b/README.md @@ -2,4 +2,3 @@ teamcode ======== The github repository for TEAM. This will be the high level repository to share code amongst the TEAM team. These should be functions or scripts that you may feel others will need to use at some point. Make sure these are adequately commented. Functions for comments should describe the arguments (in and return) as well as the key objectives. ->>>>>>> a6f59f203de28cf40b2ba12cb8e33b68b6052274 diff --git a/cameratrapping/TV_editor_events.r b/cameratrapping/TV_editor_events.r new file mode 100644 index 0000000..f2ba0be --- /dev/null +++ b/cameratrapping/TV_editor_events.r @@ -0,0 +1,85 @@ +# TV_editor_events.r +# + +rm(list = ls()) +require(lubridate) +require(ggplot2) +library(RPostgreSQL) # Need to explore RJDBC to connect with vertica + +# SQL to update tv_photo table with events_all column. This colun will store +# all the events. event_id is what is used by the Taxonomic Editor app and can +# be assigned to experts. + +# Load R Object created directly from DB or replace by query to database. 
+load("ct_data2014-04-02.gzip") +# Small Dataset +#animals = cam_trap_data[which(cam_trap_data$Photo.Type == 'Animal' & cam_trap_data$Site.Name == 'Volcán Barva'),] + + +# Order data by: Sampling Period, Sampling unit name and Photo Taken Time +## Temp code to generate data for one site +order_data <- f.order.data(animals) # Small dataset for testing +#order_data <- f.order.data(cam_trap_data) +# Separate into events +#CAUTION: this function removes records that are NOT images (e.g. Sampling Date records) +data1<-f.separate.events(order_data,5) #the entire grp column is what makes it unique +# Save data1 so don't need to rerun separate events. +#save(data1,file="data1.gzip",compress="gzip") +# Create small subset for viewing +view_data <- data.frame(data1$Site.Name,data1$Sampling.Period,data1$Sampling.Unit.Name,data1$Photo.Taken.Time,data1$grp) + +drv <- dbDriver("PostgreSQL") +con <- dbConnect(drv,user="teamuser",password="", + dbname="team_2.0_devel",port="5444", + host="data.team.sdsc.edu") + +# Will need to explore whether we want to include already reviewed images/events. + +for (i in 1:nrow(data1)) { + update_sql <- paste("UPDATE tv_photo SET events_all=",shQuote(data1$grp[i])," where id=",data1$ID[i]) + a <-dbSendQuery(con,update_sql) + i +} + +# Start from loading ct_groups or run from the beginning +#load("ct_groups") +# Create a new column with Family, Genus, Species +data1$sp_all <- paste(data1$Family,data1$Genus,data1$Species) +# Create a new column to store the random sample of events that will be used in +# taxonomic editor +data1[,'event'] <- NA +sites <- unique(data1$Site.Name) +# Create a dataframe to store the tv_photo.ID and the event_id's to be used in the taxonomic app. 
+final_event_df<- data.frame(final_event=character(0)) +for (i in 1:length(unique(data1$Site.Name))) { + site_index <- which(data1$Site.Name == sites[i]) + length(site_index) + temp_sp_unique <- unique(paste(data1$Family[site_index],data1$Genus[site_index],data1$Species[site_index])) + for (j in 1: length(temp_sp_unique)){ + #Data by TEAM Site by species + site_data <- data1[site_index,] + sp_by_site <- site_data[which(temp_sp_unique[j] == site_data$sp_all),] + # Get events, determine if there are 50 or more and insert into database + num_events = length(unique(sp_by_site$grp)) + # Get a list of unique events to select from if more than 50 or assign + # to the editor if less than 50 + unique_events = unique(sp_by_site$grp) + if (num_events > 50) { + # Get a random sample w/o replacement + event_index <- sample(1:num_events,50 , replace=F) + final_event <- unique_events[event_index] + } else { + final_event <- unique(sp_by_site$grp) + } + final_event_df <-rbind(final_event_df,data.frame(final_event=final_event)) + } +} +# SQL to insert the final events into the tv_photo table. This will add the event +# to the event_id as selected in the final_event_df dataframe. 
+for (i in 1:nrow(final_event_df)) { + update_sql <- paste("UPDATE tv_photo SET event_id=", + shQuote(final_event_df$final_event[i])," where events_all=", + shQuote(final_event_df$final_event[i])) + a <-dbSendQuery(con,update_sql) +} +Sys.time() diff --git a/cameratrapping/ct_groups b/cameratrapping/ct_groups new file mode 100644 index 0000000..1a5b1b1 Binary files /dev/null and b/cameratrapping/ct_groups differ diff --git a/climate/climate_quality_control.R b/climate/climate_quality_control.R new file mode 100644 index 0000000..5493981 --- /dev/null +++ b/climate/climate_quality_control.R @@ -0,0 +1,130 @@ +rm(list = ls()) +load("cl_data_new2014-04-24.gzip") + + +# Add columns to store Pass/Fails from Tests +cl_data_all$StepRH1 <-NA +cl_data_all$StepRH2 <-NA +cl_data_all$StepRH3 <-NA +cl_data_all$StepSR1 <-NA +cl_data_all$StepSR2 <-NA +cl_data_all$StepT1 <-NA +cl_data_all$StepT2 <-NA +cl_data_all$StepT3 <-NA +cl_data_all$StepT4 <-NA +cl_data_all$StepT5 <-NA +cl_data_all$Sensor1_RelativeHumidity <-as.numeric(cl_data_all$Sensor1_RelativeHumidity) +cl_data_all$Sensor2_RelativeHumidity <-as.numeric(cl_data_all$Sensor2_RelativeHumidity) +cl_data_all$Sensor3_RelativeHumidity <-as.numeric(cl_data_all$Sensor3_RelativeHumidity) +cl_data_all$Sensor1_AvgTemp <-as.numeric(cl_data_all$Sensor1_AvgTemp) +cl_data_all$Sensor2_AvgTemp <-as.numeric(cl_data_all$Sensor2_AvgTemp) +cl_data_all$Sensor3_AvgTemp <-as.numeric(cl_data_all$Sensor3_AvgTemp) +cl_data_all$Sensor1_TotalSolarRadiation <-as.numeric(cl_data_all$Sensor1_TotalSolarRadiation) +cl_data_all$Sensor2_TotalSolarRadiation <-as.numeric(cl_data_all$Sensor2_TotalSolarRadiation) + + + + + + +# Get a unique list of sites and loop through them +unique_sites <- unique(cl_data_all$TEAMSiteName) +for (i in 1:1) { #length(unqiue(cl_data_all$TEAMSiteName)) { + site_data = cl_data_all[which(cl_data_all$TEAMSiteName == unique_sites[i]),] + unique_stations <- unique(paste(site_data$SamplingUnitName)) + # get a unique list of 
stations..handle sites that have more than one station + for (j in 1:1) { #length(unique_stations)) { + # if more than 1 do something + station_data = site_data + # Perform tests on a particular climate station data. + for (k in 1:nrow(station_data)) { + # Find records 30 minutes, 1 hour, 2 hour, 3 hour, 6 hour and 12 hours ahead. + new_date = station_data$Observation[k] + 60*30 + new_date_hour = station_data$Observation[k] + 60*60 + new_date_2hour = station_data$Observation[k] + 2*60*60 + new_date_3hour = station_data$Observation[k] + 3*60*60 + new_date_6hour = station_data$Observation[k] + 6*60*60 + new_date_12hour = station_data$Observation[k] + 12*60*60 + # Create thirty min index and check to make sure it exists. + thirty_min_index <-which(station_data$Observation == new_date) + if (length(thirty_min_index) == 0) { + thirty_min_index <- 'a' + } + sixty_min_index <- which(station_data$Observation == new_date_hour) + if (length(sixty_min_index) == 0) { + sixty_min_index <- 'a' + } + two_hour_index <- which(station_data$Observation == new_date_2hour) + if (length(two_hour_index) == 0) { + two_hour_index <- 'a' + } + three_hour_index <- which(station_data$Observation == new_date_3hour) + if (length(three_hour_index) == 0) { + three_hour_index <- 'a' + } + six_hour_index <- which(station_data$Observation == new_date_6hour) + if (length(six_hour_index) == 0) { + six_hour_index <- 'a' + } + twelve_hour_index <- which(station_data$Observation == new_date_12hour) + if (length(twelve_hour_index) == 0) { + twelve_hour_index <- 'a' + } + + + # Sensor 1 + # RH - a value of 1 is equal to fail. + if(is.numeric(station_data$Sensor1_RelativeHumidity[k]) & is.numeric(station_data$Sensor1_RelativeHumidity[thirty_min_index])) { + station_data$StepRH1[k] <- ifelse (station_data$Sensor1_RelativeHumidity[thirty_min_index] - + station_data$Sensor1_RelativeHumidity[k] > 45,1,0) + } + # Solar radiation + # What are our units? 
+ if(is.numeric(station_data$Sensor1_TotalSolarRadiation[k]) & is.numeric(station_data$Sensor1_TotalSolarRadiation[sixty_min_index])) { + station_data$StepSR1[k] <- ifelse (station_data$Sensor1_TotalSolarRadiation[sixty_min_index] - + station_data$Sensor1_TotalSolarRadiation[k] >= 0 & station_data$Sensor1_TotalSolarRadiation[sixty_min_index] - + station_data$Sensor1_TotalSolarRadiation[k] < 555,1,0) + } + # Temperature + # 1 hour + if(is.numeric(station_data$Sensor1_AvgTemp[k]) & is.numeric(station_data$Sensor1_AvgTemp[sixty_min_index])) { + station_data$StepT1[k] <- ifelse (station_data$Sensor1_AvgTemp[sixty_min_index] - + station_data$Sensor1_AvgTemp[k] >= 4,1,0) + } + # 2hour + if(is.numeric(station_data$Sensor1_AvgTemp[k]) & is.numeric(station_data$Sensor1_AvgTemp[two_hour_index])) { + station_data$StepT2[k] <- ifelse (station_data$Sensor1_AvgTemp[two_hour_index] - + station_data$Sensor1_AvgTemp[k] >= 7,1,0) + } + # 3hour + if(is.numeric(station_data$Sensor1_AvgTemp[k]) & is.numeric(station_data$Sensor1_AvgTemp[three_hour_index])) { + station_data$StepT3[k] <- ifelse (station_data$Sensor1_AvgTemp[three_hour_index] - + station_data$Sensor1_AvgTemp[k] >= 9,1,0) + } + #6 hour + if(is.numeric(station_data$Sensor1_AvgTemp[k]) & is.numeric(station_data$Sensor1_AvgTemp[six_hour_index])) { + station_data$StepT4[k] <- ifelse (station_data$Sensor1_AvgTemp[six_hour_index] - + station_data$Sensor1_AvgTemp[k] >= 15,1,0) + } + #12 hour + if(is.numeric(station_data$Sensor1_AvgTemp[k]) & is.numeric(station_data$Sensor1_AvgTemp[twelve_hour_index])) { + station_data$StepT5[k] <- ifelse (station_data$Sensor1_AvgTemp[twelve_hour_index] - + station_data$Sensor1_AvgTemp[k] >= 25,1,0) + } + ############### + # Sensor 2 + # RH + if(is.numeric(station_data$Sensor2_RelativeHumidity[k]) & is.numeric(station_data$Sensor2_RelativeHumidity[thirty_min_index])) { + station_data$StepRH2[k] <- ifelse (station_data$Sensor2_RelativeHumidity[thirty_min_index] - + 
station_data$Sensor2_RelativeHumidity[k] > 45,1,0) + } + # Sensor 3 + # RH + if(is.numeric(station_data$Sensor3_RelativeHumidity[k]) & is.numeric(station_data$Sensor3_RelativeHumidity[thirty_min_index])) { + station_data$StepRH3[k] <- ifelse (station_data$Sensor3_RelativeHumidity[thirty_min_index] - + station_data$Sensor3_RelativeHumidity[k] > 45,1,0) + } + } + + } +} diff --git a/climate/new_climate_dataset.R b/climate/new_climate_dataset.R new file mode 100644 index 0000000..8564880 --- /dev/null +++ b/climate/new_climate_dataset.R @@ -0,0 +1,68 @@ +rm(list = ls()) +library(RPostgreSQL) # Need to explore RJDBC to connect with vertica +library(data.table) +drv <- dbDriver("PostgreSQL") +con <- dbConnect(drv,user="teamuser",password="", + dbname="team_2.0_production",port="5444", + host="data.team.sdsc.edu") + +cl_3_data <- dbSendQuery(con,"SELECT * FROM dqa_climate3") +data_3 <- fetch(cl_3_data, n = -1) +#data_3["protocol"] <- 3 # Mark records as climate protocol 3 + +#** Get Clamate 2.0 data from climate_samples table +cl_2_data <- dbSendQuery(con,"SELECT * FROM dqa_climate2_analysis") +data_2 <- fetch(cl_2_data, n = -1) +# Get data_2 to match the structure of data_3 +# Create new columns +data_2$collected_at <- paste(data_2$collected_at,data_2$collected_time) +data_2$Observation <-data_2$collected_at +setnames(data_2,"Id","ReferenceID") +setnames(data_2,"TotalSolarRadiation","Sensor1_TotalSolarRadiation") +setnames(data_2,"Precipitation","Rainfall") +setnames(data_2,"DryTemperature","Sensor1_AvgTemp") +setnames(data_2,"RelativeHumidity","Sensor1_RelativeHumidity") +setnames(data_2,"Site Name","TEAMSiteName") +data_2["RecordID"]<- NA +data_2["MinimumBatteryVoltage"]<- NA +data_2["Sensor1_TempStdDeviation"]<- NA +data_2["Sensor2_AvgTemp"]<- NA +data_2["Sensor2_TempStdDeviation"]<- NA +data_2["Sensor2_RelativeHumidity"]<- NA +data_2["Sensor3_AvgTemp"]<- NA +data_2["Sensor3_TempStdDeviation"]<- NA +data_2["Sensor3_RelativeHumidity"]<- NA 
+data_2["Sensor1_AvgSolarRadiation"]<- NA +data_2["Sensor1_SolarRadiationStdDeviation"]<- NA +data_2["Sensor2_AvgSolarRadiation"]<- NA +data_2["Sensor2_SolarRadiationStdDeviation"]<- NA +data_2["Sensor2_TotalSolarRadiation"]<- NA +data_2["SerialNumber"]<- NA +data_2["ProgramName"]<- NA +data_2["OperatingSystem"]<- NA +data_2["Tachometer_RPM"]<- NA +# Get rid of these two columns +data_2$ObservationDate <- NULL +data_2$ObservationTime <- NULL +data_2_new <-data.frame(cbind(data_2$ReferenceID, data_2$Observation, data_2$RecordID, data_2$MinimumBatteryVoltage, + data_2$Sensor1_AvgTemp, data_2$Sensor1_TempStdDeviation, data_2$Sensor1_RelativeHumidity, + data_2$Sensor2_AvgTemp, data_2$Sensor2_TempStdDeviation, data_2$Sensor2_RelativeHumidity, + data_2$Sensor3_AvgTemp, data_2$Sensor3_TempStdDeviation, data_2$Sensor3_RelativeHumidity, + data_2$Sensor1_AvgSolarRadiation, data_2$Sensor1_SolarRadiationStdDeviation,data_2$Sensor1_TotalSolarRadiation, + data_2$Sensor2_AvgSolarRadiation, data_2$Sensor2_SolarRadiationStdDeviation,data_2$Sensor2_TotalSolarRadiation, + data_2$Rainfall,data_2$SerialNumber,data_2$ProgramName,data_2$OperatingSystem, + data_2$Tachometer_RPM, data_2$ProtocolVersion, data_2$SamplingUnitName, data_2$Latitude, + data_2$Longitude, data_2$TEAMSiteName)) +colnames(data_2_new)<-c('ReferenceID','Observation','RecordID','MinimumBatteryVoltage','Sensor1_AvgTemp','Sensor1_TempStdDeviation', + 'Sensor1_RelativeHumidity','Sensor2_AvgTemp','Sensor2_TempStdDeviation','Sensor2_RelativeHumidity', + 'Sensor3_AvgTemp','Sensor3_TempStdDeviation','Sensor3_RelativeHumidity','Sensor1_AvgSolarRadiation', + 'Sensor1_SolarRadiationStdDeviation','Sensor1_TotalSolarRadiation','Sensor2_AvgSolarRadiation','Sensor2_SolarRadiationStdDeviation', + 'Sensor2_TotalSolarRadiation','Rainfall','SerialNumber','ProgramName','OperatingSystem', + 'Tachometer_RPM','ProtocolVersion','SamplingUnitName','Latitude', + 'Longitude','TEAMSiteName') +#### +# Combine climate 2.0 and 3.0 datasets 
+cl_data_all = rbind(data_3,data_2_new) +sysdate = Sys.Date() +filename= paste("cl_data_new",sysdate,".gzip",sep="") +save(cl_data_all, file=filename,compress="gzip") \ No newline at end of file diff --git a/teamcode-forkedrepo.Rproj b/teamcode-forkedrepo.Rproj new file mode 100644 index 0000000..8e3c2eb --- /dev/null +++ b/teamcode-forkedrepo.Rproj @@ -0,0 +1,13 @@ +Version: 1.0 + +RestoreWorkspace: Default +SaveWorkspace: Default +AlwaysSaveHistory: Default + +EnableCodeIndexing: Yes +UseSpacesForTab: Yes +NumSpacesForTab: 2 +Encoding: UTF-8 + +RnwWeave: Sweave +LaTeX: pdfLaTeX diff --git a/tools/team_db_query.R b/tools/team_db_query.R index 89c0589..cc80b6f 100644 --- a/tools/team_db_query.R +++ b/tools/team_db_query.R @@ -1,6 +1,5 @@ # team_db_query.r # -#Hello! f.teamdb.query <- function(dataset) { # Database query function of TEAM production database. Returns a R Object for # each dataset and saves to the workspace. The function contains views or pre- @@ -9,18 +8,20 @@ f.teamdb.query <- function(dataset) { # the R object. This will minimize hits on the database. # Example usage: ctdata <- f.teamdb.query("camera trap") # Args: - # Dataset: The dataset you want. Acceptable args: "vegetation", "climate", - # "camera trap" + # Dataset: The dataset you want. Acceptable args: "vegetation", "climate", + # "camera trap", "camera trap adv". # Returns: - # A dataframe or list with the desired datasets. - # Camera trap dataset: cam_trap_data R object and dataframe - # Climate dataset: Returns a list with two dataframes in it. - # cl_data: All climate 2.0 and 3.0 data - # cl_temp_maxmin: The temperature maxmin associated with each TEAM Site. - # This is used for quality control - # Vegetation dataset: Returns a list wtih two dataframes in it: - # tree: The entire tree dataset - # liana: The entire liana dataset + # A dataframe or list with the desired datasets. + # Camera trap datasets: cam_trap_data and cam_trap_data_adv R object and dataframe. 
+ # Camera trap adv will include much more camera trap metadata and image EXIF + # information. + # Climate dataset: Returns a list with two dataframes in it. + # cl_data: All climate 2.0 and 3.0 data + # cl_temp_maxmin: The temperature maxmin associated with each TEAM Site. + # This is used for quality control + # Vegetation dataset: Returns a list wtih two dataframes in it: + # tree: The entire tree dataset + # liana: The entire liana dataset library(RPostgreSQL) # Need to explore RJDBC to connect with vertica drv <- dbDriver("PostgreSQL") @@ -28,7 +29,17 @@ f.teamdb.query <- function(dataset) { dbname="team_2.0_production",port="5444", host="data.team.sdsc.edu") - if (dataset == "camera trap") { + if (dataset == "camera trap adv") { + cam_trap_query <- dbSendQuery(con,"select * from dqa_ct_advanced") + cam_trap_data_adv <- fetch(cam_trap_query, n = -1) + # Setup the filename with the Date. + sysdate = Sys.Date() + filename= paste("ct_data_adv",sysdate,".gzip",sep="") + save(cam_trap_data_adv, file=filename,compress="gzip") + return(cam_trap_data_adv) # Return dataframe with CT data + + + } else if (dataset == "camera trap") { cam_trap_query <- dbSendQuery(con,"select * from dqa_ct_ordered") cam_trap_data <- fetch(cam_trap_query, n = -1) # Setup the filename with the Date. 
@@ -37,32 +48,70 @@ f.teamdb.query <- function(dataset) { save(cam_trap_data, file=filename,compress="gzip") return(cam_trap_data) # Return dataframe with CT data - } else if (dataset == "climate") { - cl_3_data <- dbSendQuery(con,"select collection_sampling_units.id, collection_sampling_units.sampling_unit_id, collections.site_id, cl_datalog_record.id, cl_datalog_record.collection_sampling_unit_id, cl_datalog_record.collected_at::timestamp without time zone, cl_datalog_record.batt_volt_min, cl_datalog_record.airtc_avg, cl_datalog_record.airtc_2_avg, cl_datalog_record.rain_mm_tot,cl_datalog_record.temp1_keep,cl_datalog_record.temp2_keep, cl_datalog_record.temp_clean,cl_datalog_record.precip_clean,cl_datalog_record.climate_review from collection_sampling_units,collections, cl_datalog_record where collection_sampling_units.collection_id = collections.id and collection_sampling_units.id = cl_datalog_record.collection_sampling_unit_id") + cl_3_data <- dbSendQuery(con,"SELECT * FROM dqa_climate3") data_3 <- fetch(cl_3_data, n = -1) - data_3["protocol"] <- 3 # Mark records as climate protocol 3 + #data_3["protocol"] <- 3 # Mark records as climate protocol 3 #** Get Clamate 2.0 data from climate_samples table - cl_2_data <- dbSendQuery(con,"select collection_sampling_units.id, collection_sampling_units.sampling_unit_id, collections.site_id, climate_samples.id, climate_samples.collection_sampling_unit_id, climate_samples.collected_at, climate_samples.collected_time, climate_samples.dry_temperature as airtc_avg, climate_samples.precipitation as rain_mm_tot, climate_samples.temp1_keep,climate_samples.temp2_keep, climate_samples.temp_clean, climate_samples.precip_clean, climate_samples.climate_review from collection_sampling_units, collections, climate_samples where collection_sampling_units.collection_id = collections.id and collection_sampling_units.id = climate_samples.collection_sampling_unit_id") + cl_2_data <- dbSendQuery(con,"SELECT * FROM dqa_climate2_analysis") 
data_2 <- fetch(cl_2_data, n = -1) - data_2["protocol"] <- 2 # Mark records as climate protocol 2 - data_2["airtc_2_avg"]<- NA # Add a new column to match what is in climate 3 data + # Get data_2 to match the structure of data_3 + # Create new columns data_2$collected_at <- paste(data_2$collected_at,data_2$collected_time) - colnames(data_2)[7]<- "batt_volt_min" - data_2$batt_volt_min <- 0 # There are no battery values so climate 2.0 so make 0 and make column type = 'num' - data_2_new <-cbind(data_2[1:8],data_2[16],data_2[9:15]) # Rearrange columns so two datasets can be easily combined. - + data_2$Observation <-data_2$collected_at + setnames(data_2,"Id","ReferenceID") + setnames(data_2,"TotalSolarRadiation","Sensor1_TotalSolarRadiation") + setnames(data_2,"Precipitation","Rainfall") + setnames(data_2,"DryTemperature","Sensor1_AvgTemp") + setnames(data_2,"RelativeHumidity","Sensor1_RelativeHumidity") + setnames(data_2,"SiteName","TEAMSiteName") + data_2["RecordID"]<- NA + data_2["MinimumBatteryVoltage"]<- NA + data_2["Sensor1_TempStdDeviation"]<- NA + data_2["Sensor2_AvgTemp"]<- NA + data_2["Sensor2_TempStdDeviation"]<- NA + data_2["Sensor2_RelativeHumidity"]<- NA + data_2["Sensor3_AvgTemp"]<- NA + data_2["Sensor3_TempStdDeviation"]<- NA + data_2["Sensor3_RelativeHumidity"]<- NA + data_2["Sensor1_AvgSolarRadiation"]<- NA + data_2["Sensor1_SolarRadiationStdDeviation"]<- NA + data_2["Sensor2_AvgSolarRadiation"]<- NA + data_2["Sensor2_SolarRadiationStdDeviation"]<- NA + data_2["Sensor2_TotalSolarRadiation"]<- NA + data_2["SerialNumber"]<- NA + data_2["ProgramName"]<- NA + data_2["OperatingSystem"]<- NA + data_2["Tachometer_RPM"]<- NA + # Get rid of these two coolumns + #data_2$ObservationDate <- NULL + #data_2$ObservationTime <- NULL + data_2_new <-data.frame(cbind(data_2$ReferenceID, data_2$Observation, data_2$RecordID, data_2$MinimumBatteryVoltage, + data_2$Sensor1_AvgTemp, data_2$Sensor1_TempStdDeviation, data_2$Sensor1_RelativeHumidity, + data_2$Sensor2_AvgTemp, 
data_2$Sensor2_TempStdDeviation, data_2$Sensor2_RelativeHumidity, + data_2$Sensor3_AvgTemp, data_2$Sensor3_TempStdDeviation, data_2$Sensor3_RelativeHumidity, + data_2$Sensor1_AvgSolarRadiation, data_2$Sensor1_SolarRadiationStdDeviation,data_2$Sensor1_TotalSolarRadiation, + data_2$Sensor2_AvgSolarRadiation, data_2$Sensor2_SolarRadiationStdDeviation,data_2$Sensor2_TotalSolarRadiation, + data_2$Rainfall,data_2$SerialNumber,data_2$ProgramName,data_2$OperatingSystem, + data_2$Tachometer_RPM, data_2$ProtocolVersion, data_2$SamplingUnitName, data_2$Latitude, + data_2$Longitude, data_2$TEAMSiteName)) + colnames(data_2_new)<-c('ReferenceID','Observation','RecordID','MinimumBatteryVoltage', + 'Sensor1_AvgTemp','Sensor1_TempStdDeviation','Sensor1_RelativeHumidity', + 'Sensor2_AvgTemp','Sensor2_TempStdDeviation','Sensor2_RelativeHumidity', + 'Sensor3_AvgTemp','Sensor3_TempStdDeviation','Sensor3_RelativeHumidity', + 'Sensor1_AvgSolarRadiation','Sensor1_SolarRadiationStdDeviation','Sensor1_TotalSolarRadiation', + 'Sensor2_AvgSolarRadiation','Sensor2_SolarRadiationStdDeviation','Sensor2_TotalSolarRadiation', + 'Rainfall','SerialNumber','ProgramName','OperatingSystem', + 'Tachometer_RPM','ProtocolVersion','SamplingUnitName','Latitude', + 'Longitude','TEAMSiteName') + #### + data_2_new$Observation <-ymd_hms(data_2_new$Observation) # Combine climate 2.0 and 3.0 dataasets cl_data_all = rbind(data_3,data_2_new) - - # SQL to query from sites_climate_rules - cl_rules <- dbSendQuery(con,"select * from sites_climate_rules") - cl_temp_maxmin <- fetch(cl_rules, n = -1) - result <- list(cl_data = cl_data_all, cl_temp_maxmin = cl_temp_maxmin) - # Setup the filename with the Date. - sysdate = Sys.Date() - filename= paste("cl_data",sysdate,".gzip",sep="") + result <- cl_data_all + sysdate <- Sys.Date() + filename <- paste("cl_data_new",sysdate,".gzip",sep="") save(result, file=filename,compress="gzip") return(result)