Skip to content

Commit 3df8442

Browse files
committed
update stream_ids
Merge branch 'master' of github.com:terraref/tutorials into clowder-met # Conflicts: # sensors/01-meteorological-data.Rmd
2 parents a3a42ed + 5219cd9 commit 3df8442

8 files changed

Lines changed: 516 additions & 25 deletions

sensors/01-meteorological-data.Rmd

Lines changed: 12 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ output: html_document
77
knitr::opts_chunk$set(echo = FALSE, cache = TRUE)
88
99
api_url <- "https://terraref.ncsa.illinois.edu/clowder/api"
10-
output_dir <- "~/data/downloads/"
10+
output_dir <- file.path(tempdir(), "downloads")
1111
dir.create(output_dir, showWarnings = FALSE, recursive = TRUE)
1212
1313
```
@@ -124,27 +124,33 @@ Here is the json representation of a single five-minute observation:
124124

125125
The data represent 5 minute summaries aggregated from 1/s observations.
126126

127+
#### Using Curl
127128

129+
First, this is what the API looks like as a URL. Try pasting it into your browser
128130

129-
#### Using Curl
131+
132+
https://terraref.ncsa.illinois.edu/clowder/api/geostreams/datapoints?user=USER&pass=PASSWORD&stream_id=746&since=2017-01-02&until=2017-01-31
133+
134+
This is how you can automatically download the met data to a local file:
130135

131136
```{sh eval=FALSE}
132-
curl -O spectra.json -X GET https://terraref.ncsa.illinois.edu/clowder/api/geostreams/datapoints?user=USER&pass=PASSWORD&stream_id=743
137+
curl -O spectra.json -X GET https://terraref.ncsa.illinois.edu/clowder/api/geostreams/datapoints?user=USER&pass=PASSWORD&stream_id=746
133138
```
134139

140+
And this is how you can access the data in R:
141+
142+
135143
```{r met-geostream}
136144
library(dplyr)
137145
library(ggplot2)
138146
library(jsonlite)
139147
140148
url = ""
141-
mac_weather.list <- jsonlite::fromJSON('https://terraref.ncsa.illinois.edu/clowder/api/geostreams/datapoints?user=USER&pass=PASS&stream_id=300', flatten = FALSE)
149+
mac_weather.list <- jsonlite::fromJSON('https://terraref.ncsa.illinois.edu/clowder/api/geostreams/datapoints?user=USER&pass=PASSWORD&stream_id=746&since=2017-01-02&until=2017-01-31', flatten = FALSE)
142150
143151
# change time to human-readable
144152
mac_weather <- mac_weather.list$properties %>%
145153
mutate(time = lubridate::ymd_hms(mac_weather.list$end_time))
146-
147-
148154
```
149155

150156
### Using
Lines changed: 76 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,76 @@
1+
# Using the PEcAn atmospheric data utilities
2+
3+
Explain what these are
4+
5+
github.com/pecanproject/pecan
6+
7+
insert slide from talks ...
8+
9+
## Dependencies
10+
11+
```{r install-pecan-dependencies, message=FALSE, eval = FALSE}
12+
13+
devtools::install_github("pecanproject/pecan",
14+
subdir = 'base/utils', ref = 'develop', dependencies = FALSE)
15+
devtools::install_github("pecanproject/pecan",
16+
subdir = 'base/db')
17+
devtools::install_github("rforge/reddyproc",
18+
subdir = "pkg/REddyProc")
19+
devtools::install_github("pecanproject/pecan",
20+
subdir = 'modules/data.atmosphere',
21+
ref = 'develop')
22+
23+
source("https://raw.githubusercontent.com/PecanProject/pecan/develop/models/biocro/R/met2model.BIOCRO.R")
24+
```
25+
26+
27+
## PEcAn Met Workflow
28+
29+
```{r write-clowder, eval = FALSE}
30+
writeLines("
31+
<pecan>
32+
<clowder>
33+
<hostname>terraref.ncsa.illinois.edu</hostname>
34+
<user>user@illinois.edu</user>
35+
<password>ask</password>
36+
</clowder>
37+
</pecan>",
38+
con = "~/.pecan.clowder.xml")
39+
```
40+
41+
![](pecan.clowder.xml.png)
42+
43+
```{r pecan-met-workflow, message=FALSE, warning=FALSE, eval = FALSE}
44+
library("PEcAn.data.atmosphere")
45+
library("dplyr")
46+
47+
## download raw data
48+
ne <- download.Geostreams(
49+
outfolder="data",
50+
sitename="EnvironmentLogger sensor_weather_station",
51+
start_date="2016-02-28",
52+
end_date="2016-04-01",
53+
overwrite = TRUE)
54+
55+
## convert to standard
56+
ne_cf <- met2CF.Geostreams(
57+
in.path = "data/",
58+
in.prefix = ne$dbfile.name,
59+
outfolder = "data/cf",
60+
start_date = "2016-03-01", # note date shift to avoid TZ issues
61+
end_date = "2016-04-01",
62+
overwrite = TRUE)
63+
64+
## convert to model specific input
65+
met2model.BIOCRO(
66+
overwrite = TRUE,
67+
in.path = "data/cf",
68+
in.prefix = ne_cf$dbfile.name,
69+
outfolder = "data/biocromet",
70+
lat = 40,
71+
lon = -88,
72+
start_date = "2016-03-01",
73+
end_date = "2016-03-30")
74+
75+
met <- readr::read_csv('data/biocromet/Clowder.UIUC Energy Farm - NE.2016-02-28.2016-04-01.2016.csv')
76+
```
Lines changed: 24 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -7,25 +7,26 @@ from the workbench or Globus.
77
## Getting started
88

99
After installing terrautils, you should be able to import the *product* module.
10-
```
10+
```{python}
1111
from terrautils.products import get_sensor_list, unique_sensor_names
1212
from terrautils.products import get_file_listing, extract_file_paths
1313
```
1414

15-
The get\_sensor\_list and get\_file\_listing both require connection, url,
15+
The `get_sensor_list` and `get_file_listing` functions both require connection, url,
1616
and key parameters. *Connection* can be 'None', the *url* (called host in the
1717
code) should be something like https://terraref.ncsa.illinois.edu/clowder/.
1818
The *key* is a unique access key for the Clowder api.
1919

2020
## Getting the sensor list
21+
2122
The first thing to get is the sensor name. This can be retrieved using the
22-
get\_sensor\_list function. This function returns the full record which may
23+
`get_sensor_list` function. This function returns the full record which may
2324
be useful in some cases but primarily includes sensor names that include
24-
a plot id number. The utility function unique_sensor_names accpets the
25+
a plot id number. The utility function `unique_sensor_names` accepts the
2526
sensor list and provides a list of names suitable for use in the
26-
get_file_listing function.
27+
`get_file_listing` function.
2728

28-
```
29+
```{python}
2930
sensors = get_sensor_list(None, url, key)
3031
names = unique_sensor_names(sensors)
3132
```
@@ -36,16 +37,16 @@ geostreams API. The currently available sensors are:
3637
* IR Surface Temperature
3738
* Thermal IR GeoTIFFs Datasets
3839
* flirIrCamera Datasets
39-
* (EL) sensor\_weather\_station
40+
* (EL) sensor_weather_station
4041
* Irrigation Observations
4142
* Canopy Cover
4243
* Energy Farm Observations SE
43-
* (EL) sensor\_par
44+
* (EL) sensor_par
4445
* scanner3DTop Datasets
4546
* Weather Observations
4647
* Energy Farm Observations NE
4748
* RGB GeoTIFFs Datasets
48-
* (EL) sensor\_co2
49+
* (EL) sensor_co2
4950
* stereoTop Datasets
5051
* Energy Farm Observations CEN
5152

@@ -55,28 +56,30 @@ The geostreams API can be used to get a list of datasets that overlap a
5556
specific plot boundary and, optionally, limited by a time range. Iterating
5657
over the datasets allows the paths to all the files to be extracted.
5758

58-
```
59+
```{python}
5960
sensor = 'Thermal IR GeoTIFFs Datasets'
6061
sitename = 'MAC Field Scanner Season 1 Field Plot 101 W'
6162
datasets = get_file_listing(None, url, key, sensor, sitename)
6263
files = extract_file_paths(datasets)
6364
```
6465

6566
Datasets can be further filtered using the *since* and *until* parameters
66-
of get\_file\_listing with a date string.
67+
of `get_file_listing` with a date string.
6768

68-
```
69+
```{python}
6970
dataset = get_file_listing(None, url, key, sensor, sitename,
7071
since='2016-06-01', until='2016-06-10')
7172
```
7273

7374

7475
# Alternative method
76+
7577
The following method demonstrates the same approach using the Clowder API. This
7678
approach is useful for understanding the data layout and when the Python
7779
terrautils package is not available.
7880

7981
## Finding plot ID
82+
8083
```
8184
SENSOR_NAME = "MAC Field Scanner Season 1 Field Plot 101 W"
8285
GET https://terraref.ncsa.illinois.edu/clowder/api/geostreams/sensors?sensor_name={SENSOR_NAME}
@@ -85,7 +88,9 @@ GET https://terraref.ncsa.illinois.edu/clowder/api/geostreams/sensors?sensor_nam
8588
This returns a JSON object with an 'id' parameter. You can use this ID parameter to specify the right data stream.
8689

8790
## Finding stream ID within a plot
91+
8892
The names are formatted as "<Sensor Group> Datasets (<Sensor ID>)".
93+
8994
```
9095
SENSOR_ID = 3355
9196
STREAM_NAME = "Thermal IR GeoTIFFs Datasets ({SENSOR_ID})"
@@ -95,13 +100,15 @@ GET https://terraref.ncsa.illinois.edu/clowder/api/geostreams/streams?stream_nam
95100
This returns a JSON object with an 'id' parameter. You can use this ID parameter to get the right datapoints.
96101

97102
## Listing Clowder file IDs for that plot & sensor stream
103+
98104
```
99105
STREAM_ID = "11586"
100106
GET https://terraref.ncsa.illinois.edu/clowder/api/geostreams/datapoints?stream_id={STREAM_ID}
101107
```
102108

103109
This returns a list of datapoint JSON objects, each with a 'properties' parameter that looks like:
104-
```
110+
111+
```{python}
105112
properties: {
106113
dataset_name: "Thermal IR GeoTIFFs - 2016-05-09__12-07-57-990",
107114
source_dataset: "https://terraref.ncsa.illinois.edu/clowder/datasets/59fc9e7d4f0c3383c73d2905"
@@ -111,19 +118,23 @@ properties: {
111118
The source_dataset URL can be used to view the dataset in Clowder.
112119

113120
You can also filter the datapoints by date:
121+
114122
```
115123
GET https://terraref.ncsa.illinois.edu/clowder/api/geostreams/datapoints?stream_id={STREAM_ID}&since=2017-01-02&until=2017-06-10
116124
```
117125

118126
## Getting ROGER file path from dataset
127+
119128
Given a source dataset URL, we can call the API to get the files and their paths.
129+
120130
```
121131
SOURCE_DATASET = "https://terraref.ncsa.illinois.edu/clowder/datasets/59fc9e7d4f0c3383c73d2905"
122132
# Add /api after /clowder, and add /files at the end of the URL
123133
GET "https://terraref.ncsa.illinois.edu/clowder/api/datasets/59fc9e7d4f0c3383c73d2905/files"
124134
```
125135

126136
This returns a list of files in the dataset and their paths if available:
137+
127138
```
128139
[
129140
{
Lines changed: 51 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,51 @@
1+
2+
## Hyperspectral Data
3+
4+
### Calibration Targets
5+
6+
These were collected on April 15 2017 every ~15 minutes
7+
8+
9+
```{r get-vnir-calibration, eval=FALSE}
10+
library(ncdf4)
11+
library(dplyr)
12+
13+
hsi_calibration_dir <- '/data/terraref/sites/ua-mac/Level_1/hyperspectral/2017-04-15'
14+
hsi_calibration_files <- dir(hsi_calibration_dir,
15+
recursive = TRUE,
16+
full.names = TRUE)
17+
18+
fileinfo <- bind_rows(lapply(hsi_calibration_files, file.info)) %>%
19+
mutate(size_gb = size/1073741824)
20+
21+
calibration_nc <- nc_open(hsi_calibration_files[200])
22+
a <- calibration_nc$var$rfl_img
23+
24+
25+
#calibration_nc$dim$x$len 1600
26+
#calibration_nc$dim$y$len
27+
x_length <- round(calibration_nc$dim$x$len / 10)
28+
y_length <- round(calibration_nc$dim$y$len * 3/4)
29+
30+
xstart <- ceiling(calibration_nc$dim$x$len / 2) - floor(x_length / 2) + 1
31+
32+
ystart <- ceiling(calibration_nc$dim$y$len / 2) - floor(y_length / 2) + 1
33+
34+
rfl <- ncvar_get(calibration_nc, 'rfl_img',
35+
#start = c(1, xstart, ystart),
36+
#count = c(955, x_length, y_length)
37+
start = c(2, 2, 2),
38+
count = c(1320, 10, 954)
39+
)
40+
x <- ncvar_get(calibration_nc, 'x', start = 100, count = 160)
41+
y <- ncvar_get(calibration_nc, 'y', start = 100, count = 1324)
42+
lambda <- calibration_nc$dim$wavelength$vals
43+
for(i in 1 + 0:10*95){
44+
image(x = x, y = y, z = rfl[i,,],
45+
xlab = 'x (m)', ylab = 'y (m)',
46+
col = rainbow(n=100),
47+
main = paste('wavelength',
48+
udunits2::ud.convert(lambda[i],'m','nm')))
49+
}
50+
51+
```

0 commit comments

Comments
 (0)