---
title: "Lastname-Firstname_Project3"
author:
- Guillermo Delgado
- Katie Guillen
- Leanne Harper
- Nicolas Schoonmaker
date: "`r format(Sys.time(), '%m/%d/%Y')`"
output:
  html_document:
    toc: true
    toc_float: true
  word_document: default
  pdf_document: default
subtitle: 'Project #3: Escapades in Market Risk'
header-includes:
- \usepackage{fancyhdr}
- \pagestyle{fancy}
- \fancyhead[CO,CE]{\thetitle}
- \fancyfoot[CO,CE]{Copyright 2018, William G. Foote}
- \fancyhead[RE,RO]{\thepage}
- \renewcommand{\footrulewidth}{0.5pt}
---
# Setup
Using RMD from: https://wgfoote.github.io/fin-alytics/HTML/PR03_market-risk.html
## GitHub
Our code is on github at: https://github.com/nschoonm/FIN654-Project3
## Disable inline preview
To disable inline preview of the Markdown file in RStudio, go to Tools > Global Options... > R Markdown and set "Show equations and image previews" to "Never".
## Knit Notes
This file must be knit to HTML because it contains an animated GIF, which cannot easily be embedded in a PDF.
## Setup chunk defaults
```{r Chunk setup}
require(knitr)
# Echo the output
knitr::opts_chunk$set(echo = TRUE, eval = TRUE, warning=FALSE, message=FALSE)
knitr::opts_chunk$set(tidy = TRUE)
knitr::opts_chunk$set(tidy.opts=list(width.cutoff=36))
knitr::opts_chunk$set(size = "small")
knitr::opts_hooks$set(fig.width = function(options) {
  if (options$fig.width < options$fig.height) {
    options$fig.width = options$fig.height
  }
  options
})
knitr::knit_hooks$set(mysize = function(before, options, envir) {
  if (before)
    return(options$size)
})
```
## Packages to install
```{r Packages and Installers}
# Install RTools
# https://cran.rstudio.com/bin/windows/Rtools/
#
# Install Tinytex
# tinytex::install_tinytex()
#
# Restart R Studio
#
# Install packages
#install.packages("dplyr")
#install.packages("rstudioapi")
#install.packages("tinytex")
#install.packages("magick")
#install.packages("plotly")
#install.packages("xts")
#install.packages("ggplot2")
#install.packages("moments")
#install.packages("matrixStats")
#install.packages("quantreg")
#install.packages("flexdashboard")
#install.packages("QRM")
#install.packages("qrmdata")
```
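As a convenience, here is a minimal sketch (assuming the same package list as the commented installers above) that installs only the packages missing from the local library:
```{r Conditional installs, eval=FALSE}
# Install only the project's packages that are not already present.
# eval=FALSE so knitting never triggers an install; run manually as needed.
pkgs <- c("dplyr", "rstudioapi", "tinytex", "magick", "plotly", "xts",
          "ggplot2", "moments", "matrixStats", "quantreg",
          "flexdashboard", "QRM", "qrmdata")
missing <- pkgs[!pkgs %in% rownames(installed.packages())]
if (length(missing) > 0) install.packages(missing)
```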
## Libraries to include
```{r Library Includes}
# The list of libraries to include
library(stats)
library(dplyr)
library(rstudioapi)
library(flexdashboard)
```
## Get Working Directory
Get the current working directory so we can run this file from anywhere. This allows the script to work from RStudio or from the command line.
```{r Get current directory}
# Get the directory so we can run this from anywhere
# Get the script directory from R when running in R
if(rstudioapi::isAvailable())
{
  print("Running in RStudio")
  script.path <- rstudioapi::getActiveDocumentContext()$path
  (script.path)
  script.dir <- dirname(script.path)
}
if(!exists("script.dir"))
{
  print("Running from command line")
  script.dir <- getSrcDirectory(function(x) {x})
}
(script.dir)
```
## Set Working Directory
Set the working directory based on where the script is
```{r Working Directory and Data setup}
# Set my working directory
# There is a "data" folder here with the files and the script
setwd(script.dir)
# Double check the working directory
getwd()
# Error check to ensure the working directory is set up and the data
# directory exists inside it. It's required for this file
if(dir.exists(paste(getwd(), "/data", sep = "")) == FALSE) {
  stop("Data directory does not exist. Make sure the working directory
       is set using setwd() and the data folder exists in it.")
} else {
  print("Working directory and data set up correctly")
}
```
# Purpose, Process, Product
This group assignment will cover Sessions 5 and 6 (two weeks). We explore market risk with existing commodities in conventional metal spot markets. Specifically, we will build upon our previous practice reading in data, exploring time series, estimating auto- and cross-correlations, and investigating volatility clustering in financial time series. Further, we will estimate VaR and ES and use the GPD to explore the extremes of distributions of financial returns. We will summarize our experiences in a dashboard format.
# Assignment
Submit into **Coursework > Assignments and Grading > Project 3 > Submission** an `RMD` file with filename **lastname_firstname_Assignment3.Rmd** as well as the HTML file generated with a Knit. If you have difficulties submitting a `.Rmd` file, then submit a `.txt` file.
1. Use headers (##), r-chunks for code, and text to build a dashboard report that addresses the two parts of this project.
2. Complete the Report with a Conclusion section at the end: list the essential skills needed (e.g., `fit.GPD()`), the data insights, and the policies or practices the business could adopt to address risk.
3. **Organize all the code & documentation in the flex_dashboard.**
4. **Some coding is required in this Assignment - search for lines with keyword "CODE:" in upper case.**
5. **Knit to flex_dashboard - this will produce html output.**
6. **If the Knit to flex_dashboard gives errors, try adding "runtime: shiny" in the YAML header, and 'Run Document' instead **
# Flexdashboard
We begin to expand our range of production capabilities. We started by knitting `.Rmd` to a PDF file in Assignment 1, then moved to knitting `.Rmd` to an HTML file in Assignment 2. Now we introduce ourselves to the `flexdashboard` package to start building a web application with a multi-page, multi-column design. Later, we will add Shiny for interactive pages.
1. Make sure that the `flexdashboard` package is installed. In the RStudio console, type `library("flexdashboard")` to verify that it will load.
2. Go to the RStudio `flexdashboard` site to learn about the basics of building a web/dashboard application (a minimal skeleton sketch follows this list).
3. Go to `File` and click on `New File` where you will choose `R Markdown`. Because you already loaded `flexdashboard` this next step will work. In the dialog box choose `From Template`. You should see a `Flex Dashboard` template choice in the list box, which you will select and then click `OK`. A new `Rmd` file will then appear in the scripts pane of RStudio.
4. `Knit` this new file. A dialog box will appear for you to name the file.
5. Start to modify this template to document your own data analysis journey from here on out.
6. **Join any Office hour prior to Live Session 8 to discuss installation & testing issues.**
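As an illustration only (the `Flex Dashboard` template from step 3 is the authoritative starting point), a minimal flexdashboard skeleton might look like the following; the page, column, and chart names are placeholders, and `runtime: shiny` is the optional troubleshooting line mentioned in the Assignment section:

````markdown
---
title: "Project 3 Dashboard"
output:
  flexdashboard::flex_dashboard:
    orientation: columns
runtime: shiny
---

Exploratory Analysis
====================

Column
------

### Returns

```{r}
# chart code goes here
```
````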
# Problem
A freight forwarder with a fleet of bulk carriers wants to optimize their portfolio in metals markets with entry into the nickel business and use of the tramp trade. Tramp ships are the company's "swing" option without any fixed charter or other constraint. They allow the company flexibility in managing several aspects of freight uncertainty. The call for tramp transportation is a _derived demand_ based on the value of the cargoes. This value varies widely in the spot markets. The company allocates \$250 million to manage receivables. The company wants us to:
1. Retrieve and begin to analyze data about potential commodities for diversification,
2. Compare potential commodities with existing commodities in conventional metal spot markets,
3. Begin to generate economic scenarios based on events that may, or may not, materialize in the commodities.
4. The company wants to mitigate its risk by diversifying its cargo loads; the risk measure determines the amount of capital the company needs to maintain its portfolio of services.
Here is some additional detail.
1. Product: Metals commodities and freight charters
2. Metal, Company, and Geography:
   a. Nickel: MMC Norilsk, Russia
   b. Copper: Codelco, Chile and MMC Norilsk, Russia
   c. Aluminium: Vale, Brazil and Rio Tinto Alcan, Australia
3. Customers: Ship Owners, manufacturers, traders
4. All metals are traded on the London Metal Exchange
### Key business questions
Include as Business Perspectives at the end.
1. How would the performance of these commodities affect the size and timing of shipping arrangements?
2. How would the value of new shipping arrangements affect the value of our business with our current customers?
3. How would we manage the allocation of existing resources given we have just landed in this new market?
### Getting to a response
These detailed questions are answered in part by the tables, graphs, and models developed below; add commentary as needed to explain the outputs.
1. What is the decision the freight-forwarder must make? List key business questions and data needed to help answer these questions and support the freight-forwarder's decision. Retrieve data and build financial market detail into the data story behind the questions.
2. Develop the stylized facts of the markets the freight-forwarder faces. Include level, returns, and size time series plots. Calculate and display in a table the summary statistics, including quantiles, of each of these series. Use autocorrelation, partial autocorrelation, and cross-correlation functions to understand some of the persistence of returns, including leverage and volatility clustering effects. Use quantile regressions to develop the distribution of sensitivity of each market to spill-over effects from other markets. Interpret these stylized "facts" in terms of the business decision the freight-forwarder makes.
3. How much capital would the freight-forwarder need? Determine various measures of risk in the tail of each metal's distribution. Then specify a loss function to develop the portfolio of risk and the risk capital the freight-forwarder might need. Confidence intervals might be used to create a risk management plan with varying tail-experience thresholds. (Reference formulas for the tail measures follow this list.)
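For reference, the tail measures computed in the chunks below follow the standard definitions at tolerance level $\alpha$ (note the historical code approximates ES with the median, rather than the mean, of the exceedances):

$$
\mathrm{VaR}_{\alpha} = \inf\{\ell : F_L(\ell) \ge \alpha\}, \qquad
\mathrm{ES}_{\alpha} = \mathrm{E}\left[\, L \mid L > \mathrm{VaR}_{\alpha} \right],
$$

and, for a threshold $u$ with fitted GPD parameters $\hat{\xi}$ and $\hat{\beta}$ and exceedance ratio $N_u/n$, the GPD-based measures used in the loss-analysis chunk are

$$
\mathrm{VaR}_{\alpha}^{GPD} = u + \frac{\hat{\beta}}{\hat{\xi}}\left[\left(\frac{1-\alpha}{N_u/n}\right)^{-\hat{\xi}} - 1\right], \qquad
\mathrm{ES}_{\alpha}^{GPD} = \frac{\mathrm{VaR}_{\alpha}^{GPD} + \hat{\beta} - \hat{\xi}\, u}{1-\hat{\xi}}.
$$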
```{r }
library(ggplot2)
library(flexdashboard)
library(shiny)
library(QRM)
library(qrmdata)
library(xts)
library(zoo)
library(psych)
rm(list = ls())
# PAGE: Exploratory Analysis
data <- na.omit(read.csv("data/metaldata.csv", header = TRUE))
prices <- data
# Compute log differences percent using as.matrix to force numeric type
data.r <- diff(log(as.matrix(data[, -1]))) * 100
# Create size and direction
size <- na.omit(abs(data.r)) # size is indicator of volatility
#head(size)
colnames(size) <- paste(colnames(size),".size", sep = "") # Teetor
direction <- ifelse(data.r > 0, 1, ifelse(data.r < 0, -1, 0)) # another indicator of volatility
colnames(direction) <- paste(colnames(direction),".dir", sep = "")
# Convert into a time series object:
# 1. Split into date and rates
dates <- as.Date(data$DATE[-1], "%m/%d/%Y")
dates.chr <- as.character(data$DATE[-1])
#str(dates.chr)
values <- cbind(data.r, size, direction)
# for dplyr pivoting and ggplot2 need a data frame also known as "tidy data"
data.df <- data.frame(dates = dates, returns = data.r, size = size, direction = direction)
data.df.nd <- data.frame(dates = dates.chr, returns = data.r, size = size, direction = direction, stringsAsFactors = FALSE)
#non-coerced dates for subsetting on non-date columns
# 2. Make an xts object with row names equal to the dates
data.xts <- na.omit(as.xts(values, dates)) #order.by=as.Date(dates, "%d/%m/%Y")))
#str(data.xts)
data.zr <- as.zooreg(data.xts)
returns <- data.xts # watch for this data below!
# PAGE: Market risk
corr_rolling <- function(x) {
  dim <- ncol(x)
  corr_r <- cor(x)[lower.tri(diag(dim), diag = FALSE)]
  return(corr_r)
}
vol_rolling <- function(x) {
  library(matrixStats)
  vol_r <- colSds(x)
  return(vol_r)
}
ALL.r <- data.xts[, 1:3]
window <- 90 #reactive({input$window})
corr_r <- rollapply(ALL.r, width = window, corr_rolling, align = "right", by.column = FALSE)
colnames(corr_r) <- c("nickel.copper", "nickel.aluminium", "copper.aluminium")
vol_r <- rollapply(ALL.r, width = window, vol_rolling, align = "right", by.column = FALSE)
colnames(vol_r) <- c("nickel.vol", "copper.vol", "aluminium.vol")
year <- format(index(corr_r), "%Y")
r_corr_vol <- merge(ALL.r, corr_r, vol_r, year)
# Market dependencies
#library(matrixStats)
R.corr <- apply.monthly(as.xts(ALL.r), FUN = cor)
R.vols <- apply.monthly(ALL.r, FUN = colSds) # from MatrixStats
# Form correlation matrix for one month
R.corr.1 <- matrix(R.corr[20,], nrow = 3, ncol = 3, byrow = FALSE)
rownames(R.corr.1) <- colnames(ALL.r[,1:3])
colnames(R.corr.1) <- rownames(R.corr.1)
R.corr.1
R.corr <- R.corr[, c(2, 3, 6)]
colnames(R.corr) <- c("nickel.copper", "nickel.aluminium", "copper.aluminium")
colnames(R.vols) <- c("nickel.vols", "copper.vols", "aluminium.vols")
R.corr.vols <- na.omit(merge(R.corr, R.vols))
R.corr.vols.logs <- na.omit(log(R.corr.vols))
year <- format(index(R.corr.vols), "%Y")
R.corr.vols.y <- data.frame(nickel.correlation = R.corr.vols[,1], copper.volatility = R.corr.vols[,5], year = year)
nickel.vols <- as.numeric(R.corr.vols[,"nickel.vols"])
copper.vols <- as.numeric(R.corr.vols[,"copper.vols"])
aluminium.vols <- as.numeric(R.corr.vols[,"aluminium.vols"])
#data = R.corr.vols
#marketRisk = nickel.copper
#standardDeviation = copper.vols
#correlationName = "nickel-copper"
#volatilityName = "copper"
run_correlation_sensitivity <- function(corrAndVols, marketRisk, standardDeviation,
                                        correlationName, volatilityName) {
  library(quantreg)
  taus <- seq(.05, .95, .05) # Roger Koenker UI Bob Hogg and Allen Craig
  new_formula <- reformulate(response = marketRisk, termlabels = standardDeviation)
  fit.rq.a <- rq(new_formula, tau = taus, data = corrAndVols)
  fit.lm.a <- lm(new_formula, data = corrAndVols)
  plot(summary(fit.rq.a), parm = standardDeviation,
       main = paste(correlationName, "correlation sensitivity to", volatilityName, "volatility", sep = " "))
  #(ni.cu.summary <- summary(fit.rq.a, se = "boot"))
}
run_correlation_sensitivity(R.corr.vols, "nickel.copper", "copper.vols", "nickel-copper", "copper")
run_correlation_sensitivity(R.corr.vols, "nickel.aluminium", "aluminium.vols", "nickel-aluminium", "aluminium")
run_correlation_sensitivity(R.corr.vols, "copper.aluminium", "aluminium.vols", "copper-aluminium", "aluminium")
## Nickel-Copper correlation sensitivity to copper volatility
library(quantreg)
taus <- seq(.05,.95,.05) # Roger Koenker UI Bob Hogg and Allen Craig
fit.rq.nickel.copper <- rq(nickel.copper ~ copper.vols, tau = taus, data = R.corr.vols)
fit.lm.nickel.copper <- lm(nickel.copper ~ copper.vols, data = R.corr.vols)
plot(summary(fit.rq.nickel.copper), parm = "copper.vols", main = "nickel-copper correlation sensitivity to copper volatility")
#(ni.cu.summary <- summary(fit.rq.nickel.copper, se = "boot"))
## Nickel-Aluminium correlation sensitivity to aluminium volatility
library(quantreg)
taus <- seq(.05,.95,.05) # Roger Koenker UI Bob Hogg and Allen Craig
fit.rq.nickel.aluminium <- rq(nickel.aluminium ~ aluminium.vols, tau = taus, data = R.corr.vols)
fit.lm.nickel.aluminium <- lm(nickel.aluminium ~ aluminium.vols, data = R.corr.vols)
plot(summary(fit.rq.nickel.aluminium), parm = "aluminium.vols", main = "nickel-aluminium correlation sensitivity to aluminium volatility")
#(ni.cu.summary <- summary(fit.rq.nickel.aluminium, se = "boot"))
## Copper-Aluminium correlation sensitivity to copper volatility
library(quantreg)
taus <- seq(.05,.95,.05) # Roger Koenker UI Bob Hogg and Allen Craig
fit.rq.copper.aluminium <- rq(copper.aluminium ~ copper.vols, tau = taus, data = R.corr.vols)
fit.lm.copper.aluminium <- lm(copper.aluminium ~ copper.vols, data = R.corr.vols)
plot(summary(fit.rq.copper.aluminium), parm = "copper.vols", main = "copper-aluminium correlation sensitivity to copper volatility")
#(ni.cu.summary <- summary(fit.rq.copper.aluminium, se = "boot"))
## Copper-Aluminium correlation sensitivity to aluminium volatility
library(quantreg)
taus <- seq(.05,.95,.05) # Roger Koenker UI Bob Hogg and Allen Craig
fit.rq.copper.aluminium <- rq(copper.aluminium ~ aluminium.vols, tau = taus, data = R.corr.vols)
fit.lm.copper.aluminium <- lm(copper.aluminium ~ aluminium.vols, data = R.corr.vols)
plot(summary(fit.rq.copper.aluminium), parm = "aluminium.vols", main = "copper-aluminium correlation sensitivity to aluminium volatility")
#(ni.cu.summary <- summary(fit.rq.copper.aluminium, se = "boot"))
```
**CODE: TRY THE OTHER TWO COMBINATIONS: "nickel.aluminium", "copper.aluminium"**
```{r }
title.chg <- "Metals Market Percent Changes"
autoplot.zoo(data.xts[,1:3]) + ggtitle(title.chg) + ylim(-5, 5)
autoplot.zoo(data.xts[,4:6]) + ggtitle(title.chg) + ylim(-5, 5)
acf(coredata(data.xts[,1:3])) # returns
acf(coredata(data.xts[,4:6])) # sizes
#pacf here
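# A sketch filling in the "pacf here" placeholder above: partial
# autocorrelations of returns and of sizes, mirroring the acf calls.
pacf(coredata(data.xts[, 1:3])) # returns
pacf(coredata(data.xts[, 4:6])) # sizes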
one <- ts(data.df$returns.nickel)
two <- ts(data.df$returns.copper)
# or
one <- ts(data.zr[,1])
two <- ts(data.zr[,2])
title.chg <- "Nickel vs. Copper"
ccf(one, two, main = title.chg, lag.max = 20, xlab = "", ylab = "", ci.col = "red")
# build function to repeat these routines
run_ccf <- function(one, two, main = title.chg, lag = 20, color = "red"){
  # one and two are equal length series
  # main is title
  # lag is number of lags in cross-correlation
  # color is color of dashed confidence interval bounds
  stopifnot(length(one) == length(two))
  one <- ts(one)
  two <- ts(two)
  ccf(one, two, main = main, lag.max = lag, xlab = "", ylab = "", ci.col = color)
  # end run_ccf
}
title <- "nickel-copper"
run_ccf(one, two, main = title, lag = 20, color = "red")
# now for volatility (sizes)
one <- abs(data.zr[,1])
two <- abs(data.zr[,2])
title <- "Nickel-Copper: volatility"
run_ccf(one, two, main = title, lag = 20, color = "red")
#
# Load the data_moments() function
# data_moments function
# INPUTS: r vector
# OUTPUTS: list of scalars (mean, sd, median, skewness, kurtosis)
data_moments <- function(data){
  library(moments)
  library(matrixStats)
  mean.r <- colMeans(data)
  median.r <- colMedians(data)
  sd.r <- colSds(data)
  IQR.r <- colIQRs(data)
  skewness.r <- skewness(data)
  kurtosis.r <- kurtosis(data)
  result <- data.frame(mean = mean.r, median = median.r, std_dev = sd.r, IQR = IQR.r, skewness = skewness.r, kurtosis = kurtosis.r)
  return(result)
}
# Run data_moments()
answer <- data_moments(data.xts[, 4:6])
# Build pretty table
answer <- round(answer, 4)
knitr::kable(answer)
mean(data.xts[,4])
#Return on nickel
returns1 <- returns[,1]
colnames(returns1) <- "Returns" #kluge to coerce column name for df
returns1.df <- data.frame(Returns = returns1[,1], Distribution = rep("Historical", each = length(returns1)))
alpha <- 0.95 # reactive({ifelse(input$alpha.q>1,0.99,ifelse(input$alpha.q<0,0.001,input$alpha.q))})
# Value at Risk
VaR.hist <- quantile(returns1,alpha)
VaR.text <- paste("Value at Risk =", round(VaR.hist, 2))
# Determine the max y value of the density plot.
# This will be used to place the text above the plot
VaR.y <- max(density(returns1.df$Returns)$y)
# Expected Shortfall
ES.hist <- median(returns1[returns1 > VaR.hist])
ES.text <- paste("Expected Shortfall =", round(ES.hist, 2))
p <- ggplot(returns1.df, aes(x = Returns, fill = Distribution)) + geom_density(alpha = 0.5) +
geom_vline(aes(xintercept = VaR.hist), linetype = "dashed", size = 1, color = "firebrick1") +
geom_vline(aes(xintercept = ES.hist), size = 1, color = "firebrick1") +
annotate("text", x = 2+ VaR.hist, y = VaR.y*1.05, label = VaR.text) +
annotate("text", x = 1.5+ ES.hist, y = VaR.y*1.1, label = ES.text) + scale_fill_manual( values = "dodgerblue4")
p
#Return on copper
returns2 <- returns[,2]
colnames(returns2)<- "Returns" #kluge to coerce column name for df
returns2.df <- data.frame(Returns = returns2[,1], Distribution = rep("Historical", each = length(returns2)))
# Value at Risk
VaR2.hist <- quantile(returns2,alpha)
VaR2.text <- paste("Value at Risk =", round(VaR2.hist, 2))
VaR2.y <- max(density(returns2.df$Returns)$y)
# Expected Shortfall
ES2.hist <- median(returns2[returns2 > VaR2.hist])
ES2.text <- paste("Expected Shortfall =", round(ES2.hist, 2))
p <- ggplot(returns2.df, aes(x = Returns, fill = Distribution)) + geom_density(alpha = 0.5) +
geom_vline(aes(xintercept = VaR2.hist), linetype = "dashed", size = 1, color = "firebrick1") +
geom_vline(aes(xintercept = ES2.hist), size = 1, color = "firebrick1") +
  annotate("text", x = 2 + VaR2.hist, y = VaR2.y*1.05, label = VaR2.text) +
  annotate("text", x = 1.5 + ES2.hist, y = VaR2.y*1.1, label = ES2.text) + scale_fill_manual(values = "dodgerblue4")
p
#Return on aluminium
returns3 <- returns[,3]
colnames(returns3)<- "Returns" #kluge to coerce column name for df
returns3.df <- data.frame(Returns = returns3[,1], Distribution = rep("Historical", each = length(returns3)))
# Value at Risk
VaR3.hist <- quantile(returns3,alpha)
VaR3.text <- paste("Value at Risk =", round(VaR3.hist, 2))
VaR3.y <- max(density(returns3.df$Returns)$y)
# Expected Shortfall
ES3.hist <- median(returns3[returns3 > VaR3.hist])
ES3.text <- paste("Expected Shortfall =", round(ES3.hist, 2))
p <- ggplot(returns3.df, aes(x = Returns, fill = Distribution)) + geom_density(alpha = 0.5) +
geom_vline(aes(xintercept = VaR3.hist), linetype = "dashed", size = 1, color = "firebrick1") +
geom_vline(aes(xintercept = ES3.hist), size = 1, color = "firebrick1") +
  annotate("text", x = 2 + VaR3.hist, y = VaR3.y*1.05, label = VaR3.text) +
  annotate("text", x = 1.5 + ES3.hist, y = VaR3.y*1.1, label = ES3.text) + scale_fill_manual(values = "dodgerblue4")
p
```
**CODE: DO THE SAME FOR RETURNS 2 AND 3**
```{r }
# Now for Loss Analysis
# Get last prices
price.last <- as.numeric(tail(data[, -1], n=1))
# Specify the positions
position.rf <- c(1/3, 1/3, 1/3)
# And compute the position weights
w <- position.rf * price.last
# Fan these the length and breadth of the risk factor series
weights.rf <- matrix(w, nrow=nrow(data.r), ncol=ncol(data.r), byrow=TRUE)
#head(rowSums((exp(data.r/100)-1)*weights.rf), n=3)
# We need to compute exp(x) - 1 for very small x: expm1 accomplishes this
#head(rowSums((exp(data.r/100)-1)*weights.rf), n=4)
loss.rf <- -rowSums(expm1(data.r/100) * weights.rf)
loss.rf.df <- data.frame(Loss = loss.rf, Distribution = rep("Historical", each = length(loss.rf)))
# Simple Value at Risk and Expected Shortfall
alpha.tolerance <- .95
VaR.hist <- quantile(loss.rf, probs=alpha.tolerance, names=FALSE)
# Just as simple Expected shortfall
ES.hist <- median(loss.rf[loss.rf > VaR.hist])
VaR.text <- paste("Value at Risk =\n", round(VaR.hist, 2)) # ="VaR"&c12
ES.text <- paste("Expected Shortfall \n=", round(ES.hist, 2))
title.text <- paste(round(alpha.tolerance*100, 0), "% Loss Limits")
# using histogram bars instead of the smooth density
p <- ggplot(loss.rf.df, aes(x = Loss, fill = Distribution)) + geom_histogram(alpha = 0.8) + geom_vline(aes(xintercept = VaR.hist), linetype = "dashed", size = 1, color = "blue") + geom_vline(aes(xintercept = ES.hist), size = 1, color = "blue") + annotate("text", x = VaR.hist, y = 40, label = VaR.text) + annotate("text", x = ES.hist, y = 20, label = ES.text) + xlim(0, 500) + ggtitle(title.text)
p
# mean excess plot to determine thresholds for extreme event management
data <- as.vector(loss.rf) # data is purely numeric
umin <- min(data) # threshold u min
umax <- max(data) - 0.1 # threshold u max
nint <- 100 # grid length to generate mean excess plot
grid.0 <- numeric(nint) # grid store
e <- grid.0 # store mean exceedances e
upper <- grid.0 # store upper confidence interval
lower <- grid.0 # store lower confidence interval
u <- seq(umin, umax, length = nint) # threshold u grid
alpha <- 0.95 # confidence level
for (i in 1:nint) {
  data <- data[data > u[i]] # subset data above thresholds
  e[i] <- mean(data - u[i]) # calculate mean excess of threshold
  sdev <- sqrt(var(data)) # standard deviation
  n <- length(data) # sample size of subsetted data above thresholds
  upper[i] <- e[i] + (qnorm((1 + alpha)/2) * sdev)/sqrt(n) # upper confidence interval
  lower[i] <- e[i] - (qnorm((1 + alpha)/2) * sdev)/sqrt(n) # lower confidence interval
}
mep.df <- data.frame(threshold = u, threshold.exceedances = e, lower = lower, upper = upper)
# Voila the plot => you may need to tweak these limits!
p <- ggplot(mep.df, aes(x = threshold, y = threshold.exceedances)) + geom_line() + geom_line(aes(x = threshold, y = lower), colour = "red") + geom_line(aes(x = threshold, y = upper), colour = "red") + annotate("text", x = 400, y = 200, label = "upper 95%") + annotate("text", x = 200, y = 0, label = "lower 5%")
p
#
# GPD to describe and analyze the extremes
#
#library(QRM)
alpha.tolerance <- 0.95
u <- quantile(loss.rf, alpha.tolerance, names = FALSE)
loss.excess <- loss.rf[loss.rf > u] # excesses above the scalar threshold u
fit <- fit.GPD(loss.rf, threshold = u) # Fit GPD to the excesses
xi.hat <- fit$par.ests[["xi"]] # fitted xi
beta.hat <- fit$par.ests[["beta"]] # fitted beta
data <- loss.rf
n.relative.excess <- length(loss.excess) / length(loss.rf) # = N_u/n
VaR.gpd <- u + (beta.hat/xi.hat)*(((1-alpha.tolerance) / n.relative.excess)^(-xi.hat)-1)
ES.gpd <- (VaR.gpd + beta.hat-xi.hat*u) / (1-xi.hat)
# Plot away
VaRgpd.text <- paste("GPD: Value at Risk =", round(VaR.gpd, 2))
ESgpd.text <- paste("Expected Shortfall =", round(ES.gpd, 2))
title.text <- paste(VaRgpd.text, ESgpd.text, sep = " ")
loss.plot <- ggplot(loss.rf.df, aes(x = Loss, fill = Distribution)) + geom_density(alpha = 0.2)
loss.plot <- loss.plot + geom_vline(aes(xintercept = VaR.gpd), colour = "blue", linetype = "dashed", size = 0.8)
loss.plot <- loss.plot + geom_vline(aes(xintercept = ES.gpd), colour = "blue", size = 0.8)
#+ annotate("text", x = 300, y = 0.0075, label = VaRgpd.text, colour = "blue") + annotate("text", x = 300, y = 0.005, label = ESgpd.text, colour = "blue")
loss.plot <- loss.plot + xlim(0, 500) + ggtitle(title.text)
loss.plot
#
# Confidence in GPD
#
showRM(fit, alpha = 0.99, RM = "VaR", method = "BFGS")
showRM(fit, alpha = 0.99, RM = "ES", method = "BFGS")
#
# Generate overlay of historical and GPD; could also use Gaussian or t as well from the asynchronous material
```
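Following the comment at the end of the chunk above, here is a minimal sketch of the suggested overlay. It assumes the `dGPD()` density from `QRM` and the fitted `xi.hat`, `beta.hat`, and threshold `u` from above; it compares the kernel density of losses beyond `u` with the fitted GPD density of the excesses. A Gaussian or t overlay from the asynchronous material could be added the same way.
```{r GPD overlay sketch}
# Kernel density of historical tail losses (those exceeding the threshold u)
tail.losses <- loss.rf[loss.rf > u]
# Fitted GPD density evaluated at the same excesses over the threshold
gpd.df <- data.frame(Loss = sort(tail.losses),
                     Density = dGPD(sort(tail.losses) - u,
                                    xi = xi.hat, beta = beta.hat))
overlay.plot <- ggplot(data.frame(Loss = tail.losses), aes(x = Loss)) +
  geom_density(fill = "dodgerblue4", alpha = 0.2) +
  geom_line(data = gpd.df, aes(x = Loss, y = Density), colour = "blue") +
  ggtitle("Historical tail density vs. fitted GPD")
overlay.plot
```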
# Conclusion
**Organize within flex_dashboard**
## Skills and Tools
(REPLACE text here)
What skills contributed toward data exploration and analytics?
## Data Insights
(REPLACE text here)
Explain some of the data exploration, statistics, and graphs here, including the GPD.
## Business Remarks
(REPLACE text here)
How would the performance of these commodities affect the size and timing of shipping arrangements?
How would the value of new shipping arrangements affect the value of our business with our current customers?
How would we manage the allocation of existing resources given we have just landed in this new market?