First backtests.

First: packages & data.

# Modelling stack: tidyverse (wrangling), lubridate (dates),
# xgboost (boosted trees), keras (neural networks).
library(tidyverse)
library(lubridate)
library(xgboost)
library(keras)

load("data_ml.RData")                           # Expects data_ml in the working directory
data_ml <- data_ml %>% arrange(date, stock_id)  # Canonical ordering: chronological, then by asset id
dates <- levels(as.factor(data_ml$date))        # NOTE(review): character dates; overwritten below by unique(data_ml$date)

Second: auxiliary variables.

depth <- c(6, 12, 24, 48, 96)                               # Estimation windows (months of history)
label <- c("R1M_Usd", "R3M_Usd", "R6M_Usd", "R12M_Usd")     # Candidate label columns (fwd returns)
label_num <- c(1, 3, 6, 12)                                 # Horizon in months, aligned with label
sep_oos <- as.Date("2009-01-01")                            # Starting point for backtest
ticks <- data_ml$stock_id %>%                               # List of all asset ids
    as.factor() %>%
    levels()
N <- length(ticks)                                          # Max number of assets
dates <- data_ml$date %>%  unique()                         # All dates (replaces the earlier character version)
t_oos <- dates[dates > sep_oos] %>%                         # Out-of-sample dates
    as.Date(origin = "1970-01-01")                          # Coerce back to Date class
    
Tt <- length(t_oos)                                         # Number of out-of-sample dates
features <- colnames(data_ml[3:95]) # Feature column names (hard-coded positions 3:95 — beware if the layout changes!)

Third: the predictive engines.

# pred_fun: train one of two predictive engines and forecast test-sample returns.
#
# Args:
#   train_data: data frame holding the feature columns and the label column.
#   test_data : data frame at the prediction date (assumed one row per asset).
#   features  : character vector of predictor column names.
#   label     : name (string) of the dependent-variable column.
#   j         : engine selector (1 = gradient-boosted trees, 2 = neural network).
#
# Returns a list:
#   $ret   - predicted returns, one per row of test_data
#   $names - stock ids aligned with $ret (holds if test_data has one row per asset)
pred_fun <- function(train_data, test_data, features, label, j){ 
    if(j == 1){
        train_label <- train_data %>% select(all_of(label)) %>% as.matrix()       # Dependent variable
        train_features <- train_data %>% select(all_of(features)) %>% as.matrix() # Indep. variables
        train_matrix <- xgb.DMatrix(data = train_features, label = train_label)   # XGB format
        fit <- train_matrix %>% 
            xgb.train(data = .,                       # Data source (pipe input)
                      eta = 0.2,                      # Learning rate
                      objective = "reg:squarederror", # Squared-error regression ("reg:linear" is a deprecated alias)
                      max_depth = 4,                  # Maximum depth of trees
                      nrounds = 80                    # Number of boosting rounds (trees)
            )
        xgb_test <- test_data %>%                     # Test sample => XGB format
            select(all_of(features)) %>% 
            as.matrix() %>%
            xgb.DMatrix()
        pred <- list()                                # Initialize output container
        pred$ret <- predict(fit, xgb_test)            # Predicted returns, in test_data row order
        pred$names <- unique(test_data$stock_id)      # Ids matching $ret
        return(pred)
    }
    if(j == 2){
        NN_train_features <- select(train_data, all_of(features)) %>% as.matrix() # Keras needs matrices
        NN_train_labels <- train_data %>% select(all_of(label)) %>% as.matrix()
        NN_test_features <- select(test_data, all_of(features)) %>% as.matrix()
        model <- keras_model_sequential()
        model %>%   # Network structure: two hidden layers, then a single output unit
            layer_dense(units = 16, activation = 'relu', input_shape = ncol(NN_train_features)) %>%
            layer_dense(units = 8, activation = 'tanh') %>%
            layer_dense(units = 1) # No activation means linear activation: f(x) = x.
        model %>% compile(                             # Model specification
            loss = 'mean_squared_error',               # Loss function
            optimizer = optimizer_rmsprop(),           # Optimisation method (weight updating)
            metrics = c('mean_absolute_error')         # Monitoring metric
        )
        fit_NN <- model %>% 
            fit(NN_train_features,                                       # Training features
                NN_train_labels,                                         # Training labels
                epochs = 10, batch_size = 512, verbose = 0)              # Training parameters
        pred <- list()                                # Initialize output container
        pred$ret <- predict(model, NN_test_features)  # Predicted returns (keras models train in place)
        pred$names <- unique(test_data$stock_id)      # Ids matching $ret
        return(pred)
    }
}

Fourth: backtesting loop! NOTE: it takes dozens of hours to run on CPUs.

# Result containers, indexed as [date, depth, label, method, stock].
pred <- array(NA, dim = c(Tt-1, length(depth), length(label), 2, N))      # Predictions
realized <- array(NA, dim = c(Tt-1, length(depth), length(label), 2, N))  # Realized returns

for(t in 1:121){ #(Tt-1)){                              # Stop before the last date: no fwd return!
    if(t %% 2 == 0){print(t_oos[t])}                    # Progress indicator
    ind_date <- which(dates == t_oos[t])                # Position of current date (depends on t only)
    test_data <- data_ml %>% filter(date == t_oos[t])   # Test sample (hoisted: depends on t only)
    for(k in seq_along(depth)){                         # Estimation window
        for(i in seq_along(label)){                     # Label horizon
            train_buffer <- label_num[i]                # Buffer (months) so forward labels don't overlap the test date
            label_used <- label[i]
            train_data <- data_ml %>%                   # Rolling window ending 'train_buffer' months before t
                filter(date <= dates[ind_date - train_buffer],
                       date >= dates[ind_date - train_buffer - depth[k]])
            real_tmp <- test_data %>%                   # Realized returns at t (hoisted: independent of j)
                select(all_of(label_used)) %>% 
                as.matrix()
            for(j in 1:2){                              # Predictor: 1 = boosted trees, 2 = neural net
                pred_temp <- pred_fun(train_data, test_data, features, label_used, j)
                ind2 <- match(pred_temp$names, ticks) %>% na.omit()   # Map test ids into the full universe
                pred[t, k, i, j, ind2] <- pred_temp$ret
                realized[t, k, i, j, ind2] <- real_tmp  # Same values for both j; stored per method for melting
            }
        }
    }
}

# save(pred, file = "pred.RData")
# save(realized, file = "realized.RData")

Fifth: analysis. Below we show how to compute the R^2 and Diebold-Mariano statistics. An initial step is to clean and re-order the data.

library(reshape)                                            # melt() turns the 5-d arrays into long data frames
pred_df <- melt(pred)                                       # Columns 1:5 = array indices, column 6 = value
realized_df <- melt(realized)
error_df <- cbind(pred_df[,1:5],                            # Shared index columns
                  realized_df[,6],                          # Realized return
                  realized_df[,6] - pred_df[,6])            # Forecast error = realized - predicted
colnames(error_df) <- c("Date", "Depth", "Label", "Method", "Stock", "Realized", "Error")
# Map array indices back to their meaning with single vectorized lookups
# (order-independent, unlike chained conditional overwrites which only
# work when applied in descending order).
error_df$Depth  <- c(6, 12, 24, 48, 96)[error_df$Depth]     # Index -> months of history
error_df$Label  <- c(1, 3, 6, 12)[error_df$Label]           # Index -> label horizon in months
error_df$Method <- c("Boosted_trees", "Neural_network")[error_df$Method]

# Horizon-adjusted error metrics: scale errors by sqrt(horizon in months)
# so labels of different maturities are comparable, then aggregate.
res <- error_df %>% 
    mutate(Error = Error / sqrt(Label),                     # Normalize by sqrt of horizon
           abs_err = abs(Error),
           sq_err = Error^2,
           sq_realized = Realized^2) %>%
    group_by(Depth, Label, Method) %>%
    summarise(mae = mean(abs_err, na.rm = TRUE),            # Mean absolute error
              r2 = 1 - sum(sq_err, na.rm = TRUE) / sum(sq_realized, na.rm = TRUE)) %>% # OOS R^2 vs zero forecast
    ungroup() %>%
    mutate(Label = as.factor(Label),
           Label = recode_factor(Label,                     # Human-readable, ordered horizon labels
                                 "1"  = "1 month label",
                                 "3"  = "3 month label",
                                 "6"  = "6 month label",
                                 "12" = "12 month label",
                                 .ordered = TRUE)) 

Then, the R^2.

# Out-of-sample R^2 per depth/label/method (benchmark: zero forecast), then plot.
res <- error_df %>%
    group_by(Depth, Label, Method) %>%
    summarise(R2 = 1 - mean(Error^2, na.rm = TRUE) / mean(Realized^2, na.rm = TRUE)) %>%
    ungroup() %>%
    mutate(Depth = as.factor(Depth),                        # Discrete x-axis
           Method = as.factor(Method))                      # Discrete fill
res %>%
    ggplot(aes(x = Depth, y = R2, fill = Method)) + geom_col(position = "dodge") + 
    facet_grid(. ~Label, scales = "free") +
    ylab("Out-of-sample R^2") + xlab("Sample depth (months)") + 
    scale_fill_manual(values = c("#133E7E","#0FD581"), 
                      name = "Method", 
                      labels = c("Boosted trees", "Neural networks")) + # Assumes factor order: trees first
    theme(legend.position = "top") 

And finally, the MD series.

# Diebold-Mariano-type series: squared-loss differential between the two
# methods, standardized within each date/depth/label cell.
library(viridis)
error_df %>%
    spread(key = Method, value = Error) %>%                     # One error column per method
    mutate(loss_diff = Boosted_trees^2 - Neural_network^2) %>%  # Compute the differential once
    group_by(Date, Depth, Label) %>%
    summarise(MD = mean(loss_diff, na.rm = TRUE) / sd(loss_diff, na.rm = TRUE)) %>%
    ggplot(aes(x = t_oos[Date], y = MD, color = as.factor(Depth))) + geom_line() + facet_grid(Label ~.) +
    ylab("Diebold-Mariano statistic") + xlab("Date") + scale_color_viridis(option="plasma", discrete=TRUE) +
    labs(color = "Depth (months)")

---
title: Baseline results
output: html_notebook
---

# First backtests.

**First**: packages & data.

```{r, message = FALSE, warning = FALSE}
# Modelling stack: tidyverse (wrangling), lubridate (dates),
# xgboost (boosted trees), keras (neural networks).
library(tidyverse)
library(lubridate)
library(xgboost)
library(keras)

load("data_ml.RData")                           # Expects data_ml in the working directory
data_ml <- data_ml %>% arrange(date, stock_id)  # Canonical ordering: chronological, then by asset id
dates <- levels(as.factor(data_ml$date))        # NOTE(review): character dates; overwritten below by unique(data_ml$date)
```

**Second**: auxiliary variables.

```{r}
depth <- c(6, 12, 24, 48, 96)                               # Estimation windows (months of history)
label <- c("R1M_Usd", "R3M_Usd", "R6M_Usd", "R12M_Usd")     # Candidate label columns (fwd returns)
label_num <- c(1, 3, 6, 12)                                 # Horizon in months, aligned with label
sep_oos <- as.Date("2009-01-01")                            # Starting point for backtest
ticks <- data_ml$stock_id %>%                               # List of all asset ids
    as.factor() %>%
    levels()
N <- length(ticks)                                          # Max number of assets
dates <- data_ml$date %>%  unique()                         # All dates (replaces the earlier character version)
t_oos <- dates[dates > sep_oos] %>%                         # Out-of-sample dates
    as.Date(origin = "1970-01-01")                          # Coerce back to Date class
    
Tt <- length(t_oos)                                         # Number of out-of-sample dates
features <- colnames(data_ml[3:95]) # Feature column names (hard-coded positions 3:95 — beware if the layout changes!)
```

**Third**: the predictive engines.

```{r}
# pred_fun: train one of two predictive engines and forecast test-sample returns.
#
# Args:
#   train_data: data frame holding the feature columns and the label column.
#   test_data : data frame at the prediction date (assumed one row per asset).
#   features  : character vector of predictor column names.
#   label     : name (string) of the dependent-variable column.
#   j         : engine selector (1 = gradient-boosted trees, 2 = neural network).
#
# Returns a list:
#   $ret   - predicted returns, one per row of test_data
#   $names - stock ids aligned with $ret (holds if test_data has one row per asset)
pred_fun <- function(train_data, test_data, features, label, j){ 
    if(j == 1){
        train_label <- train_data %>% select(all_of(label)) %>% as.matrix()       # Dependent variable
        train_features <- train_data %>% select(all_of(features)) %>% as.matrix() # Indep. variables
        train_matrix <- xgb.DMatrix(data = train_features, label = train_label)   # XGB format
        fit <- train_matrix %>% 
            xgb.train(data = .,                       # Data source (pipe input)
                      eta = 0.2,                      # Learning rate
                      objective = "reg:squarederror", # Squared-error regression ("reg:linear" is a deprecated alias)
                      max_depth = 4,                  # Maximum depth of trees
                      nrounds = 80                    # Number of boosting rounds (trees)
            )
        xgb_test <- test_data %>%                     # Test sample => XGB format
            select(all_of(features)) %>% 
            as.matrix() %>%
            xgb.DMatrix()
        pred <- list()                                # Initialize output container
        pred$ret <- predict(fit, xgb_test)            # Predicted returns, in test_data row order
        pred$names <- unique(test_data$stock_id)      # Ids matching $ret
        return(pred)
    }
    if(j == 2){
        NN_train_features <- select(train_data, all_of(features)) %>% as.matrix() # Keras needs matrices
        NN_train_labels <- train_data %>% select(all_of(label)) %>% as.matrix()
        NN_test_features <- select(test_data, all_of(features)) %>% as.matrix()
        model <- keras_model_sequential()
        model %>%   # Network structure: two hidden layers, then a single output unit
            layer_dense(units = 16, activation = 'relu', input_shape = ncol(NN_train_features)) %>%
            layer_dense(units = 8, activation = 'tanh') %>%
            layer_dense(units = 1) # No activation means linear activation: f(x) = x.
        model %>% compile(                             # Model specification
            loss = 'mean_squared_error',               # Loss function
            optimizer = optimizer_rmsprop(),           # Optimisation method (weight updating)
            metrics = c('mean_absolute_error')         # Monitoring metric
        )
        fit_NN <- model %>% 
            fit(NN_train_features,                                       # Training features
                NN_train_labels,                                         # Training labels
                epochs = 10, batch_size = 512, verbose = 0)              # Training parameters
        pred <- list()                                # Initialize output container
        pred$ret <- predict(model, NN_test_features)  # Predicted returns (keras models train in place)
        pred$names <- unique(test_data$stock_id)      # Ids matching $ret
        return(pred)
    }
}
```


**Fourth**: backtesting loop! **NOTE**: it takes *dozens* of hours to run on CPUs.


```{r}
# Result containers, indexed as [date, depth, label, method, stock].
pred <- array(NA, dim = c(Tt-1, length(depth), length(label), 2, N))      # Predictions
realized <- array(NA, dim = c(Tt-1, length(depth), length(label), 2, N))  # Realized returns

for(t in 1:121){ #(Tt-1)){                              # Stop before the last date: no fwd return!
    if(t %% 2 == 0){print(t_oos[t])}                    # Progress indicator
    ind_date <- which(dates == t_oos[t])                # Position of current date (depends on t only)
    test_data <- data_ml %>% filter(date == t_oos[t])   # Test sample (hoisted: depends on t only)
    for(k in seq_along(depth)){                         # Estimation window
        for(i in seq_along(label)){                     # Label horizon
            train_buffer <- label_num[i]                # Buffer (months) so forward labels don't overlap the test date
            label_used <- label[i]
            train_data <- data_ml %>%                   # Rolling window ending 'train_buffer' months before t
                filter(date <= dates[ind_date - train_buffer],
                       date >= dates[ind_date - train_buffer - depth[k]])
            real_tmp <- test_data %>%                   # Realized returns at t (hoisted: independent of j)
                select(all_of(label_used)) %>% 
                as.matrix()
            for(j in 1:2){                              # Predictor: 1 = boosted trees, 2 = neural net
                pred_temp <- pred_fun(train_data, test_data, features, label_used, j)
                ind2 <- match(pred_temp$names, ticks) %>% na.omit()   # Map test ids into the full universe
                pred[t, k, i, j, ind2] <- pred_temp$ret
                realized[t, k, i, j, ind2] <- real_tmp  # Same values for both j; stored per method for melting
            }
        }
    }
}

# save(pred, file = "pred.RData")
# save(realized, file = "realized.RData")
```


**Fifth**: analysis. Below we show how to compute the R^2 and Diebold-Mariano statistics.
An initial step is to clean and re-order the data.

```{r}
library(reshape)                                            # melt() turns the 5-d arrays into long data frames
pred_df <- melt(pred)                                       # Columns 1:5 = array indices, column 6 = value
realized_df <- melt(realized)
error_df <- cbind(pred_df[,1:5],                            # Shared index columns
                  realized_df[,6],                          # Realized return
                  realized_df[,6] - pred_df[,6])            # Forecast error = realized - predicted
colnames(error_df) <- c("Date", "Depth", "Label", "Method", "Stock", "Realized", "Error")
# Map array indices back to their meaning with single vectorized lookups
# (order-independent, unlike chained conditional overwrites which only
# work when applied in descending order).
error_df$Depth  <- c(6, 12, 24, 48, 96)[error_df$Depth]     # Index -> months of history
error_df$Label  <- c(1, 3, 6, 12)[error_df$Label]           # Index -> label horizon in months
error_df$Method <- c("Boosted_trees", "Neural_network")[error_df$Method]

# Horizon-adjusted error metrics: scale errors by sqrt(horizon in months)
# so labels of different maturities are comparable, then aggregate.
res <- error_df %>% 
    mutate(Error = Error / sqrt(Label),                     # Normalize by sqrt of horizon
           abs_err = abs(Error),
           sq_err = Error^2,
           sq_realized = Realized^2) %>%
    group_by(Depth, Label, Method) %>%
    summarise(mae = mean(abs_err, na.rm = TRUE),            # Mean absolute error
              r2 = 1 - sum(sq_err, na.rm = TRUE) / sum(sq_realized, na.rm = TRUE)) %>% # OOS R^2 vs zero forecast
    ungroup() %>%
    mutate(Label = as.factor(Label),
           Label = recode_factor(Label,                     # Human-readable, ordered horizon labels
                                 "1"  = "1 month label",
                                 "3"  = "3 month label",
                                 "6"  = "6 month label",
                                 "12" = "12 month label",
                                 .ordered = TRUE)) 
```

Then, the R^2.

```{r}
# Out-of-sample R^2 per depth/label/method (benchmark: zero forecast), then plot.
res <- error_df %>%
    group_by(Depth, Label, Method) %>%
    summarise(R2 = 1 - mean(Error^2, na.rm = TRUE) / mean(Realized^2, na.rm = TRUE)) %>%
    ungroup() %>%
    mutate(Depth = as.factor(Depth),                        # Discrete x-axis
           Method = as.factor(Method))                      # Discrete fill
res %>%
    ggplot(aes(x = Depth, y = R2, fill = Method)) + geom_col(position = "dodge") + 
    facet_grid(. ~Label, scales = "free") +
    ylab("Out-of-sample R^2") + xlab("Sample depth (months)") + 
    scale_fill_manual(values = c("#133E7E","#0FD581"), 
                      name = "Method", 
                      labels = c("Boosted trees", "Neural networks")) + # Assumes factor order: trees first
    theme(legend.position = "top") 
```

And finally, the MD series.

```{r}
# Diebold-Mariano-type series: squared-loss differential between the two
# methods, standardized within each date/depth/label cell.
library(viridis)
error_df %>%
    spread(key = Method, value = Error) %>%                     # One error column per method
    mutate(loss_diff = Boosted_trees^2 - Neural_network^2) %>%  # Compute the differential once
    group_by(Date, Depth, Label) %>%
    summarise(MD = mean(loss_diff, na.rm = TRUE) / sd(loss_diff, na.rm = TRUE)) %>%
    ggplot(aes(x = t_oos[Date], y = MD, color = as.factor(Depth))) + geom_line() + facet_grid(Label ~.) +
    ylab("Diebold-Mariano statistic") + xlab("Date") + scale_color_viridis(option="plasma", discrete=TRUE) +
    labs(color = "Depth (months)")
```