# Saving Models
from ngboost import NGBRegressor

# NOTE: sklearn.datasets.load_boston was deprecated in scikit-learn 1.0 and
# removed in 1.2, so the original example no longer runs; the bundled
# diabetes regression dataset is a drop-in replacement. `return_X_y` is
# keyword-only in modern scikit-learn, so it must be passed by name.
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split

X, Y = load_diabetes(return_X_y=True)
# Hold out 20% of the rows as a test split for evaluating the reloaded model.
X_reg_train, X_reg_test, Y_reg_train, Y_reg_test = train_test_split(X, Y, test_size=0.2)

Saving ngboost models is easy with Python's built-in `pickle` module:

ngb = NGBRegressor().fit(X_reg_train, Y_reg_train)
[iter 0] loss=3.6313 val_loss=0.0000 scale=0.5000 norm=3.3049
[iter 100] loss=3.0325 val_loss=0.0000 scale=1.0000 norm=3.5726
[iter 200] loss=2.3759 val_loss=0.0000 scale=2.0000 norm=3.8621
[iter 300] loss=2.0014 val_loss=0.0000 scale=2.0000 norm=3.1113
[iter 400] loss=1.8321 val_loss=0.0000 scale=1.0000 norm=1.4179
import pickle
from pathlib import Path

# Destination for the serialized model: a pickle file on the user's Desktop.
model_path = Path.home() / 'Desktop' / 'ngbtest.p'

# Serialize the fitted estimator to disk ...
with model_path.open("wb") as f:
    pickle.dump(ngb, f)

# ... then load it back to verify the round-trip.
with model_path.open("rb") as f:
    ngb_unpickled = pickle.load(f)

# The restored estimator is used exactly like the original: point
# predictions and full predicted distributions for the test split.
Y_preds = ngb_unpickled.predict(X_reg_test)
Y_dists = ngb_unpickled.pred_dist(X_reg_test)

Y_dists[0:5].params
{'loc': array([18.44476218, 16.99878369, 30.36098956, 20.47954671, 18.82795883]),
 'scale': array([1.43416057, 1.59187052, 1.16261407, 1.47979359, 1.83866049])}