LSTM

데이터 불러오기

# Mount Google Drive so the dataset under /content/drive can be read.
from google.colab import drive
drive.mount('/content/drive')
Mounted at /content/drive
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt


import torch
import torch.nn as nn
import torch.optim as optim

from sklearn.preprocessing import MinMaxScaler

import warnings
# Silence library deprecation chatter in the notebook output.
warnings.filterwarnings("ignore")

# Base folder on Google Drive holding the competition files.
path = '/content/drive/MyDrive/sun/'

# Hourly solar-generation history and the submission template to fill in.
energy = pd.read_csv(path + 'energy.csv')
sample_submission = pd.read_csv(path + 'sample_submission.csv')
energy.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 25632 entries, 0 to 25631
Data columns (total 5 columns):
 #   Column             Non-Null Count  Dtype  
---  ------             --------------  -----  
 0   time               25632 non-null  object 
 1   dangjin_floating   25608 non-null  float64
 2   dangjin_warehouse  25584 non-null  float64
 3   dangjin            25632 non-null  int64  
 4   ulsan              25632 non-null  int64  
dtypes: float64(2), int64(2), object(1)
memory usage: 1001.4+ KB
# Fill the missing generation values with each column's mean, then index by
# timestamp. numeric_only=True keeps this working on pandas >= 2.0, where
# DataFrame.mean() no longer silently skips the non-numeric 'time' column.
energy = energy.fillna(energy.mean(numeric_only=True))
energy = energy.set_index('time')
energy.head()
dangjin_floating dangjin_warehouse dangjin ulsan
time
2018-03-01 1:00:00 0.0 0.0 0 0
2018-03-01 2:00:00 0.0 0.0 0 0
2018-03-01 3:00:00 0.0 0.0 0 0
2018-03-01 4:00:00 0.0 0.0 0 0
2018-03-01 5:00:00 0.0 0.0 0 0

결측치를 채우고 시간을 인덱스로 바꿨습니다.

# Pull each plant's generation history out of the frame as a float ndarray.
ulsan, dangjin_floating, dangjin_warehouse, dangjin = (
    energy[col].values.astype(float)
    for col in ('ulsan', 'dangjin_floating', 'dangjin_warehouse', 'dangjin')
)

데이터를 각각 뽑아냅니다.

dangjin_floating

# Training hyper-parameters.
learning_rate = 0.0001 
# Window length fed to the LSTM; 24 scored best, but 12 is used because the
# Colab session ran out of RAM with 24.
sequence_length = 12
epochs = 2000
def make_batch(input_data, sl):
    """Build sliding-window training pairs for one-step-ahead forecasting.

    For every position in `input_data`, the window of `sl` consecutive values
    becomes an input sample and the single value right after it becomes the
    target.

    Returns a (train_x, train_y) pair of lists: each train_x element has
    length `sl`, each train_y element has length 1.
    """
    last_start = len(input_data) - sl
    windows = [input_data[start:start + sl] for start in range(last_start)]
    targets = [input_data[start + sl:start + sl + 1] for start in range(last_start)]
    return windows, targets
class simple_lstm(nn.Module):
    """Stacked LSTM regressor: maps a (batch, seq, 1) series to one scalar per sample."""

    def __init__(self):
        super().__init__()
        # Network hyper-parameters (kept as attributes for inspection).
        self.input_vector = 1        # feature size of each timestep
        self.sequence_length = 12    # expected window length, for reference
        self.output_vector = 100     # LSTM hidden-state size
        self.num_layers = 4          # number of stacked LSTM layers

        self.lstm = nn.LSTM(
            input_size=self.input_vector,
            hidden_size=self.output_vector,
            num_layers=self.num_layers,
            batch_first=True,
        )
        # Funnel the final hidden vector down to a single output value.
        self.linear = nn.Sequential(
            nn.Linear(self.output_vector, 50),
            nn.Linear(50, 30),
            nn.Linear(30, 10),
            nn.Linear(10, 1),
        )

    def forward(self, x):
        """Run the LSTM and regress the last timestep's hidden state to a scalar."""
        sequence_out, _ = self.lstm(x)
        last_step = sequence_out[:, -1, :]
        return self.linear(last_step)

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Build (window, next-value) training pairs from the floating-plant series.
train_x, train_y = make_batch(dangjin_floating.reshape(-1, 1), sequence_length)

# The training tensors never change, so build them, move them to the device,
# and reshape the targets ONCE instead of repeating it on every epoch
# (the original re-ran .to(device) and .view(-1, 1) inside the loop).
tensor_x = torch.Tensor(train_x).to(device)
tensor_y = torch.Tensor(train_y).to(device).view(-1, 1)

# Model must live on the same device as the data.
dangjin_floatings = simple_lstm()
dangjin_floatings = dangjin_floatings.to(device)

# Adam optimizer over the model parameters, MSE regression loss.
optimizer = torch.optim.Adam(dangjin_floatings.parameters(), lr = learning_rate)
criterion = nn.MSELoss()


for i in range(epochs):
    # Training mode (enables weight updates / training-only layers).
    dangjin_floatings.train()
    output = dangjin_floatings(tensor_x)
    loss = criterion(output, tensor_y)

    # Gradients accumulate by default, so clear them before each backward pass.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Report the loss every 100 epochs.
    if i % 100 == 0:
        print('Epoch {}, Loss {:.5f}'.format(i, loss.item()))
Epoch 0, Loss 51745.66797
Epoch 100, Loss 51092.85156
Epoch 200, Loss 47514.81641
Epoch 300, Loss 41005.27734
Epoch 400, Loss 37112.99609
Epoch 500, Loss 36839.90625
Epoch 600, Loss 36838.98828
Epoch 700, Loss 36828.25000
Epoch 800, Loss 15298.88477
Epoch 900, Loss 4058.15771
Epoch 1000, Loss 2399.74683
Epoch 1100, Loss 2103.90039
Epoch 1200, Loss 2016.97937
Epoch 1300, Loss 1976.83508
Epoch 1400, Loss 1952.15393
Epoch 1500, Loss 1933.21155
Epoch 1600, Loss 1917.11658
Epoch 1700, Loss 1901.57031
Epoch 1800, Loss 1888.42883
Epoch 1900, Loss 1876.48096
# Seed the autoregressive forecast with the last `sequence_length` observations
# (derived from the variable instead of the hard-coded 12 / -11 the original
# used, so the window stays consistent if sequence_length changes).
x_input = np.array(energy.dangjin_floating[-sequence_length:])
x_input = x_input.reshape((1, sequence_length, 1))
dangjin_floating_pred = []

# Forecast 28 days (672 hours) one step at a time, feeding each prediction
# back in as if it were an observed value (naive recursive strategy).
for i in range(672):

    x_input = torch.Tensor(x_input)
    x_input = x_input.to(device)

    # Bring the model output back to CPU / numpy before rebuilding the window.
    predict = dangjin_floatings(x_input).cpu().detach().numpy()
    new_input = predict.reshape((1, 1, 1))

    # Slide the window: keep the newest sequence_length - 1 values and append
    # the fresh prediction, restoring a sequence_length-long input.
    x_input = np.concatenate(
        (x_input[:, -(sequence_length - 1):].cpu(), new_input), axis = 1)

    # Accumulate every point forecast for the submission.
    dangjin_floating_pred.append(predict[0][0])

dangjin_warehouse

# train_x = [length-12 arrays, ...], train_y = [length-1 arrays, ...]
train_x, train_y = make_batch(dangjin_warehouse.reshape(-1, 1), sequence_length)

# The training tensors never change, so build them, move them to the device,
# and reshape the targets ONCE instead of repeating it on every epoch.
tensor_x = torch.Tensor(train_x).to(device)
tensor_y = torch.Tensor(train_y).to(device).view(-1, 1)

# Fresh model instance for the warehouse plant, on the same device as the data.
dangjin_warehouses = simple_lstm()
dangjin_warehouses = dangjin_warehouses.to(device)

# Adam optimizer over this model's parameters.
optimizer = torch.optim.Adam(dangjin_warehouses.parameters(), lr = learning_rate)

# MSE regression loss.
criterion = nn.MSELoss()

for i in range(epochs):
    # Training mode (enables weight updates / training-only layers).
    dangjin_warehouses.train()

    # Forward pass over the full training set.
    output = dangjin_warehouses(tensor_x)

    # Compute the loss against the one-step-ahead targets.
    loss = criterion(output, tensor_y)

    # Gradients accumulate by default, so clear them before each backward pass.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Report the loss every 100 epochs.
    if i % 100 == 0:
        print('Epoch {}, Loss {:.5f}'.format(i, loss.item()))

# Seed the forecast with the last `sequence_length` observed hours.
x_input = np.array(energy.dangjin_warehouse[-sequence_length:])
x_input = x_input.reshape((1, sequence_length, 1))
dangjin_warehouse_pred = []
Epoch 0, Loss 29900.15234
Epoch 100, Loss 29317.77930
Epoch 200, Loss 26276.70508
Epoch 300, Loss 22257.02539
Epoch 400, Loss 21406.61328
Epoch 500, Loss 21391.32422
Epoch 600, Loss 8447.42188
Epoch 700, Loss 2214.76782
Epoch 800, Loss 1341.09875
Epoch 900, Loss 1182.24084
Epoch 1000, Loss 1131.52197
Epoch 1100, Loss 1107.95605
Epoch 1200, Loss 1092.98474
Epoch 1300, Loss 1083.00134
Epoch 1400, Loss 1074.64624
Epoch 1500, Loss 1068.63171
Epoch 1600, Loss 1064.08704
Epoch 1700, Loss 1058.59131
Epoch 1800, Loss 1054.51086
Epoch 1900, Loss 1050.88635
# Autoregressive 672-hour forecast for the warehouse plant.
for i in range(672):

    x_input = torch.Tensor(x_input)
    x_input = x_input.to(device)
    predict = dangjin_warehouses(x_input).cpu().detach().numpy()

    new_input = predict.reshape((1, 1, 1))
    # BUG FIX: the slice was x_input[:, -23:], a leftover from when the
    # sequence length was 24. With a 12-step window that slice kept the whole
    # input and let it grow by one each iteration up to 24 steps, so the model
    # was fed longer sequences than it was trained on. Keep the newest
    # sequence_length - 1 steps and append the new prediction instead.
    x_input = np.concatenate(
        (x_input[:, -(sequence_length - 1):].cpu(), new_input), axis = 1)
    dangjin_warehouse_pred.append(predict[0][0])

dangjin

ulsan

나머지 2개 변수 예측은 추후에 lstm 코드 복습 시 사용. (앞 두 변수 방식과 동일)

결과 제출

# Write the first 28 days (24*28 = 672 hourly rows) of forecasts into the
# submission frame. Columns 3 and 4 (dangjin, ulsan) were not modeled here.
sample_submission.iloc[:24*28, 1] = dangjin_floating_pred
sample_submission.iloc[:24*28, 2] = dangjin_warehouse_pred
# sample_submission.iloc[:24*28, 3] = dangjin_pred
# sample_submission.iloc[:24*28, 4] = ulsan_pred
# sample_submission

Prophet

필요한 패키지 설치하기

from datetime import datetime
from neuralprophet import NeuralProphet

# Reload the raw CSVs: the LSTM section above filled NaNs and re-indexed
# `energy` in place, and Prophet needs the original 'time' column back.
energy = pd.read_csv(path + 'energy.csv')
sample_submission = pd.read_csv(path + 'sample_submission.csv')
Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/
Collecting neuralprophet
  Downloading neuralprophet-0.3.2-py3-none-any.whl (74 kB)
     |████████████████████████████████| 74 kB 3.7 MB/s 
Requirement already satisfied: LunarCalendar>=0.0.9 in /usr/local/lib/python3.7/dist-packages (from neuralprophet) (0.0.9)
Requirement already satisfied: pandas>=1.0.4 in /usr/local/lib/python3.7/dist-packages (from neuralprophet) (1.3.5)
Requirement already satisfied: tqdm>=4.50.2 in /usr/local/lib/python3.7/dist-packages (from neuralprophet) (4.64.0)
Requirement already satisfied: torch>=1.4.0 in /usr/local/lib/python3.7/dist-packages (from neuralprophet) (1.12.0+cu113)
Collecting torch-lr-finder>=0.2.1
  Downloading torch_lr_finder-0.2.1-py3-none-any.whl (11 kB)
Requirement already satisfied: matplotlib>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from neuralprophet) (3.2.2)
Requirement already satisfied: python-dateutil>=2.8.0 in /usr/local/lib/python3.7/dist-packages (from neuralprophet) (2.8.2)
Requirement already satisfied: numpy>=1.15.4 in /usr/local/lib/python3.7/dist-packages (from neuralprophet) (1.21.6)
Requirement already satisfied: convertdate>=2.1.2 in /usr/local/lib/python3.7/dist-packages (from neuralprophet) (2.4.0)
Requirement already satisfied: holidays>=0.11.3.1 in /usr/local/lib/python3.7/dist-packages (from neuralprophet) (0.14.2)
Requirement already satisfied: ipywidgets>=7.5.1 in /usr/local/lib/python3.7/dist-packages (from neuralprophet) (7.7.1)
Collecting dataclasses>=0.6
  Downloading dataclasses-0.6-py3-none-any.whl (14 kB)
Requirement already satisfied: pymeeus<=1,>=0.3.13 in /usr/local/lib/python3.7/dist-packages (from convertdate>=2.1.2->neuralprophet) (0.5.11)
Requirement already satisfied: korean-lunar-calendar in /usr/local/lib/python3.7/dist-packages (from holidays>=0.11.3.1->neuralprophet) (0.2.1)
Requirement already satisfied: hijri-converter in /usr/local/lib/python3.7/dist-packages (from holidays>=0.11.3.1->neuralprophet) (2.2.4)
Requirement already satisfied: ipython-genutils~=0.2.0 in /usr/local/lib/python3.7/dist-packages (from ipywidgets>=7.5.1->neuralprophet) (0.2.0)
Requirement already satisfied: ipython>=4.0.0 in /usr/local/lib/python3.7/dist-packages (from ipywidgets>=7.5.1->neuralprophet) (5.5.0)
Requirement already satisfied: traitlets>=4.3.1 in /usr/local/lib/python3.7/dist-packages (from ipywidgets>=7.5.1->neuralprophet) (5.1.1)
Requirement already satisfied: ipykernel>=4.5.1 in /usr/local/lib/python3.7/dist-packages (from ipywidgets>=7.5.1->neuralprophet) (4.10.1)
Requirement already satisfied: jupyterlab-widgets>=1.0.0 in /usr/local/lib/python3.7/dist-packages (from ipywidgets>=7.5.1->neuralprophet) (1.1.1)
Requirement already satisfied: widgetsnbextension~=3.6.0 in /usr/local/lib/python3.7/dist-packages (from ipywidgets>=7.5.1->neuralprophet) (3.6.1)
Requirement already satisfied: jupyter-client in /usr/local/lib/python3.7/dist-packages (from ipykernel>=4.5.1->ipywidgets>=7.5.1->neuralprophet) (5.3.5)
Requirement already satisfied: tornado>=4.0 in /usr/local/lib/python3.7/dist-packages (from ipykernel>=4.5.1->ipywidgets>=7.5.1->neuralprophet) (5.1.1)
Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.7/dist-packages (from ipython>=4.0.0->ipywidgets>=7.5.1->neuralprophet) (57.4.0)
Requirement already satisfied: prompt-toolkit<2.0.0,>=1.0.4 in /usr/local/lib/python3.7/dist-packages (from ipython>=4.0.0->ipywidgets>=7.5.1->neuralprophet) (1.0.18)
Requirement already satisfied: pickleshare in /usr/local/lib/python3.7/dist-packages (from ipython>=4.0.0->ipywidgets>=7.5.1->neuralprophet) (0.7.5)
Requirement already satisfied: decorator in /usr/local/lib/python3.7/dist-packages (from ipython>=4.0.0->ipywidgets>=7.5.1->neuralprophet) (4.4.2)
Requirement already satisfied: pexpect in /usr/local/lib/python3.7/dist-packages (from ipython>=4.0.0->ipywidgets>=7.5.1->neuralprophet) (4.8.0)
Requirement already satisfied: pygments in /usr/local/lib/python3.7/dist-packages (from ipython>=4.0.0->ipywidgets>=7.5.1->neuralprophet) (2.6.1)
Requirement already satisfied: simplegeneric>0.8 in /usr/local/lib/python3.7/dist-packages (from ipython>=4.0.0->ipywidgets>=7.5.1->neuralprophet) (0.8.1)
Requirement already satisfied: pytz in /usr/local/lib/python3.7/dist-packages (from LunarCalendar>=0.0.9->neuralprophet) (2022.1)
Requirement already satisfied: ephem>=3.7.5.3 in /usr/local/lib/python3.7/dist-packages (from LunarCalendar>=0.0.9->neuralprophet) (4.1.3)
Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib>=2.0.0->neuralprophet) (0.11.0)
Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib>=2.0.0->neuralprophet) (1.4.4)
Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib>=2.0.0->neuralprophet) (3.0.9)
Requirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from kiwisolver>=1.0.1->matplotlib>=2.0.0->neuralprophet) (4.1.1)
Requirement already satisfied: six>=1.9.0 in /usr/local/lib/python3.7/dist-packages (from prompt-toolkit<2.0.0,>=1.0.4->ipython>=4.0.0->ipywidgets>=7.5.1->neuralprophet) (1.15.0)
Requirement already satisfied: wcwidth in /usr/local/lib/python3.7/dist-packages (from prompt-toolkit<2.0.0,>=1.0.4->ipython>=4.0.0->ipywidgets>=7.5.1->neuralprophet) (0.2.5)
Requirement already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from torch-lr-finder>=0.2.1->neuralprophet) (21.3)
Requirement already satisfied: notebook>=4.4.1 in /usr/local/lib/python3.7/dist-packages (from widgetsnbextension~=3.6.0->ipywidgets>=7.5.1->neuralprophet) (5.3.1)
Requirement already satisfied: Send2Trash in /usr/local/lib/python3.7/dist-packages (from notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets>=7.5.1->neuralprophet) (1.8.0)
Requirement already satisfied: jinja2 in /usr/local/lib/python3.7/dist-packages (from notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets>=7.5.1->neuralprophet) (2.11.3)
Requirement already satisfied: nbconvert in /usr/local/lib/python3.7/dist-packages (from notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets>=7.5.1->neuralprophet) (5.6.1)
Requirement already satisfied: jupyter-core>=4.4.0 in /usr/local/lib/python3.7/dist-packages (from notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets>=7.5.1->neuralprophet) (4.11.1)
Requirement already satisfied: nbformat in /usr/local/lib/python3.7/dist-packages (from notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets>=7.5.1->neuralprophet) (5.4.0)
Requirement already satisfied: terminado>=0.8.1 in /usr/local/lib/python3.7/dist-packages (from notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets>=7.5.1->neuralprophet) (0.13.3)
Requirement already satisfied: pyzmq>=13 in /usr/local/lib/python3.7/dist-packages (from jupyter-client->ipykernel>=4.5.1->ipywidgets>=7.5.1->neuralprophet) (23.2.0)
Requirement already satisfied: ptyprocess in /usr/local/lib/python3.7/dist-packages (from terminado>=0.8.1->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets>=7.5.1->neuralprophet) (0.7.0)
Requirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.7/dist-packages (from jinja2->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets>=7.5.1->neuralprophet) (2.0.1)
Requirement already satisfied: bleach in /usr/local/lib/python3.7/dist-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets>=7.5.1->neuralprophet) (5.0.1)
Requirement already satisfied: entrypoints>=0.2.2 in /usr/local/lib/python3.7/dist-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets>=7.5.1->neuralprophet) (0.4)
Requirement already satisfied: pandocfilters>=1.4.1 in /usr/local/lib/python3.7/dist-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets>=7.5.1->neuralprophet) (1.5.0)
Requirement already satisfied: mistune<2,>=0.8.1 in /usr/local/lib/python3.7/dist-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets>=7.5.1->neuralprophet) (0.8.4)
Requirement already satisfied: testpath in /usr/local/lib/python3.7/dist-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets>=7.5.1->neuralprophet) (0.6.0)
Requirement already satisfied: defusedxml in /usr/local/lib/python3.7/dist-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets>=7.5.1->neuralprophet) (0.7.1)
Requirement already satisfied: fastjsonschema in /usr/local/lib/python3.7/dist-packages (from nbformat->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets>=7.5.1->neuralprophet) (2.16.1)
Requirement already satisfied: jsonschema>=2.6 in /usr/local/lib/python3.7/dist-packages (from nbformat->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets>=7.5.1->neuralprophet) (4.3.3)
Requirement already satisfied: pyrsistent!=0.17.0,!=0.17.1,!=0.17.2,>=0.14.0 in /usr/local/lib/python3.7/dist-packages (from jsonschema>=2.6->nbformat->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets>=7.5.1->neuralprophet) (0.18.1)
Requirement already satisfied: importlib-resources>=1.4.0 in /usr/local/lib/python3.7/dist-packages (from jsonschema>=2.6->nbformat->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets>=7.5.1->neuralprophet) (5.9.0)
Requirement already satisfied: attrs>=17.4.0 in /usr/local/lib/python3.7/dist-packages (from jsonschema>=2.6->nbformat->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets>=7.5.1->neuralprophet) (21.4.0)
Requirement already satisfied: importlib-metadata in /usr/local/lib/python3.7/dist-packages (from jsonschema>=2.6->nbformat->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets>=7.5.1->neuralprophet) (4.12.0)
Requirement already satisfied: zipp>=3.1.0 in /usr/local/lib/python3.7/dist-packages (from importlib-resources>=1.4.0->jsonschema>=2.6->nbformat->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets>=7.5.1->neuralprophet) (3.8.1)
Requirement already satisfied: webencodings in /usr/local/lib/python3.7/dist-packages (from bleach->nbconvert->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets>=7.5.1->neuralprophet) (0.5.1)
Installing collected packages: torch-lr-finder, dataclasses, neuralprophet
Successfully installed dataclasses-0.6 neuralprophet-0.3.2 torch-lr-finder-0.2.1
def convert_time(x):
    """Shift a '1-24 hour clock' timestamp back one hour.

    The raw data labels hours 1..24 (e.g. '2018-03-01 1:00:00'); subtracting
    one from the hour field yields the 0..23 convention datetime parsers
    expect.
    """
    date_part, clock = x.split(' ')
    hour, minute, second = clock.split(':')
    return '{} {}:{}:{}'.format(date_part, int(hour) - 1, minute, second)

# Normalize every timestamp in the frame to the 0-23 hour convention.
energy['time'] = energy['time'].apply(lambda x:convert_time(x))

모델 적용하기

# Target plant for this Prophet run.
column = 'dangjin_floating'

# NeuralProphet expects a two-column frame: 'ds' (timestamp) and 'y' (target).
df = pd.DataFrame({'ds': energy['time'], 'y': energy[column]})

model = NeuralProphet()

# Fit on the hourly history.
loss = model.fit(df, freq = 'H')

# Build the frame of future timestamps to forecast, then predict over it.
df_pred = model.make_future_dataframe(df, periods = 18000)
predict = model.predict(df_pred)
INFO - (NP.df_utils._infer_frequency) - Major frequency H corresponds to 99.996% of the data.
INFO:NP.df_utils:Major frequency H corresponds to 99.996% of the data.
INFO - (NP.df_utils._infer_frequency) - Defined frequency is equal to major frequency - H
INFO:NP.df_utils:Defined frequency is equal to major frequency - H
INFO - (NP.forecaster.__handle_missing_data) - dropped 24 NAN row in 'y'
INFO:NP.forecaster:dropped 24 NAN row in 'y'
INFO - (NP.config.init_data_params) - Setting normalization to global as only one dataframe provided for training.
INFO:NP.config:Setting normalization to global as only one dataframe provided for training.
INFO - (NP.config.set_auto_batch_epoch) - Auto-set batch_size to 64
INFO:NP.config:Auto-set batch_size to 64
INFO - (NP.config.set_auto_batch_epoch) - Auto-set epochs to 81
INFO:NP.config:Auto-set epochs to 81
INFO - (NP.utils_torch.lr_range_test) - lr-range-test results: steep: 6.76E-02, min: 1.19E+00
INFO:NP.utils_torch:lr-range-test results: steep: 6.76E-02, min: 1.19E+00
INFO - (NP.utils_torch.lr_range_test) - lr-range-test results: steep: 6.76E-02, min: 1.19E+00
INFO:NP.utils_torch:lr-range-test results: steep: 6.76E-02, min: 1.19E+00
INFO - (NP.forecaster._init_train_loader) - lr-range-test selected learning rate: 7.71E-02
INFO:NP.forecaster:lr-range-test selected learning rate: 7.71E-02
Epoch[81/81]: 100%|██████████| 81/81 [01:16<00:00,  1.06it/s, SmoothL1Loss=0.0161, MAE=67.5, RMSE=101, RegLoss=0]
INFO - (NP.df_utils._infer_frequency) - Major frequency H corresponds to 99.996% of the data.
INFO:NP.df_utils:Major frequency H corresponds to 99.996% of the data.
INFO - (NP.df_utils._infer_frequency) - Defined frequency is equal to major frequency - H
INFO:NP.df_utils:Defined frequency is equal to major frequency - H
INFO - (NP.df_utils._infer_frequency) - Major frequency H corresponds to 99.994% of the data.
INFO:NP.df_utils:Major frequency H corresponds to 99.994% of the data.
INFO - (NP.df_utils._infer_frequency) - Defined frequency is equal to major frequency - H
INFO:NP.df_utils:Defined frequency is equal to major frequency - H
INFO - (NP.df_utils._infer_frequency) - Major frequency H corresponds to 99.994% of the data.
INFO:NP.df_utils:Major frequency H corresponds to 99.994% of the data.
INFO - (NP.df_utils._infer_frequency) - Defined frequency is equal to major frequency - H
INFO:NP.df_utils:Defined frequency is equal to major frequency - H
# Slice the forecast down to the two submission windows.
# 2021-02-01 ~ 2021-02-28
predict_1 = (predict
             .query('ds >= "2021-02-01 00:00:00"')
             .query('ds < "2021-03-01 00:00:00"'))

# 2021-06-09 ~ 2021-07-09
predict_2 = (predict
             .query('ds >= "2021-06-09 00:00:00"')
             .query('ds < "2021-07-09 00:00:00"'))

# Concatenate both windows' point forecasts into the submission column.
sample_submission[column] = list(predict_1['yhat1']) + list(predict_2['yhat1'])
sample_submission.head()
time dangjin_floating dangjin_warehouse dangjin ulsan
0 2021-02-01 01:00:00 -29.331793 0 0 0
1 2021-02-01 02:00:00 -28.439514 0 0 0
2 2021-02-01 03:00:00 -29.095627 0 0 0
3 2021-02-01 04:00:00 -29.376926 0 0 0
4 2021-02-01 05:00:00 -27.988543 0 0 0

참고 자료