Module.py
import pandas_datareader as pdr
from flask import Flask, request

app = Flask(__name__)


@app.route("/", methods=["GET", "POST"])
def merge_models():
    if request.method == "POST":
        input_data = request.form["input"]
        # Your code to merge the machine learning models goes here
        result = "Your result here"
        return result
    # On GET, render a minimal HTML form.
    return """
    <form action="/" method="post">
        <label for="input">Input:</label>
        <input type="text" id="input" name="input">
        <input type="submit" value="Submit">
    </form>
    """


if __name__ == "__main__":
    # app.run() blocks until the development server is stopped, so the
    # forecasting script below only runs after the server exits (or on import).
    app.run()

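# Download historical prices for the requested ticker from Tiingo and cache them to a CSV file.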
name_stock = input("Enter a ticker symbol: ")
df = pdr.get_data_tiingo(name_stock,api_key='9431424bc44c9f3c4a801bf9615e69d224bbf7e9')
file_name = f'{name_stock}.csv'
df.to_csv(file_name)
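# Reload the cached CSV, keep only the closing-price series, and plot it.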
import pandas as pd
df = pd.read_csv(file_name)
df1 = df.reset_index()['close']
import matplotlib.pyplot as plt
plt.plot(df1)
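# Scale the closing prices to the [0, 1] range before feeding them to the LSTM.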
import numpy as np
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0,1))
df1 = scaler.fit_transform(np.array(df1).reshape(-1,1))
# splitting dataset for training and testing
training_size = int(len(df1)*0.85)
test_size = len(df1)-training_size
train_data,test_data = df1[0:training_size,:],df1[training_size:len(df1),:1]
def create_dataset(dataset, time_step=1):
    # Convert a series into supervised samples: each X row holds `time_step`
    # consecutive values and y is the value that immediately follows them.
    dataX, dataY = [], []
    for i in range(len(dataset) - time_step - 1):
        a = dataset[i:(i + time_step), 0]
        dataX.append(a)
        dataY.append(dataset[i + time_step, 0])
    return np.array(dataX), np.array(dataY)
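# Build the supervised train/test datasets with a 100-step look-back window.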
time_step = 100
X_train,y_train = create_dataset(train_data,time_step)
X_test,ytest = create_dataset(test_data,time_step)
print(X_train)
print(X_test.shape)
print(ytest.shape)
# reshape input to 3D (samples, time steps, features) as required by the LSTM layers
X_train = X_train.reshape(X_train.shape[0],X_train.shape[1],1)
X_test = X_test.reshape(X_test.shape[0],X_test.shape[1],1)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
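# Stacked LSTM: two sequence-returning layers feeding a final LSTM layer, then a single-unit Dense output.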
model = Sequential()
model.add(LSTM(50,return_sequences = True, input_shape=(100,1)))
model.add(LSTM(50,return_sequences = True))
model.add(LSTM(50))
model.add(Dense(1))
model.compile(loss='mean_squared_error',optimizer='adam')
model.fit(X_train,y_train,validation_data=(X_test,ytest),epochs=100,batch_size=64,verbose=1)
# Predict on both splits and undo the MinMax scaling so the values are back in price units.
train_predict = model.predict(X_train)
test_predict = model.predict(X_test)
train_predict = scaler.inverse_transform(train_predict)
test_predict = scaler.inverse_transform(test_predict)
import math
from sklearn.metrics import mean_squared_error
# Inverse-transform the targets as well so the RMSE is computed on the same (price) scale as the predictions.
print("Train RMSE:", math.sqrt(mean_squared_error(scaler.inverse_transform(y_train.reshape(-1, 1)), train_predict)))
print("Test RMSE:", math.sqrt(mean_squared_error(scaler.inverse_transform(ytest.reshape(-1, 1)), test_predict)))
look_back = 100
# Shift each prediction block so it lines up with its position in the original series.
trainPredictPlot = np.empty_like(df1)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(train_predict) + look_back, :] = train_predict
testPredictPlot = np.empty_like(df1)
testPredictPlot[:, :] = np.nan
testPredictPlot[len(train_predict) + (look_back * 2) + 1:len(df1) - 1, :] = test_predict
print(testPredictPlot)
print(trainPredictPlot)
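# Plot the actual closing prices alongside the shifted train and test predictions.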
plt.plot(scaler.inverse_transform(df1))
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.show()
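# Illustrative sketch (not part of the original script): one way the Flask endpoint above
# could use the trained model. `forecast_next_close` is a hypothetical helper, assuming the
# caller passes the most recent scaled closing prices; it returns the next-step prediction
# converted back to price units via the fitted scaler.
def forecast_next_close(trained_model, fitted_scaler, recent_scaled_closes, window=100):
    # Take the last `window` scaled values and shape them as (1, window, 1) for the LSTM.
    x = np.array(recent_scaled_closes[-window:]).reshape(1, window, 1)
    next_scaled = trained_model.predict(x)  # shape (1, 1), still in the [0, 1] scale
    return float(fitted_scaler.inverse_transform(next_scaled)[0, 0])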