Final_Senti_Stocks.py
import os
import sys

import tweepy
import requests
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from textblob import TextBlob

# First we log in to Twitter.
# Fill in your own API credentials before running:
# consumer_key =
# consumer_secret =
# access_token =
# access_token_secret =
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
user = tweepy.API(auth)

# Where the CSV file will live
FILE_NAME = 'historical.csv'
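# Optional, non-executed sanity check (assuming valid credentials are filled in
# above): tweepy's API.verify_credentials() raises an error if the keys are wrong,
# so it is a quick way to confirm the login before searching for tweets.
#
#     user.verify_credentials()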
def stock_sentiment(quote, num_tweets):
    # Checks whether the sentiment for our quote is positive or negative.
    # Returns True if the majority of valid (subjective) tweets are positive.
    # Note: newer tweepy versions (v4+) expose this search as user.search_tweets.
    list_of_tweets = user.search(quote, count=num_tweets)
    positive, null = 0, 0

    for tweet in list_of_tweets:
        blob = TextBlob(tweet.text).sentiment
        if blob.subjectivity == 0:
            # Purely objective tweets carry no sentiment, so skip them
            null += 1
            continue
        if blob.polarity > 0:
            positive += 1

    return positive > ((num_tweets - null) / 2)
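# A quick, non-executed illustration of the TextBlob scores stock_sentiment()
# relies on. The exact numbers come from TextBlob's pattern-based analyzer and
# may vary by version, so treat these as indicative only:
#
#     TextBlob("AAPL is doing great today!").sentiment
#     # -> polarity > 0, subjectivity > 0   (counted as a positive tweet)
#     TextBlob("AAPL closed the session unchanged").sentiment
#     # -> typically subjectivity == 0      (counted as a null/neutral tweet)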
def get_historical(quote):
    # Download historical prices as CSV. Note: Google has since discontinued
    # this CSV endpoint, so the request may no longer return data.
    url = 'http://www.google.com/finance/historical?q=NASDAQ%3A' + quote + '&output=csv'
    r = requests.get(url, stream=True)

    if r.status_code != 200:
        return False
    with open(FILE_NAME, 'wb') as f:
        for chunk in r:
            f.write(chunk)
    return True
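# A non-executed sketch of the CSV layout that stock_prediction() below assumes.
# The old Google Finance export was a header row followed by rows in reverse
# chronological order (newest first); the sample values here are made up:
#
#     Date,Open,High,Low,Close,Volume
#     26-May-17,154.00,154.24,153.31,153.61,21927637
#     25-May-17,153.73,154.35,153.03,153.87,19235598
#
# stock_prediction() skips the header and parses column 1 (Open) as a float.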
def stock_prediction(quote):
    # Collect data points from the CSV (skip the header, read column 1 as a float)
    dataset = []
    with open(FILE_NAME) as f:
        for n, line in enumerate(f):
            if n != 0:
                dataset.append(float(line.split(',')[1]))
    dataset = np.array(dataset)

    # Create dataset matrix (X=t and Y=t+1)
    def create_dataset(dataset):
        dataX = [dataset[n + 1] for n in range(len(dataset) - 2)]
        return np.array(dataX), dataset[2:]

    trainX, trainY = create_dataset(dataset)

    # Create and fit a multilayer perceptron model
    model = Sequential()
    model.add(Dense(8, input_dim=1, activation='relu'))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    model.fit(trainX, trainY, epochs=200, batch_size=2, verbose=2)

    # Our prediction for tomorrow (dataset[0] is the most recent price)
    prediction = model.predict(np.array([dataset[0]]))
    result = 'The price of %s will move from %s to %s' % (quote, dataset[0], prediction[0][0])

    plt.plot([dataset[0], prediction[0][0]], marker='o')
    plt.show()
    return result
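# A non-executed illustration of how create_dataset() pairs inputs and targets.
# With dataset = np.array([5.0, 4.0, 3.0, 2.0]) (newest row of the CSV first):
#
#     trainX = np.array([4.0, 3.0])   # dataset[n+1] for n in range(len(dataset)-2)
#     trainY = np.array([3.0, 2.0])   # dataset[2:]
#
# so the single-input MLP learns a one-step mapping between consecutive rows of
# the file, and model.predict([dataset[0]]) applies that mapping to the most
# recent price.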
# Loop over the stock quotes we care about
StockList = ['AAPL', 'GOOGL', 'FB']

for stock_quote in StockList:
    # Check if the stock sentiment is positive
    if not stock_sentiment(stock_quote, num_tweets=100):
        print('This stock has negative sentiment, please re-run the script')
        sys.exit()

    # Check if we got the historical data
    if not get_historical(stock_quote):
        print('Google did not return the historical CSV, please re-run the script and')
        print('enter a valid stock quote from NASDAQ')
        sys.exit()

    # We have our file, so we build the neural net and get the prediction
    print(stock_prediction(stock_quote))

    # We are done, so we delete the CSV file
    os.remove(FILE_NAME)