'''
Created on Aug 2, 2017
@author: tyler reece

Streams tweets from the Twitter API and classifies each one's sentiment
with a pickled Naive Bayes classifier, logging results to a CSV file.
'''
import json
import re
import csv
import pickle
import time
from datetime import datetime

import nltk  # used by the commented-out training block below
import tweepy
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy.streaming import StreamListener
# keys and tokens from the Twitter Dev Console
consumer_key = ''
consumer_secret = ''
access_token = ''
access_secret = ''
#start replaceTwoOrMore
def replaceTwoOrMore(s):
    #collapse runs of 2 or more repeated characters down to exactly two
    pattern = re.compile(r"(.)\1{1,}", re.DOTALL)
    return pattern.sub(r"\1\1", s)
#end
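# e.g. replaceTwoOrMore("huuungry") returns "huungry"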
def getStopWordList(stopWordListFileName):
    #read the stop-word file (one word per line) and build a list,
    #seeding it with the placeholder tokens produced by processTweet
    stopWords = ['AT_USER', 'URL']
    with open(stopWordListFileName, 'r') as stopList:
        for line in stopList:
            stopWords.append(line.strip())
    return stopWords

stopWords = getStopWordList('stopwords.txt')
def getFeatureVector(tweet):
    featureVector = []
    #split tweet into words
    words = tweet.split()
    for w in words:
        #replace runs of two or more characters with two occurrences
        w = replaceTwoOrMore(w)
        #strip punctuation
        w = w.strip('\'"?,.')
        #check if the word starts with a letter
        val = re.search(r"^[a-zA-Z][a-zA-Z0-9]*$", w)
        #ignore stop words and non-alphanumeric tokens
        if w in stopWords or val is None:
            continue
        else:
            featureVector.append(w.lower())
    return featureVector
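# e.g. getFeatureVector("great daaay") returns ['great', 'daay'],
# assuming neither word is listed in stopwords.txt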
def processTweet(tweet):
    #Convert to lower case
    tweet = tweet.lower()
    #Convert www.* or https?://* to URL
    tweet = re.sub(r'((www\.[^\s]+)|(https?://[^\s]+))', 'URL', tweet)
    #Convert @username to AT_USER
    tweet = re.sub(r'@[^\s]+', 'AT_USER', tweet)
    #Collapse additional white space
    tweet = re.sub(r'[\s]+', ' ', tweet)
    #Replace #word with word
    tweet = re.sub(r'#([^\s]+)', r'\1', tweet)
    #trim surrounding quotes
    tweet = tweet.strip('\'"')
    return tweet
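# e.g. processTweet("Loving #Python http://example.com @user") returns
# 'loving python URL AT_USER' (illustrative URL and handle)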
def extract_features(tweet):
    #tweet here is a feature vector (list of words), not raw text
    tweet_words = set(tweet)
    features = {}
    for word in featureList:
        features['contains(%s)' % word] = (word in tweet_words)
    return features
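# e.g. with featureList == ['good', 'bad'], extract_features(['good', 'day'])
# returns {'contains(good)': True, 'contains(bad)': False}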
featureList = []
tweets = []
print('Training classifier...', str(datetime.now()))
with open('10kTestSet.csv', 'r', newline='') as csvFile:
    inpTweets = csv.reader(csvFile, delimiter=',', quotechar='|')
    for row in inpTweets:
        sentiment = row[0]
        tweet = row[1]
        processedTweet = processTweet(tweet)
        featureVector = getFeatureVector(processedTweet)
        featureList.extend(featureVector)
        tweets.append((featureVector, sentiment))
# Extract feature vector for all tweets in one shot
#training_set = nltk.classify.util.apply_features(extract_features, tweets)
# Train the classifier
#NBClassifier = nltk.NaiveBayesClassifier.train(training_set)
#f = open('my_classifier_10k.pickle', 'wb')
#pickle.dump(NBClassifier, f)
#f.close()
#print ('classifier stored as my_classifier.pickle')
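# Note: my_classifier_10k.pickle must already exist on disk; uncommenting the
# training block above on a first run is one way to produce it.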
with open('my_classifier_10k.pickle', 'rb') as f:
    NBClassifier = pickle.load(f)
print('Classifier loaded. Now classifying tweets.', str(datetime.now()))
#Twitter streaming start ------------------------------------------------------------------------
# attempt authentication
try:
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_secret)
    api = tweepy.API(auth)
except Exception:
    print("Error: Authentication Failed")
class MyListener(StreamListener):

    def __init__(self, time_limit=600):
        #collect tweets for time_limit seconds, appending results to a CSV
        self.start_time = time.time()
        self.limit = time_limit
        self.saveFile = open('TweetSentiment.csv', 'a', encoding='utf-8')
        super(MyListener, self).__init__()

    def on_connect(self):
        print('Connected to Stream')
    def on_data(self, data):
        try:
            tweet = json.loads(data)
            if (time.time() - self.start_time) < self.limit:
                if 'text' in tweet:
                    processedTweet = processTweet(tweet['text'])
                    tweetSentiment = NBClassifier.classify(extract_features(getFeatureVector(processedTweet)))
                    print(tweetSentiment, ',', processedTweet)
                else:
                    processedTweet = 'no tweet'
                    tweetSentiment = 'none'
                #pull metadata, defaulting to 'none' when a field is absent
                idStr = tweet.get('id_str', 'none')
                createdAt = tweet.get('created_at', 'none')
                favoriteCount = tweet.get('favorite_count', 'none')
                #the streaming payload names this field 'retweet_count'
                retweetedCount = tweet.get('retweet_count', 'none')
                user = tweet.get('user') or {}
                screenName = user.get('screen_name', 'none')
                statusesCount = user.get('statuses_count', 'none')
                followersCount = user.get('followers_count', 'none')
                location = user.get('location', 'none')
                language = user.get('lang', 'none')
                #write one pipe-delimited record per tweet
                fields = [tweetSentiment, str(idStr), processedTweet,
                          str(screenName), str(createdAt), str(favoriteCount),
                          str(retweetedCount), str(statusesCount),
                          str(followersCount), str(location), str(language)]
                self.saveFile.write('|'.join(fields) + '|\n')
                return True
            else:
                self.saveFile.close()
                print('Stream complete!')
                return False
        except BaseException as e:
            print("Error on_data: %s" % str(e))
            return True
    def on_error(self, status):
        print(status)
        return True
#change these to stream whatever filter you want
twitter_stream = Stream(auth, MyListener())
twitter_stream.filter(track=['#GoodThingsForLife'])
#Twitter streaming end ------------------------------------------------------------------------
print('Sentiment complete...', str(datetime.now()))
# Quick smoke test -- uncomment and supply a tweet string to sanity-check the classifier:
#testTweet = processTweet("sample tweet text")
#print(NBClassifier.classify(extract_features(getFeatureVector(testTweet))))