-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathablation.py
More file actions
365 lines (257 loc) · 13.2 KB
/
ablation.py
File metadata and controls
365 lines (257 loc) · 13.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
# This file stores a variety of helper functions used in functions that produce
# feature vectors when called. The purpose of the varying feature functions is
# to support Ablation Analysis, which is the process of measuring how a model's
# performance is affected by the removal of a single feature.
################################################################################
################################## Imports #####################################
################################################################################
import nltk
from datetime import datetime
from nltk.tokenize import word_tokenize
################################################################################
############################## Helper Methods ##################################
################################################################################
# Returns the MSE of a list of predictions & labels
def MSE(predictions, labels):
    """Return the mean squared error between predictions and labels.

    predictions, labels: equal-length sequences of numbers. Extra elements
    in the longer sequence are ignored (zip semantics, as before).

    Raises ValueError when there are no pairs to average, instead of the
    confusing ZeroDivisionError the naive division would produce.
    """
    differences = [(x - y) ** 2 for x, y in zip(predictions, labels)]
    if not differences:
        raise ValueError("MSE requires at least one prediction/label pair")
    return sum(differences) / len(differences)
# Returns a list of the frequencies of a given title's Parts of Speech
def parts_of_speech(title):
    """Count coarse Part-of-Speech categories in a title.

    Tokenizes the title, tags each token with nltk.pos_tag, then buckets
    the Penn Treebank tags into six broad categories by tag prefix.
    Returns the six counts as a list, in the fixed order:
    Adjectives, Nouns, Adverbs, Pronouns, Verbs, Determiners.
    More info: https://realpython.com/nltk-nlp-python/#tagging-parts-of-speech
    """
    # (prefix, category) pairs — order matches the returned list's order.
    buckets = [
        ("JJ", "Adjectives"),
        ("NN", "Nouns"),
        ("RB", "Adverbs"),
        ("PRP", "Pronouns"),
        ("VB", "Verbs"),
        ("DT", "Determiners"),
    ]
    counts = {category: 0 for _, category in buckets}
    for _token, tag in nltk.pos_tag(word_tokenize(title)):
        for prefix, category in buckets:
            if tag.startswith(prefix):
                counts[category] += 1
                break  # each token counts toward at most one category
    # dicts preserve insertion order, so this matches the bucket order above
    return list(counts.values())
# Returns a one-hot encoding (OHE) of the hour of day, and weekday
# OHE allows for encoding an n-length categorical feature in n - 1 slots
def one_hot_encoding_time(unixtime):
    """One-hot encode the local hour-of-day and weekday of a unix timestamp.

    unixtime: numeric or string unix timestamp (seconds).
    Returns a 29-element 0/1 list: 23 hour slots followed by 6 weekday
    slots. Hour 0 and Monday (weekday 0) are each encoded as all zeros,
    which is why only n - 1 slots are needed per category.
    NOTE(review): fromtimestamp uses the machine's local timezone.
    https://docs.python.org/3/library/datetime.html#datetime.datetime.hour
    https://docs.python.org/3/library/datetime.html#datetime.date.weekday
    """
    stamp = datetime.fromtimestamp(int(float(unixtime)))
    hour_slots = [0] * 23
    day_slots = [0] * 6
    if stamp.hour:
        hour_slots[stamp.hour - 1] = 1
    weekday = stamp.weekday()
    if weekday:
        day_slots[weekday - 1] = 1
    return hour_slots + day_slots
# Helper function returns a list that represents the presence of popular words
def popular_words(title, n, n_popular_words):
    """Return an n-length 0/1 list marking which popular words the title uses.

    title: text to tokenize.
    n: length of the output vector (assumed >= len(n_popular_words)).
    n_popular_words: ordered list of the n most popular words; slot i of
    the output is 1 iff n_popular_words[i] appears as a token in title.

    Uses a word -> first-index map instead of calling list.index per token,
    turning the per-token lookup from O(n) into O(1) while preserving the
    original first-occurrence semantics for any duplicated words.
    """
    presence = [0] * n
    first_index = {}
    for i, word in enumerate(n_popular_words):
        first_index.setdefault(word, i)  # keep the FIRST occurrence, like list.index
    for token in word_tokenize(title):
        slot = first_index.get(token)
        if slot is not None:
            presence[slot] = 1
    return presence
################################################################################
############################# Feature Methods ##################################
################################################################################
# Creates a feature vector for a given row of data
def feature_all(datum, n_popular_words, n):
    """Build the full feature vector for one row (no feature ablated).

    datum: dict-like row with 'score', 'number_of_comments', 'title',
    and 'unixtime' keys. n / n_popular_words parameterize the
    popular-word presence features.
    """
    title = datum['title']
    feat = [1]  # bias term
    # Score (price of awards given)
    feat.append(int(datum['score']))
    # Number of comments
    feat.append(int(datum['number_of_comments']))
    # Character length of the title
    feat.append(len(title))
    # Word length of the title
    feat.append(len(word_tokenize(title)))
    # Binary flag: content declared original ("[OC]" tag in the title)
    feat.append(1 if "[oc]" in title.lower() else 0)
    # Frequencies of generalized Parts of Speech
    feat.extend(parts_of_speech(title))
    # One-hot encodings of hour-of-day and weekday
    feat.extend(one_hot_encoding_time(datum['unixtime']))
    # Presence flags for the n most popular words
    feat.extend(popular_words(title, n, n_popular_words))
    return feat
# Creates a feature vector for a given row of data
def feature_exc_score(datum, n_popular_words, n):
    """Build the feature vector with the score feature ablated.

    Identical to feature_all except the score (awards price) feature
    is omitted, for ablation analysis.
    """
    title = datum['title']
    feat = [1]  # bias term
    # Ablated: score (price of awards given)
    # Number of comments
    feat.append(int(datum['number_of_comments']))
    # Character length of the title
    feat.append(len(title))
    # Word length of the title
    feat.append(len(word_tokenize(title)))
    # Binary flag: content declared original ("[OC]" tag in the title)
    feat.append(1 if "[oc]" in title.lower() else 0)
    # Frequencies of generalized Parts of Speech
    feat.extend(parts_of_speech(title))
    # One-hot encodings of hour-of-day and weekday
    feat.extend(one_hot_encoding_time(datum['unixtime']))
    # Presence flags for the n most popular words
    feat.extend(popular_words(title, n, n_popular_words))
    return feat
# Creates a feature vector for a given row of data
def feature_exc_num_com(datum, n_popular_words, n):
    """Build the feature vector with the comment-count feature ablated.

    Identical to feature_all except the number-of-comments feature
    is omitted, for ablation analysis.
    """
    title = datum['title']
    feat = [1]  # bias term
    # Score (price of awards given)
    feat.append(int(datum['score']))
    # Ablated: number of comments
    # Character length of the title
    feat.append(len(title))
    # Word length of the title
    feat.append(len(word_tokenize(title)))
    # Binary flag: content declared original ("[OC]" tag in the title)
    feat.append(1 if "[oc]" in title.lower() else 0)
    # Frequencies of generalized Parts of Speech
    feat.extend(parts_of_speech(title))
    # One-hot encodings of hour-of-day and weekday
    feat.extend(one_hot_encoding_time(datum['unixtime']))
    # Presence flags for the n most popular words
    feat.extend(popular_words(title, n, n_popular_words))
    return feat
# Creates a feature vector for a given row of data
def feature_exc_len_char(datum, n_popular_words, n):
    """Build the feature vector with the character-length feature ablated.

    Identical to feature_all except the title character-length feature
    is omitted, for ablation analysis.
    """
    title = datum['title']
    feat = [1]  # bias term
    # Score (price of awards given)
    feat.append(int(datum['score']))
    # Number of comments
    feat.append(int(datum['number_of_comments']))
    # Ablated: character length of the title
    # Word length of the title
    feat.append(len(word_tokenize(title)))
    # Binary flag: content declared original ("[OC]" tag in the title)
    feat.append(1 if "[oc]" in title.lower() else 0)
    # Frequencies of generalized Parts of Speech
    feat.extend(parts_of_speech(title))
    # One-hot encodings of hour-of-day and weekday
    feat.extend(one_hot_encoding_time(datum['unixtime']))
    # Presence flags for the n most popular words
    feat.extend(popular_words(title, n, n_popular_words))
    return feat
# Creates a feature vector for a given row of data
def feature_exc_len_word(datum, n_popular_words, n):
    """Build the feature vector with the word-length feature ablated.

    Identical to feature_all except the title word-count feature
    is omitted, for ablation analysis.
    """
    title = datum['title']
    feat = [1]  # bias term
    # Score (price of awards given)
    feat.append(int(datum['score']))
    # Number of comments
    feat.append(int(datum['number_of_comments']))
    # Character length of the title
    feat.append(len(title))
    # Ablated: word length of the title
    # Binary flag: content declared original ("[OC]" tag in the title)
    feat.append(1 if "[oc]" in title.lower() else 0)
    # Frequencies of generalized Parts of Speech
    feat.extend(parts_of_speech(title))
    # One-hot encodings of hour-of-day and weekday
    feat.extend(one_hot_encoding_time(datum['unixtime']))
    # Presence flags for the n most popular words
    feat.extend(popular_words(title, n, n_popular_words))
    return feat
# Creates a feature vector for a given row of data
def feature_exc_oc(datum, n_popular_words, n):
    """Build the feature vector with the original-content flag ablated.

    Identical to feature_all except the "[OC]" binary feature
    is omitted, for ablation analysis.
    """
    title = datum['title']
    feat = [1]  # bias term
    # Score (price of awards given)
    feat.append(int(datum['score']))
    # Number of comments
    feat.append(int(datum['number_of_comments']))
    # Character length of the title
    feat.append(len(title))
    # Word length of the title
    feat.append(len(word_tokenize(title)))
    # Ablated: binary flag for content declared original ("[OC]")
    # Frequencies of generalized Parts of Speech
    feat.extend(parts_of_speech(title))
    # One-hot encodings of hour-of-day and weekday
    feat.extend(one_hot_encoding_time(datum['unixtime']))
    # Presence flags for the n most popular words
    feat.extend(popular_words(title, n, n_popular_words))
    return feat
# Creates a feature vector for a given row of data
def feature_exc_pos(datum, n_popular_words, n):
    """Build the feature vector with the Parts-of-Speech features ablated.

    Identical to feature_all except the POS frequency features
    are omitted, for ablation analysis.
    """
    title = datum['title']
    feat = [1]  # bias term
    # Score (price of awards given)
    feat.append(int(datum['score']))
    # Number of comments
    feat.append(int(datum['number_of_comments']))
    # Character length of the title
    feat.append(len(title))
    # Word length of the title
    feat.append(len(word_tokenize(title)))
    # Binary flag: content declared original ("[OC]" tag in the title)
    feat.append(1 if "[oc]" in title.lower() else 0)
    # Ablated: frequencies of generalized Parts of Speech
    # One-hot encodings of hour-of-day and weekday
    feat.extend(one_hot_encoding_time(datum['unixtime']))
    # Presence flags for the n most popular words
    feat.extend(popular_words(title, n, n_popular_words))
    return feat
# Creates a feature vector for a given row of data
def feature_exc_ohe(datum, n_popular_words, n):
    """Build the feature vector with the time one-hot encodings ablated.

    Identical to feature_all except the hour/weekday one-hot features
    are omitted, for ablation analysis.
    """
    title = datum['title']
    feat = [1]  # bias term
    # Score (price of awards given)
    feat.append(int(datum['score']))
    # Number of comments
    feat.append(int(datum['number_of_comments']))
    # Character length of the title
    feat.append(len(title))
    # Word length of the title
    feat.append(len(word_tokenize(title)))
    # Binary flag: content declared original ("[OC]" tag in the title)
    feat.append(1 if "[oc]" in title.lower() else 0)
    # Frequencies of generalized Parts of Speech
    feat.extend(parts_of_speech(title))
    # Ablated: one-hot encodings of hour-of-day and weekday
    # Presence flags for the n most popular words
    feat.extend(popular_words(title, n, n_popular_words))
    return feat
# Creates a feature vector for a given row of data
def feature_exc_popular_word(datum, n_popular_words, n):
    """Build the feature vector with the popular-word features ablated.

    Identical to feature_all except the popular-word presence features
    are omitted, for ablation analysis.
    """
    title = datum['title']
    feat = [1]  # bias term
    # Score (price of awards given)
    feat.append(int(datum['score']))
    # Number of comments
    feat.append(int(datum['number_of_comments']))
    # Character length of the title
    feat.append(len(title))
    # Word length of the title
    feat.append(len(word_tokenize(title)))
    # Binary flag: content declared original ("[OC]" tag in the title)
    feat.append(1 if "[oc]" in title.lower() else 0)
    # Frequencies of generalized Parts of Speech
    feat.extend(parts_of_speech(title))
    # One-hot encodings of hour-of-day and weekday
    feat.extend(one_hot_encoding_time(datum['unixtime']))
    # Ablated: presence flags for the n most popular words
    return feat