-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathscraper.py
More file actions
191 lines (159 loc) · 7.44 KB
/
scraper.py
File metadata and controls
191 lines (159 loc) · 7.44 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
"""
Scrapes both Reddit and HN for new comments
to insert into the database.
"""
import praw # reddit api
import multiprocessing as mp # speed up collecting data
import urllib2 # for making requests to the HackerNews API
import re
import psycopg2
import hashlib
import os
from bs4 import BeautifulSoup # for parsing HackerNews
from hackernews import HackerNews # for getting all the HackerNews posting ids
# Subreddits scraped by default when gathering Reddit data.
DEFAULT_SUBREDDITS = ["programming", "python", "coding", "java", "webdev", "machinelearning", \
    "node", "linux"]
NUM_SUBREDDITS_PROCESSOR = 1 # the number of subreddits each process will process
NUM_HN_THREAD_PROCESSOR = 2 # the number of HN threads each process will process
# Base URL of the HackerNews item page; a story id is appended to fetch its comments.
HN_BASE_API_ENDPOINT = "https://news.ycombinator.com/item?id="
class Scraper(object):
    """
    Scrapes various services, namely Reddit and HackerNews,
    used to gather data to feed into the SentenceGenerator model.

    Worker processes push cleaned phrases onto a shared
    multiprocessing queue; the parent drains the queue into
    self.phrases and can then persist them with insert_into_db().
    """
    def __init__(self, logger):
        """
        Initializes an instance of Scraper. Requires that a logger
        to denote the progress of the Scraper be passed in.
        """
        self.phrases = []               # cleaned phrases gathered from all sources
        self.hackernews = HackerNews()  # client used to list top HN story ids
        self.output = mp.Queue()        # shared queue the worker processes write to
        self.logger = logger

    def gather_reddit_data(self):
        """
        Gathers comments and submission titles from Reddit in parallel.
        Appends the cleaned phrases to self.phrases (returns nothing).
        """
        # split the list of subreddits to allow for parallel processing
        subreddit_sublists = Scraper._split_into_sublists(DEFAULT_SUBREDDITS, \
            NUM_SUBREDDITS_PROCESSOR)
        # setup processes, run, and collect results
        reddit_processes = [mp.Process(target=self._gather_reddit_data, args=(subreddits,)) \
            for subreddits in subreddit_sublists]
        self._execute_and_collect_processes(reddit_processes)

    def gather_hn_data(self):
        """
        Gathers comments from the top HackerNews stories in parallel.
        Appends the cleaned phrases to self.phrases (returns nothing).
        """
        # get top stories from HN and split the list
        top_stories = self.hackernews.top_stories()[:3]
        stories_sublists = Scraper._split_into_sublists(top_stories, NUM_HN_THREAD_PROCESSOR)
        hn_processes = [mp.Process(target=self._gather_hn_data, args=(stories,))
                        for stories in stories_sublists]
        self._execute_and_collect_processes(hn_processes)

    def _execute_and_collect_processes(self, processes):
        """
        Starts and joins the worker processes, then drains the shared
        queue into self.phrases. Phrases that cannot be normalized to
        ascii are skipped with a warning instead of crashing the drain.
        """
        for p_num, process in enumerate(processes):
            self.logger.debug("Starting process %d " % p_num)
            process.start()
        for p_num, process in enumerate(processes):
            self.logger.debug("Joining process %d " % p_num)
            process.join()
        self.logger.debug("Combining results")
        while self.output.qsize():
            phrase = self.output.get()
            try:
                phrase = phrase.decode("utf-8").encode("ascii", "ignore")
                self.phrases.append(phrase)
            except UnicodeError:
                # BUG FIX: decode() raises UnicodeDecodeError, which the old
                # UnicodeEncodeError handler never caught, so a malformed
                # phrase crashed the loop. UnicodeError covers both cases.
                self.logger.warning("Phrase %s could not be decoded " %phrase)

    @classmethod
    def _split_into_sublists(cls, lst, size):
        """
        Splits the list, lst, into sublists of size 'size'.
        Returns a new list of consecutive chunks; the final chunk may
        be shorter than 'size' when len(lst) is not a multiple of it.
        """
        sublists = []
        # range (not xrange) so the helper runs under both Python 2 and 3
        for i in range(0, len(lst), size):
            sublists.append(lst[i : i + size])
        return sublists

    def _gather_reddit_data(self, subreddits):
        """
        Gathers data from the Reddit API. The param, subreddits,
        are all the subreddits this process will gather data from;
        cleaned titles and comments are put on the shared output queue.
        """
        reddit = praw.Reddit(user_agent="Scrum Generator")
        for subreddit in subreddits:
            # force lazy eval by converting to list
            top_submissions = list(reddit.get_subreddit(subreddit).get_top(limit=2))
            titles = [entry.title.encode("utf8", "ignore") for entry in top_submissions]
            # flatten the per-submission comment lists, skipping the
            # unexpanded MoreComments placeholders praw returns
            comments = sum([[c.body for c in submission.comments \
                if not isinstance(c, praw.objects.MoreComments)] \
                for submission in top_submissions], [])
            for comment in comments:
                self.output.put(Scraper._clean_data(comment))
            for title in titles:
                self.output.put(Scraper._clean_data(title))

    def _gather_hn_data(self, entries):
        """
        Gathers data from the Hacker News item pages. The param,
        entries, represents all of the post ids this process will
        gather data from; cleaned comments go on the output queue.
        """
        for entry in entries:
            connection = urllib2.urlopen(HN_BASE_API_ENDPOINT + str(entry))
            try:
                response = connection.read()
            finally:
                # BUG FIX: the urlopen handle was never closed (socket leak)
                connection.close()
            soup = BeautifulSoup(response, "html.parser")
            all_comments = soup.findAll("span", {"class" : "comment"})
            for comment in all_comments:
                # strip residual tags, HN's "reply" link text, and newlines
                cleaned_html = re.sub('<[^<]+?>|reply|\n', "", comment.text)
                cleaned_data = Scraper._clean_data(cleaned_html)
                self.output.put(cleaned_data)

    @classmethod
    def _clean_data(cls, phrase):
        """
        Cleans each phrase from both Reddit and HackerNews
        to be processed by SentenceGenerator.
        Returns a cleaned string free of parens, curly and square
        brackets, and quotes along with spaces after punctuation.
        """
        # replace illegal characters
        cleaned_phrase = re.sub("[(%~`<>#:@/^*&$\t?=|){}\\[\\]\"\n]", "", phrase)
        # make sure each sentence-ending mark is followed by a space;
        # NOTE: this also rewrites '!' into '.' ('?' is already stripped above)
        cleaned_phrase = re.sub(r'[?!.]([a-zA-Z])', r'. \1', cleaned_phrase)
        return cleaned_phrase

    def insert_into_db(self):
        """
        Inserts the gathered phrases into the Postgres DB.
        Connection parameters come from the DATABASE and USER
        environment variables. Duplicate phrases (rejected by the
        table's integrity constraint) are skipped with a warning.
        """
        self.logger.debug("Inserting data in to the database")
        if len(self.phrases) == 0:
            self.logger.info("No phrases to insert!")
            return
        self.logger.debug("Attempting to insert %d phrases into the database" \
            % len(self.phrases))
        conn = psycopg2.connect(database=os.environ["DATABASE"], user=os.environ["USER"])
        try:
            cur = conn.cursor()
            successful_insertion = 0
            for phrase in self.phrases:
                self.logger.debug("Attempting to insert %s..." % phrase)
                # small deterministic hash used as the uniqueness key
                phrase_hash = int(hashlib.sha1(phrase).hexdigest(), 16) % 10 ** 8
                try:
                    # BUG FIX: parameterized query instead of hand-escaped
                    # string interpolation; psycopg2 quotes the values,
                    # closing the SQL-injection hole.
                    cur.execute("INSERT INTO phrases (phrase, phrase_hash) VALUES (%s, %s)",
                                (phrase, phrase_hash))
                    self.logger.debug("Successfully inserted %s" % phrase)
                    successful_insertion += 1
                    conn.commit()
                except psycopg2.IntegrityError: # duplicate comments not allowed
                    self.logger.warn("The phrase '%s' is already in the database" % phrase)
                    conn.rollback()
            self.logger.debug("Successfully inserted %d / %d phrases into the db" \
                % (successful_insertion, len(self.phrases)))
        finally:
            # BUG FIX: the connection was never closed
            conn.close()