From b50c2e7acc6ef45eb859acba645b628e444d7939 Mon Sep 17 00:00:00 2001
From: Matt Singleton
Date: Tue, 12 Sep 2017 22:53:36 -0400
Subject: new source The Washington Times

---
 unbiased/sources/washtimes.py | 34 ++++++++++++++++++++++++++++++++++
 1 file changed, 34 insertions(+)
 create mode 100644 unbiased/sources/washtimes.py

diff --git a/unbiased/sources/washtimes.py b/unbiased/sources/washtimes.py
new file mode 100644
index 0000000..e344af6
--- /dev/null
+++ b/unbiased/sources/washtimes.py
@@ -0,0 +1,34 @@
+import urllib.parse
+
+from unbiased.sources.base import NewsSource
+
+class TheWashingtonTimes(NewsSource):
+
+    name = 'The Washington Times'
+    shortname = 'WashTimes'
+    url = 'http://www.washingtontimes.com/'
+
+    @classmethod
+    def _fetch_urls(cls):
+        soup = cls._fetch_content(cls.url)
+
+        h1 = soup.find('article', class_='lead-story')\
+            .find(class_='article-headline')\
+            .a['href']
+        h1 = urllib.parse.urljoin(cls.url, h1)
+        h1s = (h1,)
+
+        top_articles = soup.find('section', class_='top-news')\
+            .find_all('article', recursive=False)
+        h2s = []
+        for a in top_articles:
+            if a.attrs.get('class') is None:
+                h2s.append(a.a['href'])
+        h2s = tuple(urllib.parse.urljoin(cls.url, x) for x in h2s)
+
+        h3s = soup.find('section', class_='more-from desktop-only')\
+            .ul.find_all('a')
+        h3s = [x['href'] for x in h3s]
+        h3s = tuple(urllib.parse.urljoin(cls.url, x) for x in h3s)
+
+        return h1s, h2s, h3s
--
cgit v1.2.3
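
A minimal usage sketch, not part of the patch, assuming only what the diff itself relies on: that the NewsSource base class provides the _fetch_content() classmethod used above and that it returns a parsed BeautifulSoup tree for the given URL. The tier names h1s, h2s, and h3s come straight from the return statement; everything else below is illustrative.

# Illustrative only: exercise the new source by hand and inspect the three
# tiers of headline URLs it returns (lead story, top news, more-from links).
from unbiased.sources.washtimes import TheWashingtonTimes

h1s, h2s, h3s = TheWashingtonTimes._fetch_urls()

print('lead story:', h1s[0])
print('top news:  ', len(h2s), 'urls')
print('more from: ', len(h3s), 'urls')

# Every scraped href is joined against cls.url with urllib.parse.urljoin, so
# each tier holds absolute URLs even when the page uses site-relative links.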