
Commit c57356d
merge from HoverHell#23
rachmadaniHaryono committed Aug 6, 2015
1 parent d609365
Showing 2 changed files with 20 additions and 5 deletions.
22 changes: 18 additions & 4 deletions reddit.py
@@ -6,13 +6,27 @@
 from json import JSONDecoder


-def getitems(subreddit, previd=''):
+def getitems(subreddit, multireddit, previd=''):
     """Return list of items from a subreddit."""
-    url = 'http://www.reddit.com/r/%s.json' % subreddit
+    if multireddit:
+        if '/m/' not in subreddit:
+            warning = ('That doesn\'t look like a multireddit. Are you sure '
+                       'you need that -multireddit flag?')
+            print warning
+            sys.exit(1)
+        url = 'http://www.reddit.com/user/%s.json' % subreddit
+    if not multireddit:
+        if '/m/' in subreddit:
+            warning = ('It looks like you are trying to fetch a multireddit.\n'
+                       'Check the -multireddit flag. '
+                       'Call --help for more info.')
+            print warning
+            sys.exit(1)
+        url = 'http://www.reddit.com/r/%s.json' % subreddit
     # Get items after item with 'id' of previd.

     hdr = {'User-Agent': 'RedditImageGrab script.'}

     if previd:
         url = '%s?after=t3_%s' % (url, previd)
     try:
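For readers skimming the hunk, the two new branches reduce to a simple URL choice. Below is a minimal standalone sketch of that logic; the helper name build_reddit_url is hypothetical and not part of this commit, and the committed code also relies on sys being imported near the top of reddit.py, outside this hunk.

    def build_reddit_url(reddit, multireddit):
        """Sketch of the URL selection added to getitems() above."""
        if multireddit:
            # e.g. 'someuser/m/art' ->
            # http://www.reddit.com/user/someuser/m/art.json
            return 'http://www.reddit.com/user/%s.json' % reddit
        # e.g. 'wallpapers' -> http://www.reddit.com/r/wallpapers.json
        return 'http://www.reddit.com/r/%s.json' % reddit

    # Pagination (unchanged by this commit): getitems() appends
    # ?after=t3_<id> to continue past a previously fetched item, e.g.
    # http://www.reddit.com/r/wallpapers.json?after=t3_abc123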
3 changes: 2 additions & 1 deletion redditdownload.py
@@ -233,6 +233,7 @@ def extract_urls(url):
 PARSER = ArgumentParser(description='Downloads files with specified extension from the specified subreddit.')
 PARSER.add_argument('reddit', metavar='<subreddit>', help='Subreddit name.')
 PARSER.add_argument('dir', metavar='<dest_file>', nargs='?', default=getcwd(), help='Dir to put downloaded files in.')
+PARSER.add_argument('-multireddit', default=False, action='store_true', required=False, help='Take multireddit instead of subreddit as input. If so, provide <username>/m/<multireddit-name> as argument.')
 PARSER.add_argument('-last', metavar='l', default='', required=False, help='ID of the last downloaded file.')
 PARSER.add_argument('-score', metavar='s', default=0, type=int, required=False, help='Minimum score of images to download.')
 PARSER.add_argument('-num', metavar='n', default=0, type=int, required=False, help='Number of images to download.')
@@ -263,7 +264,7 @@ def extract_urls(url):
 LAST = ARGS.last

 while not FINISHED:
-    ITEMS = getitems(ARGS.reddit, LAST)
+    ITEMS = getitems(ARGS.reddit, ARGS.multireddit, LAST)
     if not ITEMS:
         # No more items to process
         break
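With the new flag wired through to getitems(), a typical invocation looks like this (illustrative sketch; the subreddit, username, multireddit, and destination names are made up):

    # Plain subreddit, unchanged behaviour:
    python redditdownload.py wallpapers ~/reddit-pics

    # Multireddit: pass <username>/m/<multireddit-name> plus the flag, so
    # getitems() builds http://www.reddit.com/user/someuser/m/art.json
    python redditdownload.py someuser/m/art ~/reddit-pics -multireddit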
