diff --git a/6-2-pygoogle.py b/6-2-pygoogle.py
new file mode 100644
index 0000000..a3d94ba
--- /dev/null
+++ b/6-2-pygoogle.py
@@ -0,0 +1,22 @@
+#!/usr/bin/python3
+# Pygoogle.py -> Opens several Google search results in browser tabs.
+import sys
+import requests
+import webbrowser
+import bs4
+
+print('Googling')
+# Build the query from ALL command-line arguments.
+# BUG FIX: ' '.join(sys.argv[1]) joined the *characters* of the first
+# argument (e.g. "cats" -> "c a t s"); sys.argv[1:] joins the arguments.
+res = requests.get('http://google.com/search?q=' + ' '.join(sys.argv[1:]))
+res.raise_for_status()
+
+# Retrieve the top search result links (.r is Google's result wrapper class).
+mysoup = bs4.BeautifulSoup(res.text, "html.parser")
+link = mysoup.select('.r a')
+
+# Open a browser tab for each of the top results (at most 5).
+numOpen = min(5, len(link))
+for i in range(numOpen):
+    webbrowser.open('http://google.com' + link[i].get('href'))
diff --git a/source_code/6-1-wikipy.py b/source_code/6-1-wikipy.py
new file mode 100644
index 0000000..460980f
--- /dev/null
+++ b/source_code/6-1-wikipy.py
@@ -0,0 +1,53 @@
+#!/usr/bin/python3
+
+import sys
+import requests
+import bs4
+RED = '\033[31m'
+END = '\033[0m'
+ascii_art = RED \
+    + """
+
+
+
+                             iiii     kkkkkkkk            iiii
+                            i::::i    k::::::k           i::::i
+                             iiii     k::::::k            iiii
+                                      k::::::k
+wwwwwww           wwwww           wwwwwwwiiiiiii  k:::::k    kkkkkkkiiiiiiippppp   pppppppppyyyyyyy           yyyyyyy
+ w:::::w         w:::::w         w:::::w i:::::i  k:::::k   k:::::k i:::::ip::::ppp:::::::::py:::::y         y:::::y
+  w:::::w       w:::::::w       w:::::w   i::::i  k:::::k  k:::::k   i::::ip:::::::::::::::::py:::::y       y:::::y
+   w:::::w     w:::::w:::::w     w:::::w  i::::i  k:::::k k:::::k    i::::ipp::::::ppppp::::::py:::::y     y:::::y
+    w:::::w   w:::::w w:::::w   w:::::w   i::::i  k::::::k:::::k     i::::i p:::::p     p:::::p y:::::y   y:::::y
+     w:::::w w:::::w   w:::::w w:::::w    i::::i  k:::::::::::k      i::::i p:::::p     p:::::p  y:::::y y:::::y
+      w:::::w:::::w     w:::::w:::::w     i::::i  k:::::::::::k      i::::i p:::::p     p:::::p   y:::::y:::::y
+       w:::::::::w       w:::::::::w      i::::i  k::::::k:::::k     i::::i p:::::p    p::::::p    y:::::::::y
+        w:::::::w         w:::::::w      i::::::ik::::::k k:::::k   i::::::ip:::::ppppp:::::::p     y:::::::y
+         w:::::w           w:::::w       i::::::ik::::::k  k:::::k  i::::::ip::::::::::::::::p       y:::::y
+          w:::w             w:::w        i::::::ik::::::k   k:::::k i::::::ip::::::::::::::pp       y:::::y
+           www               www         iiiiiiiikkkkkkkk    kkkkkkkiiiiiiiip::::::pppppppp        y:::::y
+                                                                            p:::::p              y:::::y
+                                                                            p:::::p             y:::::y
+                                                                           p:::::::p           y:::::y
+                                                                           p:::::::p          y:::::y
+                                                                           p:::::::p         yyyyyyy
+                                                                           ppppppppp
+
+
+ [++] wikipy is simple wikipedia scraper [++]
+ Coded By: Ankit Dobhal
+ Let's Begin To Scrape..!
+-------------------------------------------------------------------------------
+wikipy version 1.0
+""" \
+    + END
+print(ascii_art)
+
+# Join all command-line arguments into the article title.
+res = requests.get('https://en.wikipedia.org/wiki/' + ' '.join(sys.argv[1:]))
+res.raise_for_status()  # abort with an exception on HTTP errors
+
+# NOTE: "lxml" requires the third-party lxml package to be installed.
+wiki = bs4.BeautifulSoup(res.text, "lxml")
+# Print every paragraph of the article.
+for elem in wiki.select('p'):
+    print(elem.getText())