-
Notifications
You must be signed in to change notification settings - Fork 4
/
Copy path: dnyaneshwari_scrap.py
48 lines (37 loc) · 1.34 KB
/
dnyaneshwari_scrap.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
import os
import urllib.request
from bs4 import BeautifulSoup
def get_html(url_link):
    """Fetch *url_link* over HTTP(S) and return the raw response body as bytes.

    Uses a context manager so the underlying connection is always closed,
    even if ``read()`` raises (the original leaked the response object).
    """
    with urllib.request.urlopen(url_link) as response:
        return response.read()
def write_adhyays(lis, directory):
    """Download each adhyay (chapter) linked from *lis* and save its text.

    Each ``<li>`` in *lis* is expected to contain an ``<a>`` whose href is a
    page path relative to https://mr.wikisource.org. The paragraph text of
    chapter N is written to ``<directory>/adhyayN.txt`` (UTF-8, one paragraph
    per line). Chapters whose output file already exists are skipped.

    Fixes over the original:
    - the existence check now tests the same ``.txt`` path that is written
      (the original checked a suffix-less name, so the skip never matched);
    - the file handle is opened with ``with`` and only for chapters actually
      downloaded (the original could write into a stale handle, or raise
      ``NameError``, when a file already existed);
    - chapter numbering follows the list position via ``enumerate`` instead
      of a counter that only advanced inside the conditional;
    - the parser is named explicitly, matching ``get_adhyay_links``.
    """
    base_name = "adhyay"
    for no, li in enumerate(lis, start=1):
        path = os.path.join(directory, base_name + str(no) + ".txt")
        if os.path.isfile(path):
            # Already downloaded on a previous run; don't re-fetch.
            continue
        html = get_html("https://mr.wikisource.org" + li.a['href'])
        soup = BeautifulSoup(html, "lxml")
        with open(path, 'wb') as f:
            for p in soup.find_all('p'):
                # The wiki text sometimes carries literal markup remnants.
                lines = p.text.replace('<poem>', ' ').replace('<br>', ' ')
                f.write(lines.encode('utf-8') + "\n".encode('utf-8'))
def create_directory(directory):
    """Ensure *directory* exists, creating it (and any parents) if needed."""
    if os.path.exists(directory):
        return
    os.makedirs(directory)
def get_adhyay_links():
    """Scrape the Marathi Wikisource Dnyaneshwari index page and download
    every chapter into ``./datasets/dnyaneshwari``.

    Finds the first ``<ol>`` on the index page (assumed to be the chapter
    list — TODO confirm the page layout hasn't changed), collects its
    ``<li>`` entries, and hands them to :func:`write_adhyays`.
    """
    directory = "./datasets/dnyaneshwari"
    # Single creation call; the original duplicated this with an inline
    # os.path.exists/os.makedirs check on the same path.
    create_directory(directory)
    url = "https://mr.wikisource.org/wiki/%E0%A4%9C%E0%A5%8D%E0%A4%9E%E0%A4%BE%E0%A4%A8%E0%A5%87%E0%A4%B6%E0%A5%8D%E0%A4%B5%E0%A4%B0%E0%A5%80"
    html = get_html(url)
    soup = BeautifulSoup(html, "lxml")
    ol = soup.find('ol')
    lis = ol.find_all('li')
    write_adhyays(lis, directory)
# Script entry point: scrape the Dnyaneshwari index and download all chapters.
if __name__ == "__main__":
    get_adhyay_links()