diff --git a/my_anime_list.py b/my_anime_list.py
index ffa7026..0d0f598 100644
--- a/my_anime_list.py
+++ b/my_anime_list.py
@@ -20,7 +20,7 @@ def details(anime_name):
     if dub:
         link = link + 'dub'
     try:
-        response = session.get('https://www1.gogoanime.ai/category/' + link)
+        response = session.get('https://gogoanime.pe/category/' + link)
         strainer = SoupStrainer('div', attrs={'class': 'anime_video_body'})
         soup = BeautifulSoup(response.content, 'lxml', parse_only=strainer)
         total_episodes = soup.find('ul', id="episode_page").findAll('li')
diff --git a/search.py b/search.py
index 900a702..f2c4c08 100644
--- a/search.py
+++ b/search.py
@@ -1,13 +1,12 @@
 import requests
 from bs4 import BeautifulSoup, SoupStrainer
-import cchardet
 import re
 
 def search_anime(search_data):
     output = []
     session = requests.Session()
     try:
-        response = session.get('https://www1.gogoanime.ai//search.html?keyword=' + search_data)
+        response = session.get('https://gogoanime.pe//search.html?keyword=' + search_data)
 
         strainer = SoupStrainer('div', attrs={'class' : 'last_episodes'})
         soup = BeautifulSoup(response.content, 'lxml', parse_only=strainer)
diff --git a/stream.py b/stream.py
index cd629fd..3bd3efe 100644
--- a/stream.py
+++ b/stream.py
@@ -1,23 +1,23 @@
 import requests
 from bs4 import BeautifulSoup, SoupStrainer
-import cchardet
 import re
 
 def stream_links(link,episode):
     session = requests.Session()
-    response = session.get('https://gogoanime.ai' + link + '-episode-' + episode)
+    response = session.get('https://gogoanime.pe/' + link + '-episode-' + episode)
 
-    strainer = SoupStrainer('div', attrs={'class' : 'favorites_book'})
+    strainer = SoupStrainer('div', attrs={'class' : 'anime_video_body'})
     soup = BeautifulSoup(response.content, 'lxml', parse_only=strainer)
 
     try:
-        link = soup.ul.li.a.get('href')
-
-        response_link = session.get(link)
-        strainer = SoupStrainer('div', attrs={'class' : 'dowload'})
-        soup = BeautifulSoup(response_link.content, 'lxml', parse_only=strainer)
-
-        links = [a['href'] for a in soup.find_all('a', href=True)]
-        return links[0]
+        links = {}
+
+        x = soup.find('div', attrs={'class': 'anime_muti_link'})
+        temp = x.find('ul').findAll('li')
+        for t in temp:
+            type = t.get('class')[0]
+            link = t.find('a')['data-video']
+            links[type] = link
+        return links
     except:
         return None
\ No newline at end of file
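
For context, a minimal usage sketch of the changed stream_links, which now returns a dict of provider -> embed URL (scraped from each mirror's data-video attribute) instead of a single download link. The anime slug 'naruto' and episode '1' below are hypothetical examples, and the gogoanime.pe URL scheme may differ.

    # Usage sketch, assuming stream.py from this repo is importable.
    # The slug and episode values are hypothetical examples.
    from stream import stream_links

    links = stream_links('naruto', '1')
    if links:
        # links maps each mirror's <li> class name to the URL taken
        # from the data-video attribute of its anchor tag.
        for provider, url in links.items():
            print(provider, url)
    else:
        print('No stream links found')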