Skip to content

Commit

Permalink
version 1.02
Browse files Browse the repository at this point in the history
  • Loading branch information
safiuddin786 committed Jul 25, 2021
1 parent 20e4164 commit 1679d06
Show file tree
Hide file tree
Showing 3 changed files with 13 additions and 14 deletions.
2 changes: 1 addition & 1 deletion my_anime_list.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ def details(anime_name):
if dub:
link = link + 'dub'
try:
response = session.get('https://www1.gogoanime.ai/category/' + link)
response = session.get('https://gogoanime.pe/category/' + link)
strainer = SoupStrainer('div', attrs={'class': 'anime_video_body'})
soup = BeautifulSoup(response.content, 'lxml', parse_only=strainer)
total_episodes = soup.find('ul', id="episode_page").findAll('li')
Expand Down
3 changes: 1 addition & 2 deletions search.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,12 @@
import requests
from bs4 import BeautifulSoup, SoupStrainer
import cchardet
import re

def search_anime(search_data):
output = []
session = requests.Session()
try:
response = session.get('https://www1.gogoanime.ai//search.html?keyword=' + search_data)
response = session.get('https://gogoanime.pe//search.html?keyword=' + search_data)

strainer = SoupStrainer('div', attrs={'class' : 'last_episodes'})
soup = BeautifulSoup(response.content, 'lxml', parse_only=strainer)
Expand Down
22 changes: 11 additions & 11 deletions stream.py
Original file line number Diff line number Diff line change
@@ -1,23 +1,23 @@
import requests
from bs4 import BeautifulSoup, SoupStrainer
import cchardet
import re

def stream_links(link, episode):
    """Fetch the available streaming-mirror links for one anime episode.

    Parameters
    ----------
    link : str
        Site-relative anime slug (joined onto the gogoanime base URL).
    episode : str
        Episode number; the page URL is '<slug>-episode-<episode>'.

    Returns
    -------
    dict | None
        Mapping of mirror type (the first CSS class of each <li>) to its
        'data-video' URL, or None when the expected page structure is
        missing (episode not found, site layout changed, etc.).
    """
    session = requests.Session()
    response = session.get('https://gogoanime.pe/' + link + '-episode-' + episode)

    # Restrict parsing to the player area to keep BeautifulSoup fast.
    strainer = SoupStrainer('div', attrs={'class': 'anime_video_body'})
    soup = BeautifulSoup(response.content, 'lxml', parse_only=strainer)

    try:
        links = {}
        # 'anime_muti_link' (sic — the site's own class name) wraps the
        # <ul> of mirror entries; each <li> carries a 'data-video' URL.
        mirror_box = soup.find('div', attrs={'class': 'anime_muti_link'})
        for item in mirror_box.find('ul').findAll('li'):
            mirror_type = item.get('class')[0]
            links[mirror_type] = item.find('a')['data-video']
        return links
    except (AttributeError, TypeError, KeyError, IndexError):
        # find() returning None / missing class or attribute means the
        # markup we expect is absent — report "no links" to the caller.
        return None

0 comments on commit 1679d06

Please sign in to comment.