6
6
import requests
7
7
from bs4 import BeautifulSoup
8
8
import os
9
+ import sys
9
10
import selenium
10
11
from selenium import webdriver
11
12
from selenium .webdriver .common .keys import Keys
13
+ from selenium .webdriver .chrome .options import Options
14
+
12
15
13
16
# Module-level crawl state. chapter/page are 1-based indices into
# mangapanda's /<manga>/<chapter>/<page> URL scheme; both are reset at the
# top of each iteration of the main loop below, so these initial values
# only matter before the first prompt.
chapter = 1
page = 1
# Manga title as typed by the user; filled in by input() inside the main loop.
MangaName = ''
# Base site URL; chapter/page paths returned by the search are appended to it.
website = 'https://www.mangapanda.com'
#MangaName=input('Enter the manga that you want to download : ')
17
24
18
25
def downloadImg (url ):
19
26
r = requests .get (url )
@@ -28,33 +35,46 @@ def downloadImg(url):
28
35
with open ("%s.jpg" % filename ,'wb' )as f :
29
36
for chunk in response .iter_content (4096 ):
30
37
f .write (chunk )
38
# Main download loop: prompt for a manga title, locate its page via a
# headless-Chrome search on mangapanda, then fetch pages of chapter 1 into
# a folder named after the manga until the site stops returning a
# successful response. Repeats forever; a duplicate folder name is reported
# and skipped via FileExistsError.
while True:
    try:
        chapter = 1
        page = 1
        MangaName = input('Enter the manga that you want to download : ')
        # Raises FileExistsError (handled below) if this manga was already
        # downloaded.
        os.mkdir(MangaName)

        # Headless so no browser window pops up during the search.
        chrome_options = Options()
        chrome_options.add_argument("--headless")
        driver = webdriver.Chrome(chrome_options=chrome_options)
        try:
            driver.get("https://www.mangapanda.com/search")
            searchBar = driver.find_element_by_xpath('//*[@id="searchinput"]')
            searchBar.send_keys(MangaName)
            searchBar.send_keys(Keys.RETURN)
            print(driver.current_url)
            # Re-fetch the post-search URL with requests so BeautifulSoup
            # can parse the plain HTML.
            resp = requests.get(driver.current_url)
        finally:
            # BUG FIX: the driver was never closed, leaking one headless
            # Chrome process per manga in this endless loop.
            driver.quit()

        d = resp.text
        s = BeautifulSoup(d, "lxml")
        lis = s.find_all('a', href=True)
        # Assumes the 8th anchor on the search-result page is the manga's
        # relative link — TODO confirm against the live page layout.
        manga = lis[7]['href']

        url = website + manga + '/' + str(chapter) + '/' + str(page)
        check = requests.get(url)
        #print (check)

        # BUG FIX: remember the starting directory and restore it afterwards.
        # Previously os.chdir() was never undone, so the next manga's folder
        # (and its images) were created *inside* this one, nesting deeper on
        # every iteration.
        base_dir = os.getcwd()
        os.chdir(os.path.join(base_dir, MangaName))
        try:
            print("Downloading...")
            # requests.Response is truthy only for 2xx/3xx statuses, so the
            # loop ends on the first 404 past the chapter's last page.
            while check:
                downloadImg(url)
                page = page + 1
                url = website + manga + '/' + str(chapter) + '/' + str(page)
                print(manga + '/' + str(chapter) + '/' + str(page))
                check = requests.get(url)
        finally:
            os.chdir(base_dir)
    except FileExistsError:
        print("Manga already exists")
0 commit comments