python整站下载源码
admin
最后编辑于 2024 年 7 月 17 日
"""Mirror an entire website to a local folder using pywebcopy.

Running this module downloads http://www.baidu.com/ (HTML plus linked
assets) into E://savedpages1//baidu and opens the saved copy in the
default browser when the crawl finishes.
"""
import sys

from pywebcopy import save_webpage  # single-page variant; takes the same keyword arguments
from pywebcopy import save_website

# pywebcopy's crawler recurses once per followed link, so a large site
# can blow past CPython's default recursion limit (~1000). Raise it.
sys.setrecursionlimit(100000)

# NOTE: for a single page instead of a whole site, call
# save_webpage(...) with the same keyword arguments shown below.
save_website(
    url="http://www.baidu.com/",
    project_folder="E://savedpages1//",
    project_name="baidu",
    bypass_robots=False,   # respect the site's robots.txt
    debug=True,            # verbose progress logging
    open_in_browser=True,  # open the mirrored copy when done
    delay=None,            # no artificial delay between requests
    threaded=False,        # crawl sequentially in one thread
)